// Package onedrive provides an interface to the Microsoft OneDrive
// object storage system.
package onedrive

import (
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/ncw/rclone/backend/onedrive/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/dircache"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/rclone/lib/readers"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"
)

const (
	rcloneClientID              = "b15665d9-eda6-4092-8539-0eec376afd59"
	rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
	minSleep                    = 10 * time.Millisecond
	maxSleep                    = 2 * time.Second
	decayConstant               = 2 // bigger for slower decay, exponential
	graphURL                    = "https://graph.microsoft.com/v1.0"
	configDriveID               = "drive_id"
	configDriveType             = "drive_type"
	driveTypePersonal           = "personal"
	driveTypeBusiness           = "business"
	driveTypeSharepoint         = "documentLibrary"
	defaultChunkSize            = 10 * fs.MebiByte
	chunkSizeMultiple           = 320 * fs.KibiByte
)
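
// Aside on the chunk size constants: the upload session endpoint used by
// createUploadSession below expects fragments in multiples of 320 KiB
// (chunkSizeMultiple), and the 10 MiB default qualifies since
// 10 MiB = 32 * 320 KiB; checkUploadChunkSize enforces this rule.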

// Globals
var (
	// Description of how to auth for this app for a business account
	oauthConfig = &oauth2.Config{
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
		},
		Scopes:       []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "onedrive",
		Description: "Microsoft OneDrive",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.Config("onedrive", name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
				return
			}

			// Are we running headless?
			if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
				// Yes, okay we are done
				return
			}

			type driveResource struct {
				DriveID   string `json:"id"`
				DriveName string `json:"name"`
				DriveType string `json:"driveType"`
			}
			type drivesResponse struct {
				Drives []driveResource `json:"value"`
			}

			type siteResource struct {
				SiteID   string `json:"id"`
				SiteName string `json:"displayName"`
				SiteURL  string `json:"webUrl"`
			}
			type siteResponse struct {
				Sites []siteResource `json:"value"`
			}

			oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure OneDrive: %v", err)
			}
			srv := rest.NewClient(oAuthClient)

			var opts rest.Opts
			var finalDriveID string
			var siteID string
			switch config.Choose("Your choice",
				[]string{"onedrive", "sharepoint", "driveid", "siteid", "search"},
				[]string{"OneDrive Personal or Business", "Root Sharepoint site", "Type in driveID", "Type in SiteID", "Search a Sharepoint site"},
				false) {

			case "onedrive":
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/me/drives",
				}
			case "sharepoint":
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/sites/root/drives",
				}
			case "driveid":
				fmt.Printf("Paste your Drive ID here> ")
				finalDriveID = config.ReadLine()
			case "siteid":
				fmt.Printf("Paste your Site ID here> ")
				siteID = config.ReadLine()
			case "search":
				fmt.Printf("What to search for> ")
				searchTerm := config.ReadLine()
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/sites?search=" + searchTerm,
				}

				sites := siteResponse{}
				_, err := srv.CallJSON(&opts, nil, &sites)
				if err != nil {
					log.Fatalf("Failed to query available sites: %v", err)
				}

				if len(sites.Sites) == 0 {
					log.Fatalf("Search for '%s' returned no results", searchTerm)
				} else {
					fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
					for index, site := range sites.Sites {
						fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
					}
					siteID = sites.Sites[config.ChooseNumber("Choose site to use:", 0, len(sites.Sites)-1)].SiteID
				}
			}

			// if we have a siteID we need to ask for the drives
			if siteID != "" {
				opts = rest.Opts{
					Method:  "GET",
					RootURL: graphURL,
					Path:    "/sites/" + siteID + "/drives",
				}
			}

			// We don't have the final ID yet?
			// query Microsoft Graph
			if finalDriveID == "" {
				drives := drivesResponse{}
				_, err := srv.CallJSON(&opts, nil, &drives)
				if err != nil {
					log.Fatalf("Failed to query available drives: %v", err)
				}

				if len(drives.Drives) == 0 {
					log.Fatalf("No drives found")
				} else {
					fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
					for index, drive := range drives.Drives {
						fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
					}
					finalDriveID = drives.Drives[config.ChooseNumber("Choose drive to use:", 0, len(drives.Drives)-1)].DriveID
				}
			}

			// Test the driveID and get drive type
			opts = rest.Opts{
				Method:  "GET",
				RootURL: graphURL,
				Path:    "/drives/" + finalDriveID + "/root",
			}
			var rootItem api.Item
			_, err = srv.CallJSON(&opts, nil, &rootItem)
			if err != nil {
				log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
			}

			fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
			// This does not work, YET :)
			if !config.Confirm() {
				log.Fatalf("Cancelled by user")
			}

			m.Set(configDriveID, finalDriveID)
			m.Set(configDriveType, rootItem.ParentReference.DriveType)
			config.SaveConfig()
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Microsoft App Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Microsoft App Client Secret\nLeave blank normally.",
		}, {
			Name: "chunk_size",
			Help: `Chunk size to upload files with - must be multiple of 320k.

Above this size files will be chunked - must be multiple of 320k. Note
that the chunks will be buffered into memory.`,
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name:     "drive_id",
			Help:     "The ID of the drive to use",
			Default:  "",
			Advanced: true,
		}, {
			Name:     "drive_type",
			Help:     "The type of the drive ( personal | business | documentLibrary )",
			Default:  "",
			Advanced: true,
		}, {
			Name: "expose_onenote_files",
			Help: `Set to make OneNote files show up in directory listings.

By default rclone will hide OneNote files in directory listings because
operations like "Open" and "Update" won't work on them. But this
behaviour may also prevent you from deleting them. If you want to
delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
			Default:  false,
			Advanced: true,
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	ChunkSize          fs.SizeSuffix `config:"chunk_size"`
	DriveID            string        `config:"drive_id"`
	DriveType          string        `config:"drive_type"`
	ExposeOneNoteFiles bool          `config:"expose_onenote_files"`
}
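
// For illustration only, a configured remote ends up with a section like the
// following in rclone.conf (the remote name and drive_id below are made up):
//
//	[myonedrive]
//	type = onedrive
//	token = {"access_token":"..."}
//	drive_id = b!exampleDriveID
//	drive_type = business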

// Fs represents a remote one drive
type Fs struct {
	name         string             // name of this remote
	root         string             // the path we are working on
	opt          Options            // parsed options
	features     *fs.Features       // optional features
	srv          *rest.Client       // the connection to the one drive server
	dirCache     *dircache.DirCache // Map of directory path to directory id
	pacer        *pacer.Pacer       // pacer for API calls
	tokenRenewer *oauthutil.Renew   // renew the token on expiry
	driveID      string             // ID to use for querying Microsoft Graph
	driveType    string             // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}

// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs            *Fs       // what this object is part of
	remote        string    // The remote path
	hasMetaData   bool      // whether info below has been set
	isOneNoteFile bool      // Whether the object is a OneNote file
	size          int64     // size of the object
	modTime       time.Time // modification time of the object
	id            string    // ID of the object
	sha1          string    // SHA-1 of the object content
	quickxorhash  string    // QuickXorHash of the object content
	mimeType      string    // Content-Type of object from server (may not be as uploaded)
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("One drive root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a one drive 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	authRetry := false

	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
		authRetry = true
		fs.Debugf(nil, "Should retry: %v", err)
	}
	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
	var opts rest.Opts
	if len(path) == 0 {
		opts = rest.Opts{
			Method: "GET",
			Path:   "/root",
		}
	} else {
		opts = rest.Opts{
			Method: "GET",
			Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
		}
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, nil, &info)
		return shouldRetry(resp, err)
	})

	return info, resp, err
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	// Decode error response
	errResponse := new(api.Error)
	err := rest.DecodeJSON(resp, &errResponse)
	if err != nil {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	if errResponse.ErrorInfo.Code == "" {
		errResponse.ErrorInfo.Code = resp.Status
	}
	return errResponse
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.Byte
	if cs%chunkSizeMultiple != 0 {
		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
	}
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}
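
// A quick illustration of the rule enforced above (values are examples only):
//
//	checkUploadChunkSize(10 * fs.MebiByte)  // nil: 10 MiB = 32 * 320 KiB
//	checkUploadChunkSize(500 * fs.KibiByte) // error: not a multiple of 320 KiB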

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "onedrive: chunk size")
	}

	if opt.DriveID == "" || opt.DriveType == "" {
		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}

	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure OneDrive")
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		driveID:   opt.DriveID,
		driveType: opt.DriveType,
		srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
		pacer:     pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, _, err := f.readMetaDataForPath("")
		return err
	})

	// Get rootID
	rootInfo, _, err := f.readMetaDataForPath("")
	if err != nil || rootInfo.ID == "" {
		return nil, errors.Wrap(err, "failed to get root")
	}

	f.dirCache = dircache.New(root, rootInfo.ID, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/ncw/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// rootSlash returns root with a trailing slash if it is not empty, otherwise
// the empty string
func (f *Fs) rootSlash() string {
	if f.root == "" {
		return f.root
	}
	return f.root + "/"
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	var err error
	if info != nil {
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData() // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
	parent, ok := f.dirCache.GetInv(pathID)
	if !ok {
		return "", false, errors.New("couldn't find parent ID")
	}
	path := leaf
	if parent != "" {
		path = parent + "/" + path
	}
	if f.dirCache.FoundRoot() {
		path = f.rootSlash() + path
	}
	info, resp, err := f.readMetaDataForPath(path)
	if err != nil {
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return "", false, nil
		}
		return "", false, err
	}
	if info.GetPackageType() == api.PackageTypeOneNote {
		return "", false, errors.New("found OneNote file when looking for folder")
	}
	if info.GetFolder() == nil {
		return "", false, errors.New("found file when looking for folder")
	}
	return info.GetID(), true, nil
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(dirID, leaf string) (newID string, err error) {
	// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
	var resp *http.Response
	var info *api.Item
	opts := newOptsCall(dirID, "POST", "/children")
	mkdir := api.CreateItemRequest{
		Name:             replaceReservedChars(leaf),
		ConflictBehavior: "fail",
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
		return "", err
	}

	//fmt.Printf("...Id %q\n", *info.Id)
	return info.GetID(), nil
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := newOptsCall(dirID, "GET", "/children?$top=1000")
OUTER:
	for {
		var result api.ListChildrenResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return found, errors.Wrap(err, "couldn't list files")
		}
		if len(result.Value) == 0 {
			break
		}
		for i := range result.Value {
			item := &result.Value[i]
			isFolder := item.GetFolder() != nil
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			if item.Deleted != nil {
				continue
			}
			item.Name = restoreReservedChars(item.GetName())
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if result.NextLink == "" {
			break
		}
		opts.Path = ""
		opts.RootURL = result.NextLink
	}
	return
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(false)
	if err != nil {
		return nil, err
	}
	directoryID, err := f.dirCache.FindDir(dir, false)
	if err != nil {
		return nil, err
	}
	var iErr error
	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
		if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
			fs.Debugf(info.Name, "OneNote file not shown in directory listing")
			return false
		}

		remote := path.Join(dir, info.GetName())
		folder := info.GetFolder()
		if folder != nil {
			// cache the directory ID for later lookups
			id := info.GetID()
			f.dirCache.Put(remote, id)
			d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
			if folder != nil {
				d.SetItems(folder.ChildCount)
			}
			entries = append(entries, d)
		} else {
			o, err := f.newObjectWithInfo(remote, info)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
	// Create the directory for the object if it doesn't exist
	leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
	if err != nil {
		return nil, leaf, directoryID, err
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, leaf, directoryID, nil
}

// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o, _, _, err := f.createObject(remote, modTime, size)
	if err != nil {
		return nil, err
	}
	return o, o.Update(in, src, options...)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
	err := f.dirCache.FindRoot(true)
	if err != nil {
		return err
	}
	if dir != "" {
		_, err = f.dirCache.FindDir(dir, true)
	}
	return err
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
	opts := newOptsCall(id, "DELETE", "")
	opts.NoResponse = true

	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) error {
	root := path.Join(f.root, dir)
	if root == "" {
		return errors.New("can't purge root directory")
	}
	dc := f.dirCache
	err := dc.FindRoot(false)
	if err != nil {
		return err
	}
	rootID, err := dc.FindDir(dir, false)
	if err != nil {
		return err
	}

	if check {
		// check to see if there are any items
		found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
			return true
		})
		if err != nil {
			return err
		}
		if found {
			return fs.ErrorDirectoryNotEmpty
		}
	}

	err = f.deleteObject(rootID)
	if err != nil {
		return err
	}
	f.dirCache.FlushDir(dir)
	if err != nil {
		return err
	}
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
	return f.purgeCheck(dir, true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(location string, o *Object) error {
	deadline := time.Now().Add(fs.Config.Timeout)
	for time.Now().Before(deadline) {
		var resp *http.Response
		var err error
		var body []byte
		err = f.pacer.Call(func() (bool, error) {
			resp, err = http.Get(location)
			if err != nil {
				return fserrors.ShouldRetry(err), err
			}
			body, err = rest.ReadBody(resp)
			return fserrors.ShouldRetry(err), err
		})
		if err != nil {
			return err
		}
		// Try to decode the body first as an api.AsyncOperationStatus
		var status api.AsyncOperationStatus
		err = json.Unmarshal(body, &status)
		if err != nil {
			return errors.Wrapf(err, "async status result not JSON: %q", body)
		}

		switch status.Status {
		case "failed", "deleteFailed":
			return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
		case "completed":
			err = o.readMetaData()
			return errors.Wrapf(err, "async operation completed but readMetaData failed")
		}

		time.Sleep(1 * time.Second)
	}
	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
}
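
// For reference, the body polled by waitForJob is the Graph async job status
// document; an illustrative (not captured) example of what it might look like
// while a copy is still running is
//
//	{"operation": "itemCopy", "percentageComplete": 27.5, "status": "inProgress"}
//
// and only the "status" field is inspected above.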

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err := srcObj.readMetaData()
	if err != nil {
		return nil, err
	}

	srcPath := srcObj.fs.rootSlash() + srcObj.remote
	dstPath := f.rootSlash() + remote
	if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
		return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// Copy the object
	opts := newOptsCall(srcObj.id, "POST", "/copy")
	opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
	opts.NoResponse = true

	id, _, _ := parseDirID(directoryID)
	replacedLeaf := replaceReservedChars(leaf)
	copyReq := api.CopyItemRequest{
		Name: &replacedLeaf,
		ParentReference: api.ItemReference{
			DriveID: f.driveID,
			ID:      id,
		},
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &copyReq, nil)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	// read location header
	location := resp.Header.Get("Location")
	if location == "" {
		return nil, errors.New("didn't receive location header in copy response")
	}

	// Wait for job to finish
	err = f.waitForJob(location, dstObj)
	if err != nil {
		return nil, err
	}

	// Copy does NOT copy the modTime from the source and there seems to
	// be no way to set date before
	// This will create TWO versions on OneDrive
	err = dstObj.SetModTime(srcObj.ModTime())
	if err != nil {
		return nil, err
	}

	return dstObj, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	return f.purgeCheck("", false)
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
	if err != nil {
		return nil, err
	}

	// Move the object
	opts := newOptsCall(srcObj.id, "PATCH", "")
	id, _, _ := parseDirID(directoryID)

	move := api.MoveItemRequest{
		Name: replaceReservedChars(leaf),
		ParentReference: &api.ItemReference{
			ID: id,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(srcObj.modTime),
			LastModifiedDateTime: api.Timestamp(srcObj.modTime),
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	err = dstObj.setMetaData(&info)
	if err != nil {
		return nil, err
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err := srcFs.dirCache.FindRoot(false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(true)
		if err != nil {
			return err
		}
	} else {
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	var leaf, dstDirectoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true)
	if err != nil {
		return err
	}
	parsedDstDirID, _, _ := parseDirID(dstDirectoryID)

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
	if err != nil {
		return err
	}

	// Get timestamps of src so they can be preserved
	srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
	if err != nil {
		return err
	}

	// Do the move
	opts := newOptsCall(srcID, "PATCH", "")
	move := api.MoveItemRequest{
		Name: replaceReservedChars(leaf),
		ParentReference: &api.ItemReference{
			ID: parsedDstDirID,
		},
		// We set the mod time too as it gets reset otherwise
		FileSystemInfo: &api.FileSystemInfoFacet{
			CreatedDateTime:      srcInfo.CreatedDateTime,
			LastModifiedDateTime: srcInfo.LastModifiedDateTime,
		},
	}
	var resp *http.Response
	var info api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}

	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}

// About gets quota information
func (f *Fs) About() (usage *fs.Usage, err error) {
	var drive api.Drive
	opts := rest.Opts{
		Method: "GET",
		Path:   "",
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, nil, &drive)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "about failed")
	}
	q := drive.Quota
	usage = &fs.Usage{
		Total:   fs.NewUsageValue(q.Total),     // quota of bytes that can be used
		Used:    fs.NewUsageValue(q.Used),      // bytes in use
		Trashed: fs.NewUsageValue(q.Deleted),   // bytes in trash
		Free:    fs.NewUsageValue(q.Remaining), // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	if f.driveType == driveTypePersonal {
		return hash.Set(hash.SHA1)
	}
	return hash.Set(hash.QuickXorHash)
}

// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(remote string) (link string, err error) {
	info, _, err := f.readMetaDataForPath(f.srvPath(remote))
	if err != nil {
		return "", err
	}
	opts := newOptsCall(info.ID, "POST", "/createLink")

	share := api.CreateShareLinkRequest{
		Type:  "view",
		Scope: "anonymous",
	}

	var resp *http.Response
	var result api.CreateShareLinkResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &share, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
		fmt.Println(err)
		return "", err
	}
	return result.Link.WebURL, nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
	return replaceReservedChars(f.rootSlash() + remote)
}

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
	return o.fs.srvPath(o.remote)
}

// Hash returns the SHA-1 of an object (OneDrive Personal) or its
// QuickXorHash (OneDrive Business / SharePoint) as a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
	if o.fs.driveType == driveTypePersonal {
		if t == hash.SHA1 {
			return o.sha1, nil
		}
	} else {
		if t == hash.QuickXorHash {
			return o.quickxorhash, nil
		}
	}
	return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
	}
	return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
	if info.GetFolder() != nil {
		return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
	}
	o.hasMetaData = true
	o.size = info.GetSize()

	o.isOneNoteFile = info.GetPackageType() == api.PackageTypeOneNote

	// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
	//
	// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
	file := info.GetFile()
	if file != nil {
		o.mimeType = file.MimeType
		if file.Hashes.Sha1Hash != "" {
			o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
		}
		if file.Hashes.QuickXorHash != "" {
			h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
			if err != nil {
				fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
			} else {
				o.quickxorhash = hex.EncodeToString(h)
			}
		}
	}
	fileSystemInfo := info.GetFileSystemInfo()
	if fileSystemInfo != nil {
		o.modTime = time.Time(fileSystemInfo.LastModifiedDateTime)
	} else {
		o.modTime = time.Time(info.GetLastModifiedDateTime())
	}
	o.id = info.GetID()
	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	if o.hasMetaData {
		return nil
	}
	info, _, err := o.fs.readMetaDataForPath(o.srvPath())
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "itemNotFound" {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	return o.setMetaData(info)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
	var opts rest.Opts
	_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	_, drive, rootURL := parseDirID(directoryID)
	if drive != "" {
		opts = rest.Opts{
			Method:  "PATCH",
			RootURL: rootURL,
			Path:    "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
		}
	} else {
		opts = rest.Opts{
			Method: "PATCH",
			Path:   "/root:/" + rest.URLPathEscape(o.srvPath()),
		}
	}
	update := api.SetFileSystemInfo{
		FileSystemInfo: api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(modTime),
			LastModifiedDateTime: api.Timestamp(modTime),
		},
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
		return shouldRetry(resp, err)
	})
	return info, err
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
	info, err := o.setModTime(modTime)
	if err != nil {
		return err
	}
	return o.setMetaData(info)
}

// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.id == "" {
		return nil, errors.New("can't download - no id")
	}
	if o.isOneNoteFile {
		return nil, errors.New("can't open a OneNote file")
	}

	fs.FixRangeOption(options, o.size)
	var resp *http.Response
	opts := newOptsCall(o.id, "GET", "/content")
	opts.Options = options

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	if resp.StatusCode == http.StatusOK && resp.ContentLength > 0 && resp.Header.Get("Content-Range") == "" {
		// Overwrite size with actual size since size readings from OneDrive are unreliable.
		o.size = resp.ContentLength
	}
	return resp.Body, err
}

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
	leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	id, drive, rootURL := parseDirID(directoryID)
	var opts rest.Opts
	if drive != "" {
		opts = rest.Opts{
			Method:  "POST",
			RootURL: rootURL,
			Path:    "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession",
		}
	} else {
		opts = rest.Opts{
			Method: "POST",
			Path:   "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/createUploadSession",
		}
	}
	createRequest := api.CreateUploadRequest{}
	createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
	createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
				// Make the error more user-friendly
				err = errors.New(err.Error() + " (is it a OneNote file?)")
			}
		}
		return shouldRetry(resp, err)
	})
	return response, err
}

// uploadFragment uploads a part
func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
	opts := rest.Opts{
		Method:        "PUT",
		RootURL:       url,
		ContentLength: &chunkSize,
		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start, start+chunkSize-1, totalSize),
		Body:          chunk,
	}
	// var response api.UploadFragmentResponse
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		_, _ = chunk.Seek(0, io.SeekStart)
		resp, err = o.fs.srv.Call(&opts)
		if resp != nil {
			defer fs.CheckClose(resp.Body, &err)
		}
		retry, err := shouldRetry(resp, err)
		if !retry && resp != nil {
			if resp.StatusCode == 200 || resp.StatusCode == 201 {
				// we are done :)
				// read the item
				info = &api.Item{}
				return false, json.NewDecoder(resp.Body).Decode(info)
			}
		}
		return retry, err
	})
	return info, err
}
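
// A worked example of the Content-Range headers produced above: with the
// default 10 MiB chunk size, a hypothetical 25 MiB (26214400 byte) file is
// sent as three fragments with
//
//	bytes 0-10485759/26214400
//	bytes 10485760-20971519/26214400
//	bytes 20971520-26214399/26214400
//
// i.e. start is the running offset and the end is start+chunkSize-1.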

// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(url string) (err error) {
	opts := rest.Opts{
		Method:     "DELETE",
		RootURL:    url,
		NoResponse: true,
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	return
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
	if size <= 0 {
		panic("size passed into uploadMultipart must be > 0")
	}

	// Create upload session
	fs.Debugf(o, "Starting multipart upload")
	session, err := o.createUploadSession(modTime)
	if err != nil {
		return nil, err
	}
	uploadURL := session.UploadURL

	// Cancel the session if something went wrong
	defer func() {
		if err != nil {
			fs.Debugf(o, "Cancelling multipart upload: %v", err)
			cancelErr := o.cancelUploadSession(uploadURL)
			if cancelErr != nil {
				fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
			}
		}
	}()

	// Upload the chunks
	remaining := size
	position := int64(0)
	for remaining > 0 {
		n := int64(o.fs.opt.ChunkSize)
		if remaining < n {
			n = remaining
		}
		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
		info, err = o.uploadFragment(uploadURL, position, size, seg, n)
		if err != nil {
			return nil, err
		}
		remaining -= n
		position += n
	}

	return info, nil
}

// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
		panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
	}

	fs.Debugf(o, "Starting singlepart upload")
	var resp *http.Response
	var opts rest.Opts
	_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
	_, drive, rootURL := parseDirID(directoryID)
	if drive != "" {
		opts = rest.Opts{
			Method:        "PUT",
			RootURL:       rootURL,
			Path:          "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
			ContentLength: &size,
			Body:          in,
		}
	} else {
		opts = rest.Opts{
			Method:        "PUT",
			Path:          "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
			ContentLength: &size,
			Body:          in,
		}
	}
	if size == 0 {
		opts.Body = nil
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
				// Make the error more user-friendly
				err = errors.New(err.Error() + " (is it a OneNote file?)")
			}
		}
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}

	err = o.setMetaData(info)
	if err != nil {
		return nil, err
	}
	// Set the mod time now and read metadata
	return o.setModTime(modTime)
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	if o.hasMetaData && o.isOneNoteFile {
		return errors.New("can't upload content to a OneNote file")
	}

	o.fs.tokenRenewer.Start()
	defer o.fs.tokenRenewer.Stop()

	size := src.Size()
	modTime := src.ModTime()

	var info *api.Item
	if size > 0 {
		info, err = o.uploadMultipart(in, size, modTime)
	} else if size == 0 {
		info, err = o.uploadSinglepart(in, size, modTime)
	} else {
		panic("src file size must be >= 0")
	}
	if err != nil {
		return err
	}

	return o.setMetaData(info)
}

// Remove an object
func (o *Object) Remove() error {
	return o.fs.deleteObject(o.id)
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
	return o.mimeType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}

func newOptsCall(id string, method string, route string) (opts rest.Opts) {
	id, drive, rootURL := parseDirID(id)

	if drive != "" {
		return rest.Opts{
			Method:  method,
			RootURL: rootURL,
			Path:    "/" + drive + "/items/" + id + route,
		}
	}
	return rest.Opts{
		Method: method,
		Path:   "/items/" + id + route,
	}
}

func parseDirID(ID string) (string, string, string) {
	if strings.Index(ID, "#") >= 0 {
		s := strings.Split(ID, "#")
		return s[1], s[0], graphURL + "/drives"
	}
	return ID, "", ""
}
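
// Illustrative behaviour of the two helpers above: an ID containing "#" is
// treated as a "drive#item" pair, so a hypothetical ID "driveXYZ#itemABC"
// parses as (id="itemABC", drive="driveXYZ", rootURL=graphURL+"/drives"),
// and newOptsCall then targets "/driveXYZ/items/itemABC<route>" relative to
// that root; a plain ID is used as-is against the Fs's own drive.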

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.MimeTyper       = &Object{}
	_ fs.IDer            = &Object{}
)