2017-03-08 12:21:57 +01:00
// Package onedrive provides an interface to the Microsoft OneDrive
2015-10-04 23:08:31 +02:00
// object storage system.
package onedrive
import (
2019-06-17 10:34:30 +02:00
"context"
2018-04-20 13:55:49 +02:00
"encoding/base64"
"encoding/hex"
2017-03-12 13:00:10 +01:00
"encoding/json"
2021-11-04 11:12:57 +01:00
"errors"
2015-10-04 23:08:31 +02:00
"fmt"
"io"
"net/http"
2021-01-29 21:04:21 +01:00
"net/url"
2016-11-25 22:52:43 +01:00
"path"
2021-01-08 20:26:42 +01:00
"regexp"
2020-01-29 13:16:18 +01:00
"strconv"
2015-10-04 23:08:31 +02:00
"strings"
2020-08-03 21:45:37 +02:00
"sync"
2015-10-04 23:08:31 +02:00
"time"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/backend/onedrive/api"
2019-09-23 15:32:36 +02:00
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
2024-01-05 13:43:19 +01:00
"github.com/rclone/rclone/fs/fshttp"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/fs/hash"
2020-08-03 21:45:37 +02:00
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
2018-11-02 13:14:19 +01:00
"github.com/rclone/rclone/lib/atexit"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/lib/dircache"
2020-01-14 18:33:35 +01:00
"github.com/rclone/rclone/lib/encoder"
2019-07-28 19:47:38 +02:00
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
2015-10-04 23:08:31 +02:00
"golang.org/x/oauth2"
)
const (
2018-08-18 12:06:22 +02:00
// OAuth client credentials used by default for this backend
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
// Pacer settings for rate limiting API calls
minSleep = 10 * time . Millisecond
maxSleep = 2 * time . Second
decayConstant = 2 // bigger for slower decay, exponential
2018-08-21 03:52:24 +02:00
// Keys used to store the chosen drive in the config file
configDriveID = "drive_id"
configDriveType = "drive_type"
2018-08-18 12:06:22 +02:00
// Drive types as reported by the Microsoft Graph API
driveTypePersonal = "personal"
driveTypeBusiness = "business"
driveTypeSharepoint = "documentLibrary"
2021-03-02 20:11:57 +01:00
// Upload chunk sizes - chunks must be a multiple of chunkSizeMultiple
defaultChunkSize = 10 * fs . Mebi
chunkSizeMultiple = 320 * fs . Kibi
2021-01-29 21:04:21 +01:00
// Supported national cloud regions
regionGlobal = "global"
regionUS = "us"
regionDE = "de"
regionCN = "cn"
2015-10-04 23:08:31 +02:00
)
// Globals
var (
2021-01-29 21:04:21 +01:00
// OAuth2 paths, appended to the region specific authEndpoint below
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
2022-06-14 11:21:23 +02:00
// Default scopes requested - with and without SharePoint site access
scopeAccess = fs . SpaceSepList { "Files.Read" , "Files.ReadWrite" , "Files.Read.All" , "Files.ReadWrite.All" , "Sites.Read.All" , "offline_access" }
scopeAccessWithoutSites = fs . SpaceSepList { "Files.Read" , "Files.ReadWrite" , "Files.Read.All" , "Files.ReadWrite.All" , "offline_access" }
2022-01-10 14:28:19 +01:00
2017-08-03 21:57:42 +02:00
// Description of how to auth for this app for a business account
2018-08-18 12:06:22 +02:00
oauthConfig = & oauth2 . Config {
2022-06-14 11:21:23 +02:00
Scopes : scopeAccess ,
2018-08-18 12:06:22 +02:00
ClientID : rcloneClientID ,
ClientSecret : obscure . MustReveal ( rcloneEncryptedClientSecret ) ,
2017-08-03 21:57:42 +02:00
RedirectURL : oauthutil . RedirectLocalhostURL ,
}
2019-09-23 15:32:36 +02:00
2021-01-29 21:04:21 +01:00
// Base URLs for the Microsoft Graph API in each national cloud
graphAPIEndpoint = map [ string ] string {
"global" : "https://graph.microsoft.com" ,
"us" : "https://graph.microsoft.us" ,
"de" : "https://graph.microsoft.de" ,
"cn" : "https://microsoftgraph.chinacloudapi.cn" ,
}
// Base URLs for OAuth authentication in each national cloud
authEndpoint = map [ string ] string {
"global" : "https://login.microsoftonline.com" ,
"us" : "https://login.microsoftonline.us" ,
"de" : "https://login.microsoftonline.de" ,
"cn" : "https://login.chinacloudapi.cn" ,
}
2019-09-23 15:32:36 +02:00
// QuickXorHashType is the hash.Type for OneDrive
QuickXorHashType hash . Type
2015-10-04 23:08:31 +02:00
)
// Register with Fs
func init ( ) {
2021-05-21 16:32:33 +02:00
// Register the quickxor hash with rclone's hash registry so it can
// be referred to by name.
QuickXorHashType = hash . RegisterHash ( "quickxor" , "QuickXorHash" , 40 , quickxorhash . New )
2016-02-18 12:35:25 +01:00
// Register the backend together with all of its options.
fs . Register ( & fs . RegInfo {
2016-02-15 19:11:53 +01:00
Name : "onedrive" ,
Description : "Microsoft OneDrive" ,
NewFs : NewFs ,
2021-04-29 10:28:18 +02:00
Config : Config ,
2020-08-02 01:32:21 +02:00
Options : append ( oauthutil . SharedOptions , [ ] fs . Option { {
2021-01-29 21:04:21 +01:00
Name : "region" ,
Help : "Choose national cloud region for OneDrive." ,
Default : "global" ,
Examples : [ ] fs . OptionExample {
{
Value : regionGlobal ,
Help : "Microsoft Cloud Global" ,
} , {
Value : regionUS ,
Help : "Microsoft Cloud for US Government" ,
} , {
Value : regionDE ,
Help : "Microsoft Cloud Germany" ,
} , {
Value : regionCN ,
2022-07-17 18:07:23 +02:00
Help : "Azure and Office 365 operated by Vnet Group in China" ,
2021-01-29 21:04:21 +01:00
} ,
} ,
} , {
2018-10-01 19:36:15 +02:00
Name : "chunk_size" ,
2019-10-12 13:08:22 +02:00
Help : ` Chunk size to upload files with - must be multiple of 320 k ( 327 , 680 bytes ) .
2018-10-01 19:36:15 +02:00
2020-03-10 16:14:08 +01:00
Above this size files will be chunked - must be multiple of 320 k ( 327 , 680 bytes ) and
should not exceed 250 M ( 262 , 144 , 000 bytes ) else you may encounter \ "Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory . ` ,
2018-09-07 13:02:27 +02:00
Default : defaultChunkSize ,
2018-05-14 19:06:57 +02:00
Advanced : true ,
2018-08-21 04:50:17 +02:00
} , {
2023-07-06 18:55:53 +02:00
Name : "drive_id" ,
Help : "The ID of the drive to use." ,
Default : "" ,
Advanced : true ,
Sensitive : true ,
2018-08-21 04:50:17 +02:00
} , {
Name : "drive_type" ,
2021-08-16 11:30:01 +02:00
Help : "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ")." ,
2018-08-21 04:50:17 +02:00
Default : "" ,
Advanced : true ,
2022-01-28 18:17:35 +01:00
} , {
Name : "root_folder_id" ,
Help : ` ID of the root folder .
This isn ' t normally needed , but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal .
` ,
2023-07-06 18:55:53 +02:00
Advanced : true ,
Sensitive : true ,
2022-01-10 14:28:19 +01:00
} , {
2022-06-14 11:21:23 +02:00
Name : "access_scopes" ,
Help : ` Set scopes to be requested by rclone .
Choose or manually enter a custom space separated list with all scopes , that rclone should request .
` ,
Default : scopeAccess ,
Advanced : true ,
Examples : [ ] fs . OptionExample {
{
Value : "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All Sites.Read.All offline_access" ,
Help : "Read and write access to all resources" ,
} ,
{
Value : "Files.Read Files.Read.All Sites.Read.All offline_access" ,
Help : "Read only access to all resources" ,
} ,
{
Value : "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access" ,
Help : "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true" ,
} ,
} } , {
2022-01-10 14:28:19 +01:00
// Superseded by access_scopes, hence hidden from the docs and the
// interactive config (see Hide below).
Name : "disable_site_permission" ,
Help : ` Disable the request for Sites . Read . All permission .
If set to true , you will no longer be able to search for a SharePoint site when
configuring drive ID , because rclone will not request Sites . Read . All permission .
Set it to true if your organization didn ' t assign Sites . Read . All permission to the
application , and your organization disallows users to consent app permission
request on their own . ` ,
Default : false ,
Advanced : true ,
2022-06-14 11:21:23 +02:00
Hide : fs . OptionHideBoth ,
2018-10-03 06:46:25 +02:00
} , {
2018-10-01 19:36:15 +02:00
Name : "expose_onenote_files" ,
Help : ` Set to make OneNote files show up in directory listings .
2021-11-04 12:50:43 +01:00
By default , rclone will hide OneNote files in directory listings because
2018-10-01 19:36:15 +02:00
operations like "Open" and "Update" won ' t work on them . But this
behaviour may also prevent you from deleting them . If you want to
delete OneNote files or otherwise want them to show up in directory
listing , set this option . ` ,
2018-10-03 06:46:25 +02:00
Default : false ,
Advanced : true ,
2020-03-15 13:07:46 +01:00
} , {
Name : "server_side_across_configs" ,
Default : false ,
2023-06-12 15:35:07 +02:00
Help : ` Deprecated : use -- server - side - across - configs instead .
Allow server - side operations ( e . g . copy ) to work across different onedrive configs .
2020-03-15 13:07:46 +01:00
2021-01-05 14:26:00 +01:00
This will only work if you are copying between two OneDrive * Personal * drives AND
the files to copy are already shared between them . In other cases , rclone will
fall back to normal copy ( which will be slightly slower ) . ` ,
2020-03-15 13:07:46 +01:00
Advanced : true ,
2021-04-04 10:08:16 +02:00
} , {
Name : "list_chunk" ,
Help : "Size of listing chunk." ,
Default : 1000 ,
Advanced : true ,
2020-08-06 18:59:26 +02:00
} , {
Name : "no_versions" ,
Default : false ,
2021-08-16 11:30:01 +02:00
Help : ` Remove all versions on modifying operations .
2020-08-06 18:59:26 +02:00
Onedrive for business creates versions when rclone uploads new files
overwriting an existing one and when it sets the modification time .
These versions take up space out of the quota .
This flag checks for versions after file upload and setting
modification time and removes all but the last version .
* * NB * * Onedrive personal can ' t currently delete versions so don ' t use
this flag there .
2021-01-07 12:02:54 +01:00
` ,
Advanced : true ,
} , {
Name : "link_scope" ,
Default : "anonymous" ,
Help : ` Set the scope of the links created by the link command. ` ,
Advanced : true ,
Examples : [ ] fs . OptionExample { {
Value : "anonymous" ,
2021-08-16 11:30:01 +02:00
Help : "Anyone with the link has access, without needing to sign in.\nThis may include people outside of your organization.\nAnonymous link support may be disabled by an administrator." ,
2021-01-07 12:02:54 +01:00
} , {
Value : "organization" ,
2021-08-16 11:30:01 +02:00
Help : "Anyone signed into your organization (tenant) can use the link to get access.\nOnly available in OneDrive for Business and SharePoint." ,
2021-01-07 12:02:54 +01:00
} } ,
} , {
Name : "link_type" ,
Default : "view" ,
Help : ` Set the type of the links created by the link command. ` ,
Advanced : true ,
Examples : [ ] fs . OptionExample { {
Value : "view" ,
Help : "Creates a read-only link to the item." ,
} , {
Value : "edit" ,
Help : "Creates a read-write link to the item." ,
} , {
Value : "embed" ,
Help : "Creates an embeddable link to the item." ,
} } ,
} , {
Name : "link_password" ,
Default : "" ,
Help : ` Set the password for links created by the link command .
At the time of writing this only works with OneDrive personal paid accounts .
2020-08-06 18:59:26 +02:00
` ,
2023-07-06 18:55:53 +02:00
Advanced : true ,
Sensitive : true ,
2023-03-01 13:02:01 +01:00
} , {
Name : "hash_type" ,
Default : "auto" ,
Help : ` Specify the hash in use for the backend .
This specifies the hash type in use . If set to "auto" it will use the
2023-03-25 08:58:44 +01:00
default hash which is QuickXorHash .
2023-03-01 13:07:27 +01:00
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal . For 1.62 and later the default is to use a QuickXorHash for
all onedrive types . If an SHA1 hash is desired then set this option
accordingly .
From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDriver Personal .
2023-03-01 13:02:01 +01:00
This can be set to "none" to not use any hashes .
If the hash requested does not exist on the object , it will be
returned as an empty string which is treated as a missing hash by
rclone .
` ,
Examples : [ ] fs . OptionExample { {
Value : "auto" ,
Help : "Rclone chooses the best hash" ,
} , {
Value : "quickxor" ,
Help : "QuickXor" ,
} , {
Value : "sha1" ,
Help : "SHA1" ,
} , {
Value : "sha256" ,
Help : "SHA256" ,
} , {
Value : "crc32" ,
Help : "CRC32" ,
} , {
Value : "none" ,
Help : "None - don't use any hashes" ,
} } ,
Advanced : true ,
2023-05-03 16:19:26 +02:00
} , {
Name : "av_override" ,
Default : false ,
Help : ` Allows download of files the server thinks has a virus .
The onedrive / sharepoint server may check files uploaded with an Anti
Virus checker . If it detects any potential viruses or malware it will
block download of the file .
In this case you will see a message like this
server reports this file is infected with a virus - use -- onedrive - av - override to download anyway : Infected ( name of virus ) : 403 Forbidden :
If you are 100 % sure you want to download this file anyway then use
the -- onedrive - av - override flag , or av_override = true in the config
file .
` ,
Advanced : true ,
2023-11-26 16:54:13 +01:00
} , {
Name : "delta" ,
Default : false ,
Help : strings . ReplaceAll ( ` If set rclone will use delta listing to implement recursive listings .
2024-03-07 13:57:15 +01:00
If this flag is set the onedrive backend will advertise | ListR |
2023-11-26 16:54:13 +01:00
support for recursive listings .
Setting this flag speeds up these things greatly :
rclone lsf - R onedrive :
rclone size onedrive :
rclone rc vfs / refresh recursive = true
* * However * * the delta listing API * * only * * works at the root of the
drive . If you use it not at the root then it recurses from the root
and discards all the data that is not under the directory you asked
for . So it will be correct but may not be very efficient .
This is why this flag is not set as the default .
As a rule of thumb if nearly all of your data is under rclone ' s root
directory ( the | root / directory | in | onedrive : root / directory | ) then
using this flag will be be a big performance win . If your data is
mostly not under the root then using this flag will be a big
performance loss .
It is recommended if you are mounting your onedrive at the root
( or near the root when using crypt ) and using rclone | rc vfs / refresh | .
` , "|", " ` " ) ,
Advanced : true ,
2020-01-14 18:33:35 +01:00
} , {
Name : config . ConfigEncoding ,
Help : config . ConfigEncodingHelp ,
Advanced : true ,
2020-01-14 22:51:49 +01:00
// List of replaced characters:
// < (less than) -> '< ' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '> ' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> ': ' // FULLWIDTH COLON
// " (double quote) -> '" ' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '\ ' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '| ' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '? ' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '* ' // FULLWIDTH ASTERISK
//
// Folder names cannot begin with a tilde ('~')
// List of replaced characters:
// ~ (tilde) -> '~ ' // FULLWIDTH TILDE
//
// Additionally names can't begin with a space ( ) or end with a period (.) or space ( ).
// List of replaced characters:
// . (period) -> '. ' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as json doesn't handle them.
//
// The OneDrive API documentation lists the set of reserved characters, but
// testing showed this list is incomplete. This are the differences:
// - " (double quote) is rejected, but missing in the documentation
// - space at the end of file and folder names is rejected, but missing in the documentation
// - period at the end of file names is rejected, but missing in the documentation
//
// Adding these restrictions to the OneDrive API documentation yields exactly
// the same rules as the Windows naming conventions.
//
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
Default : ( encoder . Display |
encoder . EncodeBackSlash |
encoder . EncodeLeftSpace |
encoder . EncodeLeftTilde |
encoder . EncodeRightPeriod |
encoder . EncodeRightSpace |
encoder . EncodeWin |
encoder . EncodeInvalidUtf8 ) ,
2020-08-02 01:32:21 +02:00
} } ... ) ,
2015-10-04 23:08:31 +02:00
} )
2018-05-14 19:06:57 +02:00
}
2017-08-03 21:57:42 +02:00
2021-04-29 10:28:18 +02:00
// getRegionURL reads the "region" key from the config and returns the
// region together with the base Microsoft Graph URL for it (with the
// "/v1.0" version suffix appended).
//
// If the region is unset or not one of the known national clouds it
// falls back to the global endpoint instead of returning the invalid
// URL "/v1.0".
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
	region, _ = m.Get("region")
	endpoint, ok := graphAPIEndpoint[region]
	if !ok {
		// Unknown or empty region - use the global cloud so we still
		// produce a usable URL.
		endpoint = graphAPIEndpoint[regionGlobal]
	}
	graphURL = endpoint + "/v1.0"
	return region, graphURL
}
// Config for chooseDrive
type chooseDriveOpt struct {
opts rest . Opts // initial request used to enumerate the drives (may be replaced during resolution)
finalDriveID string // if set, skip enumeration and offer just this drive ID
siteID string // if set, enumerate the drives belonging to this site
relativePath string // if set, first resolve the site ID from this server-relative URL
}
// chooseDrive returns a query to choose which drive the user is interested in
//
// The drive list is resolved in stages:
//   - if opt.relativePath is set, look the site up by server-relative URL
//     to obtain a site ID
//   - if a site ID is known, list the drives belonging to that site
//   - if opt.finalDriveID is set, skip enumeration and offer it directly
//   - otherwise use opt.opts as supplied by the caller
//
// The resulting drives are presented to the user as a config choice
// which continues at state "driveid_final".
func chooseDrive ( ctx context . Context , name string , m configmap . Mapper , srv * rest . Client , opt chooseDriveOpt ) ( * fs . ConfigOut , error ) {
_ , graphURL := getRegionURL ( m )
// if we use server-relative URL for finding the drive
if opt . relativePath != "" {
opt . opts = rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/sites/root:" + opt . relativePath ,
}
2023-09-19 10:42:12 +02:00
site := api . SiteResource { }
2021-04-29 10:28:18 +02:00
_ , err := srv . CallJSON ( ctx , & opt . opts , nil , & site )
if err != nil {
return fs . ConfigError ( "choose_type" , fmt . Sprintf ( "Failed to query available site by relative path: %v" , err ) )
}
opt . siteID = site . SiteID
}
// if we have a siteID we need to ask for the drives
if opt . siteID != "" {
opt . opts = rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/sites/" + opt . siteID + "/drives" ,
}
}
2023-09-19 10:42:12 +02:00
drives := api . DrivesResponse { }
2021-04-29 10:28:18 +02:00
// We don't have the final ID yet?
// query Microsoft Graph
if opt . finalDriveID == "" {
_ , err := srv . CallJSON ( ctx , & opt . opts , nil , & drives )
if err != nil {
return fs . ConfigError ( "choose_type" , fmt . Sprintf ( "Failed to query available drives: %v" , err ) )
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opt . opts . Path == "/me/drives" {
opt . opts . Path = "/me/drive"
2023-09-19 10:42:12 +02:00
meDrive := api . DriveResource { }
2021-04-29 10:28:18 +02:00
_ , err := srv . CallJSON ( ctx , & opt . opts , nil , & meDrive )
if err != nil {
return fs . ConfigError ( "choose_type" , fmt . Sprintf ( "Failed to query available drives: %v" , err ) )
}
// check whether /me/drive is already in the list returned by /me/drives
found := false
for _ , drive := range drives . Drives {
if drive . DriveID == meDrive . DriveID {
found = true
break
}
}
// add the me drive if not found already
if ! found {
fs . Debugf ( nil , "Adding %v to drives list from /me/drive" , meDrive )
drives . Drives = append ( drives . Drives , meDrive )
}
}
} else {
2023-09-19 10:42:12 +02:00
// The user typed in a drive ID directly - offer just that one
drives . Drives = append ( drives . Drives , api . DriveResource {
2021-04-29 10:28:18 +02:00
DriveID : opt . finalDriveID ,
DriveName : "Chosen Drive ID" ,
DriveType : "drive" ,
} )
}
if len ( drives . Drives ) == 0 {
return fs . ConfigError ( "choose_type" , "No drives found" )
}
2021-05-04 13:27:50 +02:00
return fs . ConfigChoose ( "driveid_final" , "config_driveid" , "Select drive you want to use" , len ( drives . Drives ) , func ( i int ) ( string , string ) {
2021-04-29 10:28:18 +02:00
drive := drives . Drives [ i ]
return drive . DriveID , fmt . Sprintf ( "%s (%s)" , drive . DriveName , drive . DriveType )
} )
}
// Config the backend
//
// This implements the multi-state configuration dialog. An empty state
// starts the OAuth flow; the subsequent states ("choose_type",
// "onedrive", "sharepoint", "url", "search", "driveid", "siteid",
// "path" and their *_end continuations) narrow down which drive to use
// and finally verify it and save the drive ID and type into the config
// in state "driveid_final".
func Config ( ctx context . Context , name string , m configmap . Mapper , config fs . ConfigIn ) ( * fs . ConfigOut , error ) {
region , graphURL := getRegionURL ( m )
2021-05-17 11:00:54 +02:00
// Empty state - set up the OAuth scopes and endpoints and start the
// token flow.
if config . State == "" {
2022-06-14 11:21:23 +02:00
var accessScopes fs . SpaceSepList
accessScopesString , _ := m . Get ( "access_scopes" )
err := accessScopes . Set ( accessScopesString )
if err != nil {
return nil , fmt . Errorf ( "failed to parse access_scopes: %w" , err )
}
oauthConfig . Scopes = [ ] string ( accessScopes )
2022-01-10 14:28:19 +01:00
disableSitePermission , _ := m . Get ( "disable_site_permission" )
if disableSitePermission == "true" {
2022-06-14 11:21:23 +02:00
oauthConfig . Scopes = scopeAccessWithoutSites
2022-01-10 14:28:19 +01:00
}
2021-04-29 10:28:18 +02:00
oauthConfig . Endpoint = oauth2 . Endpoint {
AuthURL : authEndpoint [ region ] + authPath ,
TokenURL : authEndpoint [ region ] + tokenPath ,
}
return oauthutil . ConfigOut ( "choose_type" , & oauthutil . Options {
OAuth2Config : oauthConfig ,
} )
2021-05-17 11:00:54 +02:00
}
// All other states need an authenticated client to talk to the Graph API
oAuthClient , _ , err := oauthutil . NewClient ( ctx , name , m , oauthConfig )
if err != nil {
2021-11-04 11:12:57 +01:00
return nil , fmt . Errorf ( "failed to configure OneDrive: %w" , err )
2021-05-17 11:00:54 +02:00
}
srv := rest . NewClient ( oAuthClient )
switch config . State {
2021-04-29 10:28:18 +02:00
case "choose_type" :
2022-01-18 22:38:24 +01:00
return fs . ConfigChooseExclusiveFixed ( "choose_type_done" , "config_type" , "Type of connection" , [ ] fs . OptionExample { {
2021-04-29 10:28:18 +02:00
Value : "onedrive" ,
Help : "OneDrive Personal or Business" ,
} , {
Value : "sharepoint" ,
Help : "Root Sharepoint site" ,
} , {
Value : "url" ,
2021-08-16 11:30:01 +02:00
Help : "Sharepoint site name or URL\nE.g. mysite or https://contoso.sharepoint.com/sites/mysite" ,
2021-04-29 10:28:18 +02:00
} , {
Value : "search" ,
Help : "Search for a Sharepoint site" ,
} , {
Value : "driveid" ,
Help : "Type in driveID (advanced)" ,
} , {
Value : "siteid" ,
Help : "Type in SiteID (advanced)" ,
} , {
Value : "path" ,
2021-08-16 11:30:01 +02:00
Help : "Sharepoint server-relative path (advanced)\nE.g. /teams/hr" ,
2021-04-29 10:28:18 +02:00
} } )
case "choose_type_done" :
// Jump to next state according to config chosen
return fs . ConfigGoto ( config . Result )
case "onedrive" :
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
opts : rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/me/drives" ,
} ,
} )
case "sharepoint" :
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
opts : rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/sites/root/drives" ,
} ,
} )
case "driveid" :
2021-05-04 13:27:50 +02:00
return fs . ConfigInput ( "driveid_end" , "config_driveid_fixed" , "Drive ID" )
2021-04-29 10:28:18 +02:00
case "driveid_end" :
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
finalDriveID : config . Result ,
} )
case "siteid" :
2021-05-04 13:27:50 +02:00
return fs . ConfigInput ( "siteid_end" , "config_siteid" , "Site ID" )
2021-04-29 10:28:18 +02:00
case "siteid_end" :
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
siteID : config . Result ,
} )
case "url" :
2021-05-04 13:27:50 +02:00
return fs . ConfigInput ( "url_end" , "config_site_url" , ` Site URL
2021-04-29 10:28:18 +02:00
2023-09-21 16:36:26 +02:00
Examples :
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
2021-04-29 10:28:18 +02:00
` )
case "url_end" :
siteURL := config . Result
2023-09-21 16:36:26 +02:00
// Extract the server-relative part from a full sharepoint.com URL
re := regexp . MustCompile ( ` https://.*\.sharepoint\.com(/.*) ` )
2021-04-29 10:28:18 +02:00
match := re . FindStringSubmatch ( siteURL )
if len ( match ) == 2 {
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
2023-09-21 16:36:26 +02:00
relativePath : match [ 1 ] ,
2021-04-29 10:28:18 +02:00
} )
}
// Otherwise treat the input as a bare site name
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
relativePath : "/sites/" + siteURL ,
} )
case "path" :
2021-05-04 13:27:50 +02:00
return fs . ConfigInput ( "path_end" , "config_sharepoint_url" , ` Server-relative URL ` )
2021-04-29 10:28:18 +02:00
case "path_end" :
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
relativePath : config . Result ,
} )
case "search" :
2021-05-04 13:27:50 +02:00
return fs . ConfigInput ( "search_end" , "config_search_term" , ` Search term ` )
2021-04-29 10:28:18 +02:00
case "search_end" :
searchTerm := config . Result
opts := rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/sites?search=" + searchTerm ,
}
2023-09-19 10:42:12 +02:00
sites := api . SiteResponse { }
2021-04-29 10:28:18 +02:00
_ , err := srv . CallJSON ( ctx , & opts , nil , & sites )
if err != nil {
return fs . ConfigError ( "choose_type" , fmt . Sprintf ( "Failed to query available sites: %v" , err ) )
}
if len ( sites . Sites ) == 0 {
return fs . ConfigError ( "choose_type" , fmt . Sprintf ( "search for %q returned no results" , searchTerm ) )
}
2021-05-04 13:27:50 +02:00
return fs . ConfigChoose ( "search_sites" , "config_site" , ` Select the Site you want to use ` , len ( sites . Sites ) , func ( i int ) ( string , string ) {
2021-04-29 10:28:18 +02:00
site := sites . Sites [ i ]
return site . SiteID , fmt . Sprintf ( "%s (%s)" , site . SiteName , site . SiteURL )
} )
case "search_sites" :
return chooseDrive ( ctx , name , m , srv , chooseDriveOpt {
siteID : config . Result ,
} )
case "driveid_final" :
finalDriveID := config . Result
// Test the driveID and get drive type
opts := rest . Opts {
Method : "GET" ,
RootURL : graphURL ,
Path : "/drives/" + finalDriveID + "/root" }
var rootItem api . Item
_ , err = srv . CallJSON ( ctx , & opts , nil , & rootItem )
if err != nil {
return fs . ConfigError ( "choose_type" , fmt . Sprintf ( "Failed to query root for drive %q: %v" , finalDriveID , err ) )
}
// Save the verified drive into the config
m . Set ( configDriveID , finalDriveID )
m . Set ( configDriveType , rootItem . ParentReference . DriveType )
2021-05-04 13:27:50 +02:00
return fs . ConfigConfirm ( "driveid_final_end" , true , "config_drive_ok" , fmt . Sprintf ( "Drive OK?\n\nFound drive %q of type %q\nURL: %s\n" , rootItem . Name , rootItem . ParentReference . DriveType , rootItem . WebURL ) )
2021-04-29 10:28:18 +02:00
case "driveid_final_end" :
if config . Result == "true" {
// Config complete
return nil , nil
}
// User rejected the drive - start again
return fs . ConfigGoto ( "choose_type" )
}
return nil , fmt . Errorf ( "unknown state %q" , config . State )
}
2018-05-14 19:06:57 +02:00
// Options defines the configuration for this backend
type Options struct {
2021-01-29 21:04:21 +01:00
Region string ` config:"region" ` // national cloud region (global, us, de, cn)
2020-03-15 13:07:46 +01:00
ChunkSize fs . SizeSuffix ` config:"chunk_size" ` // upload chunk size - multiple of 320k
DriveID string ` config:"drive_id" ` // ID of the drive to use
DriveType string ` config:"drive_type" ` // personal | business | documentLibrary
2022-01-28 18:17:35 +01:00
RootFolderID string ` config:"root_folder_id" ` // optional override for the root folder ID
2022-01-10 14:28:19 +01:00
DisableSitePermission bool ` config:"disable_site_permission" ` // don't request Sites.Read.All (hidden option)
2022-06-14 11:21:23 +02:00
AccessScopes fs . SpaceSepList ` config:"access_scopes" ` // OAuth scopes rclone requests
2020-03-15 13:07:46 +01:00
ExposeOneNoteFiles bool ` config:"expose_onenote_files" ` // show OneNote files in directory listings
ServerSideAcrossConfigs bool ` config:"server_side_across_configs" ` // deprecated - use the global flag instead
2021-04-04 10:08:16 +02:00
ListChunk int64 ` config:"list_chunk" ` // size of listing chunk
2020-08-06 18:59:26 +02:00
NoVersions bool ` config:"no_versions" ` // remove versions after modifying operations
2021-01-07 12:02:54 +01:00
LinkScope string ` config:"link_scope" ` // scope of links made by the link command
LinkType string ` config:"link_type" ` // type of links made by the link command
LinkPassword string ` config:"link_password" ` // password for links made by the link command
2023-03-01 13:02:01 +01:00
HashType string ` config:"hash_type" ` // hash in use - auto, quickxor, sha1, sha256, crc32 or none
2023-05-03 16:19:26 +02:00
AVOverride bool ` config:"av_override" ` // allow download of files flagged as infected
2023-11-26 16:54:13 +01:00
Delta bool ` config:"delta" ` // use delta listing to implement recursive listings
2020-03-15 13:07:46 +01:00
Enc encoder . MultiEncoder ` config:"encoding" ` // filename encoding in use
2015-10-04 23:08:31 +02:00
}
2022-08-30 10:23:29 +02:00
// Fs represents a remote OneDrive
//
// See Name, Root, String and Features below for the basic accessors.
2015-10-04 23:08:31 +02:00
type Fs struct {
2017-01-29 21:42:43 +01:00
name string // name of this remote
root string // the path we are working on
2018-05-14 19:06:57 +02:00
opt Options // parsed options
2020-11-05 12:33:32 +01:00
ci * fs . ConfigInfo // global config
2017-01-29 21:42:43 +01:00
features * fs . Features // optional features
2022-08-30 10:23:29 +02:00
srv * rest . Client // the connection to the OneDrive server
2024-01-05 13:43:19 +01:00
unAuth * rest . Client // no authentication connection to the OneDrive server
2017-01-29 21:42:43 +01:00
dirCache * dircache . DirCache // Map of directory path to directory id
2019-02-09 21:52:15 +01:00
pacer * fs . Pacer // pacer for API calls
2017-01-29 21:42:43 +01:00
tokenRenewer * oauthutil . Renew // renew the token on expiry
2018-08-18 12:06:22 +02:00
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
2023-03-01 13:02:01 +01:00
hashType hash . Type // type of the hash we are using
2015-10-04 23:08:31 +02:00
}
2022-08-30 10:23:29 +02:00
// Object describes a OneDrive object
2015-10-04 23:08:31 +02:00
//
// Will definitely have info but maybe not meta
type Object struct {
2018-10-03 06:46:25 +02:00
fs * Fs // what this object is part of
remote string // The remote path
hasMetaData bool // whether info below has been set
isOneNoteFile bool // Whether the object is a OneNote file
size int64 // size of the object
modTime time . Time // modification time of the object
id string // ID of the object
2023-03-01 13:02:01 +01:00
hash string // Hash of the content, usually QuickXorHash but set as hash_type
2018-10-03 06:46:25 +02:00
mimeType string // Content-Type of object from server (may not be as uploaded)
2015-10-04 23:08:31 +02:00
}
// ------------------------------------------------------------
// Name returns the name of the remote as passed in to NewFs.
func (f *Fs) Name() string {
	return f.name
}
// Root returns the root path of the remote as passed in to NewFs.
func (f *Fs) Root() string {
	return f.root
}
// String converts this Fs to a string
func ( f * Fs ) String ( ) string {
2022-08-30 10:23:29 +02:00
return fmt . Sprintf ( "OneDrive root '%s'" , f . root )
2015-10-04 23:08:31 +02:00
}
2017-01-13 18:21:47 +01:00
// Features returns the optional features of this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
2022-08-30 10:23:29 +02:00
// parsePath parses a OneDrive 'url', returning the path with any
// leading and trailing "/" characters removed.
func parsePath(p string) (root string) {
	root = strings.Trim(p, "/")
	return root
}
// retryErrorCodes is a slice of error codes that we will retry
//
// These are passed to fserrors.ShouldRetryHTTP in shouldRetry below.
var retryErrorCodes = [ ] int {
429 , // Too Many Requests.
500 , // Internal Server Error
502 , // Bad Gateway
503 , // Service Unavailable
504 , // Gateway Timeout
509 , // Bandwidth Limit Exceeded
}
2020-11-02 17:49:27 +01:00
// gatewayTimeoutError ensures the 504 advice in shouldRetry is only logged once per run
var gatewayTimeoutError sync . Once
2021-01-05 14:26:00 +01:00
// errAsyncJobAccessDenied is the sentinel error for an async job failing with access denied
var errAsyncJobAccessDenied = errors . New ( "async job failed - access denied" )
2015-10-04 23:08:31 +02:00
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
2021-03-16 16:50:02 +01:00
func shouldRetry ( ctx context . Context , resp * http . Response , err error ) ( bool , error ) {
if fserrors . ContextError ( ctx , & err ) {
return false , err
}
2020-01-29 13:16:18 +01:00
retry := false
if resp != nil {
switch resp . StatusCode {
2022-03-05 17:09:37 +01:00
case 400 :
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . InnerError . Code == "pathIsTooLong" {
return false , fserrors . NoRetryError ( err )
}
}
2020-01-29 13:16:18 +01:00
case 401 :
2022-06-08 22:25:17 +02:00
if len ( resp . Header [ "Www-Authenticate" ] ) == 1 && strings . Contains ( resp . Header [ "Www-Authenticate" ] [ 0 ] , "expired_token" ) {
2020-01-29 13:16:18 +01:00
retry = true
fs . Debugf ( nil , "Should retry: %v" , err )
2021-04-24 11:44:13 +02:00
} else if err != nil && strings . Contains ( err . Error ( ) , "Unable to initialize RPS" ) {
retry = true
fs . Debugf ( nil , "HTTP 401: Unable to initialize RPS. Trying again." )
2020-01-29 13:16:18 +01:00
}
case 429 : // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp . Header [ "Retry-After" ] ; len ( values ) == 1 && values [ 0 ] != "" {
retryAfter , parseErr := strconv . Atoi ( values [ 0 ] )
if parseErr != nil {
fs . Debugf ( nil , "Failed to parse Retry-After: %q: %v" , values [ 0 ] , parseErr )
} else {
duration := time . Second * time . Duration ( retryAfter )
retry = true
err = pacer . RetryAfterError ( err , duration )
fs . Debugf ( nil , "Too many requests. Trying again in %d seconds." , retryAfter )
}
}
2020-11-02 17:49:27 +01:00
case 504 : // Gateway timeout
gatewayTimeoutError . Do ( func ( ) {
fs . Errorf ( nil , "%v: upload chunks may be taking too long - try reducing --onedrive-chunk-size or decreasing --transfers" , err )
} )
2020-03-25 13:56:38 +01:00
case 507 : // Insufficient Storage
return false , fserrors . FatalError ( err )
2020-01-29 13:16:18 +01:00
}
2017-03-23 14:10:43 +01:00
}
2020-01-29 13:16:18 +01:00
return retry || fserrors . ShouldRetry ( err ) || fserrors . ShouldRetryHTTP ( resp , retryErrorCodes ) , err
2015-10-04 23:08:31 +02:00
}
2019-01-09 06:11:00 +01:00
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
2019-03-11 09:30:38 +01:00
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
2019-10-26 19:02:22 +02:00
//
2022-08-05 17:35:41 +02:00
// If `relPath` == ”, do not append the slash (See #3664)
2019-09-04 21:00:37 +02:00
func ( f * Fs ) readMetaDataForPathRelativeToID ( ctx context . Context , normalizedID string , relPath string ) ( info * api . Item , resp * http . Response , err error ) {
2021-01-29 21:04:21 +01:00
opts , _ := f . newOptsCallWithIDPath ( normalizedID , relPath , true , "GET" , "" )
2015-10-04 23:08:31 +02:00
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , nil , & info )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-04 23:08:31 +02:00
} )
2018-07-11 19:48:59 +02:00
2015-10-04 23:08:31 +02:00
return info , resp , err
}
2019-01-09 06:11:00 +01:00
// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) {
	firstSlashIndex := strings.IndexRune(path, '/')

	// Simple case: not OneDrive Personal, or the path has no parent folder -
	// address the item directly by its path relative to the drive root.
	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
		opts := f.newOptsCallWithPath(ctx, path, "GET", "")
		// A trailing ":" is not wanted when addressing by plain path
		opts.Path = strings.TrimSuffix(opts.Path, ":")
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
			return shouldRetry(ctx, resp, err)
		})
		return info, resp, err
	}

	// The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
	// For OneDrive Personal, we need to consider the "shared with me" folders.
	// An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
	// by its path relative to the folder's ID relative to the sharer's driveID.
	// Note: A "shared with me" folder can only be placed in the sharee's absolute root.
	// So we read metadata relative to a suitable folder's normalized ID.
	var dirCacheFoundRoot bool
	var rootNormalizedID string
	if f.dirCache != nil {
		rootNormalizedID, err = f.dirCache.RootID(ctx, false)
		dirCacheRootIDExists := err == nil
		if f.root == "" {
			// if f.root == "", it means f.root is the absolute root of the drive
			// and its ID should have been found in NewFs
			dirCacheFoundRoot = dirCacheRootIDExists
		} else if _, err := f.dirCache.RootParentID(ctx, false); err == nil {
			// if root is in a folder, it must have a parent folder, and
			// if dirCache has found root in NewFs, the parent folder's ID
			// should be present.
			// This RootParentID() check is a fix for #3164 which describes
			// a possible case where the root is not found.
			dirCacheFoundRoot = dirCacheRootIDExists
		}
	}

	relPath, insideRoot := getRelativePathInsideBase(f.root, path)
	var firstDir, baseNormalizedID string
	if !insideRoot || !dirCacheFoundRoot {
		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
		// Note: this recurses into the simple branch above for the first path component.
		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
		info, resp, err := f.readMetaDataForPath(ctx, firstDir)
		if err != nil {
			return info, resp, err
		}
		baseNormalizedID = info.GetID()
	} else {
		if f.root != "" {
			// Read metadata based on root
			baseNormalizedID = rootNormalizedID
		} else {
			// Read metadata based on firstDir
			firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
			baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
}
2015-11-27 13:46:13 +01:00
// errorHandler parses a non 2xx error response into an error
func errorHandler ( resp * http . Response ) error {
// Decode error response
errResponse := new ( api . Error )
err := rest . DecodeJSON ( resp , & errResponse )
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Debugf ( nil , "Couldn't decode error response: %v" , err )
2015-11-27 13:46:13 +01:00
}
if errResponse . ErrorInfo . Code == "" {
errResponse . ErrorInfo . Code = resp . Status
}
return errResponse
}
2018-09-07 13:02:27 +02:00
func checkUploadChunkSize ( cs fs . SizeSuffix ) error {
2021-03-02 20:11:57 +01:00
const minChunkSize = fs . SizeSuffixBase
2018-09-07 13:02:27 +02:00
if cs % chunkSizeMultiple != 0 {
2021-11-04 11:12:57 +01:00
return fmt . Errorf ( "%s is not a multiple of %s" , cs , chunkSizeMultiple )
2018-09-07 13:02:27 +02:00
}
if cs < minChunkSize {
2021-11-04 11:12:57 +01:00
return fmt . Errorf ( "%s is less than %s" , cs , minChunkSize )
2018-09-07 13:02:27 +02:00
}
return nil
}
// setUploadChunkSize validates cs and installs it as the new chunk size,
// returning the previous value.
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	if err = checkUploadChunkSize(cs); err != nil {
		return old, err
	}
	old = f.opt.ChunkSize
	f.opt.ChunkSize = cs
	return old, nil
}
2015-10-04 23:08:31 +02:00
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("onedrive: chunk size: %w", err)
	}

	if opt.DriveID == "" || opt.DriveType == "" {
		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
	}

	// Base URL for all API calls on this drive, using the region's Graph endpoint
	rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID

	// Select OAuth scopes and region-specific auth endpoints
	oauthConfig.Scopes = opt.AccessScopes
	if opt.DisableSitePermission {
		oauthConfig.Scopes = scopeAccessWithoutSites
	}
	oauthConfig.Endpoint = oauth2.Endpoint{
		AuthURL:  authEndpoint[opt.Region] + authPath,
		TokenURL: authEndpoint[opt.Region] + tokenPath,
	}

	client := fshttp.NewClient(ctx)
	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, client)
	if err != nil {
		return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
	}

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		ci:        ci,
		driveID:   opt.DriveID,
		driveType: opt.DriveType,
		// srv carries OAuth, unAuth is the same base client without auth
		srv:      rest.NewClient(oAuthClient).SetRoot(rootURL),
		unAuth:   rest.NewClient(client).SetRoot(rootURL),
		pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		hashType: QuickXorHashType,
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		ReadMimeType:            true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	// Set the user defined hash
	if opt.HashType == "auto" || opt.HashType == "" {
		opt.HashType = QuickXorHashType.String()
	}
	err = f.hashType.Set(opt.HashType)
	if err != nil {
		return nil, err
	}

	// Disable change polling in China region
	// See: https://github.com/rclone/rclone/issues/6444
	if f.opt.Region == regionCN {
		f.features.ChangeNotify = nil
	}

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, _, err := f.readMetaDataForPath(ctx, "")
		return err
	})

	// Get rootID - either configured or queried from the drive root
	var rootID = opt.RootFolderID
	if rootID == "" {
		rootInfo, _, err := f.readMetaDataForPath(ctx, "")
		if err != nil {
			return nil, fmt.Errorf("failed to get root: %w", err)
		}
		rootID = rootInfo.GetID()
	}
	if rootID == "" {
		return nil, errors.New("failed to get root: ID was empty")
	}

	f.dirCache = dircache.New(root, rootID, f)

	// ListR only supported if delta set
	if !f.opt.Delta {
		f.features.ListR = nil
	}

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
// rootSlash returns root with a trailing slash, or the empty string when
// root is empty.
func (f *Fs) rootSlash() string {
	if f.root != "" {
		return f.root + "/"
	}
	return f.root
}
// Return an Object from a path
//
2016-06-25 22:23:20 +02:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) newObjectWithInfo ( ctx context . Context , remote string , info * api . Item ) ( fs . Object , error ) {
2015-10-04 23:08:31 +02:00
o := & Object {
fs : f ,
remote : remote ,
}
2017-03-06 21:11:54 +01:00
var err error
2015-10-04 23:08:31 +02:00
if info != nil {
// Set info
2017-03-06 21:11:54 +01:00
err = o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
} else {
2019-06-17 10:34:30 +02:00
err = o . readMetaData ( ctx ) // reads info and meta, returning an error
2017-03-06 21:11:54 +01:00
}
if err != nil {
return nil , err
2015-10-04 23:08:31 +02:00
}
2016-06-25 22:23:20 +02:00
return o , nil
2015-10-04 23:08:31 +02:00
}
2016-06-25 22:23:20 +02:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) NewObject ( ctx context . Context , remote string ) ( fs . Object , error ) {
return f . newObjectWithInfo ( ctx , remote , nil )
2015-10-04 23:08:31 +02:00
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
2019-06-17 10:34:30 +02:00
func ( f * Fs ) FindLeaf ( ctx context . Context , pathID , leaf string ) ( pathIDOut string , found bool , err error ) {
2017-02-09 12:01:20 +01:00
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
2019-01-09 06:11:00 +01:00
_ , ok := f . dirCache . GetInv ( pathID )
2015-10-04 23:08:31 +02:00
if ! ok {
2016-06-12 16:06:02 +02:00
return "" , false , errors . New ( "couldn't find parent ID" )
2015-10-04 23:08:31 +02:00
}
2019-09-04 21:00:37 +02:00
info , resp , err := f . readMetaDataForPathRelativeToID ( ctx , pathID , leaf )
2015-10-04 23:08:31 +02:00
if err != nil {
if resp != nil && resp . StatusCode == http . StatusNotFound {
return "" , false , nil
}
return "" , false , err
}
2018-10-03 06:46:25 +02:00
if info . GetPackageType ( ) == api . PackageTypeOneNote {
return "" , false , errors . New ( "found OneNote file when looking for folder" )
}
2018-07-11 19:48:59 +02:00
if info . GetFolder ( ) == nil {
2016-06-12 16:06:02 +02:00
return "" , false , errors . New ( "found file when looking for folder" )
2015-10-04 23:08:31 +02:00
}
2018-07-11 19:48:59 +02:00
return info . GetID ( ) , true , nil
2015-10-04 23:08:31 +02:00
}
// CreateDir makes a directory with pathID as parent and name leaf
2019-06-17 10:34:30 +02:00
func ( f * Fs ) CreateDir ( ctx context . Context , dirID , leaf string ) ( newID string , err error ) {
2018-07-11 19:48:59 +02:00
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
2015-10-04 23:08:31 +02:00
var resp * http . Response
var info * api . Item
2021-01-29 21:04:21 +01:00
opts := f . newOptsCall ( dirID , "POST" , "/children" )
2015-10-04 23:08:31 +02:00
mkdir := api . CreateItemRequest {
2020-01-14 18:33:35 +01:00
Name : f . opt . Enc . FromStandardName ( leaf ) ,
2015-10-04 23:08:31 +02:00
ConflictBehavior : "fail" ,
}
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & mkdir , & info )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-04 23:08:31 +02:00
} )
if err != nil {
//fmt.Printf("...Error %v\n", err)
return "" , err
}
2018-07-11 19:48:59 +02:00
2015-10-04 23:08:31 +02:00
//fmt.Printf("...Id %q\n", *info.Id)
2018-07-11 19:48:59 +02:00
return info . GetID ( ) , nil
2015-10-04 23:08:31 +02:00
}
// listAllFn is the callback used by listAll/_listAll to process each
// api.Item found in a directory listing.
//
// If an error is returned then processing stops
type listAllFn func(*api.Item) error
2015-10-04 23:08:31 +02:00
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// This listing function works on both normal listings and delta listings
//
// NOTE(review): result must be a pointer to the struct that contains the
// fields *pValue and *pNextLink point at - each CallJSON decodes into
// result and the pointers are used to read/reset those fields per page.
func (f *Fs) _listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn, opts *rest.Opts, result any, pValue *[]api.Item, pNextLink *string) (err error) {
	for {
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, opts, nil, result)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return fmt.Errorf("couldn't list files: %w", err)
		}
		// An empty page means we are done
		if len(*pValue) == 0 {
			break
		}
		for i := range *pValue {
			item := &(*pValue)[i]
			isFolder := item.GetFolder() != nil
			// Filter by item type as requested
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			// Skip tombstones (delta listings include deleted items)
			if item.Deleted != nil {
				continue
			}
			item.Name = f.opt.Enc.ToStandardName(item.GetName())
			err = fn(item)
			if err != nil {
				return err
			}
		}
		if *pNextLink == "" {
			break
		}
		// The next link is an absolute URL, so clear Path/Parameters
		// and point RootURL at it instead.
		opts.Path = ""
		opts.Parameters = nil
		opts.RootURL = *pNextLink
		// reset results so the next decode starts from a clean slate
		*pNextLink = ""
		*pValue = nil
	}
	return nil
}
// listAll lists the directory with ID dirID, calling fn on every item found.
//
// directoriesOnly and filesOnly filter what kinds of item fn sees.
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (err error) {
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
	var result api.ListChildrenResponse
	return f._listAll(ctx, dirID, directoriesOnly, filesOnly, fn, &opts, &result, &result.Value, &result.NextLink)
}
// Convert a list item into a DirEntry
//
// Can return nil for an item which should be skipped
func ( f * Fs ) itemToDirEntry ( ctx context . Context , dir string , info * api . Item ) ( entry fs . DirEntry , err error ) {
if ! f . opt . ExposeOneNoteFiles && info . GetPackageType ( ) == api . PackageTypeOneNote {
fs . Debugf ( info . Name , "OneNote file not shown in directory listing" )
return nil , nil
}
remote := path . Join ( dir , info . GetName ( ) )
folder := info . GetFolder ( )
if folder != nil {
// cache the directory ID for later lookups
id := info . GetID ( )
f . dirCache . Put ( remote , id )
d := fs . NewDir ( remote , time . Time ( info . GetLastModifiedDateTime ( ) ) ) . SetID ( id )
d . SetItems ( folder . ChildCount )
entry = d
} else {
o , err := f . newObjectWithInfo ( ctx , remote , info )
if err != nil {
return nil , err
}
entry = o
}
return entry , nil
2015-10-04 23:08:31 +02:00
}
2017-06-11 23:43:31 +02:00
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) List ( ctx context . Context , dir string ) ( entries fs . DirEntries , err error ) {
directoryID , err := f . dirCache . FindDir ( ctx , dir , false )
2017-06-11 23:43:31 +02:00
if err != nil {
return nil , err
}
2023-09-19 13:09:51 +02:00
err = f . listAll ( ctx , directoryID , false , false , func ( info * api . Item ) error {
entry , err := f . itemToDirEntry ( ctx , dir , info )
2023-11-28 19:49:38 +01:00
if err != nil {
return err
2015-10-04 23:08:31 +02:00
}
2023-11-28 19:49:38 +01:00
if entry == nil {
return nil
}
entries = append ( entries , entry )
return nil
2015-10-04 23:08:31 +02:00
} )
2017-06-11 23:43:31 +02:00
if err != nil {
return nil , err
}
return entries , nil
2015-10-04 23:08:31 +02:00
}
2023-09-19 13:09:51 +02:00
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	// Make sure this ID is in the directory cache
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	// ListR only works at the root of a onedrive, not on a folder
	// So we have to filter things outside of the root which is
	// inefficient.

	list := walk.NewListRHelper(callback)

	// list a folder conventionally - used for shared folders
	var listFolder func(dir string) error
	listFolder = func(dir string) error {
		entries, err := f.List(ctx, dir)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			// recurse into subdirectories
			if _, isDir := entry.(fs.Directory); isDir {
				err = listFolder(entry.Remote())
				if err != nil {
					return err
				}
			}
		}
		return nil
	}

	// This code relies on the fact that directories are sent before their children. This isn't
	// mentioned in the docs though, so maybe it shouldn't be relied on.
	seen := map[string]struct{}{}
	fn := func(info *api.Item) error {
		var parentPath string
		var ok bool
		id := info.GetID()
		// The API can produce duplicates, so skip them
		if _, found := seen[id]; found {
			return nil
		}
		seen[id] = struct{}{}
		// Skip the root directory
		if id == directoryID {
			return nil
		}
		// Skip deleted items
		if info.Deleted != nil {
			return nil
		}
		dirID := info.GetParentReference().GetID()
		// Skip files that don't have their parent directory
		// cached as they are outside the root.
		parentPath, ok = f.dirCache.GetInv(dirID)
		if !ok {
			return nil
		}
		// Skip files not under the root directory
		remote := path.Join(parentPath, info.GetName())
		if dir != "" && !strings.HasPrefix(remote, dir+"/") {
			return nil
		}
		entry, err := f.itemToDirEntry(ctx, parentPath, info)
		if err != nil {
			return err
		}
		if entry == nil {
			return nil
		}
		err = list.Add(entry)
		if err != nil {
			return err
		}
		// If this is a shared folder, we'll need list it too
		if info.RemoteItem != nil && info.RemoteItem.Folder != nil {
			fs.Debugf(remote, "Listing shared directory")
			return listFolder(remote)
		}
		return nil
	}

	// Use the delta endpoint on the drive root to enumerate everything
	opts := rest.Opts{
		Method: "GET",
		Path:   "/root/delta",
		Parameters: map[string][]string{
			// "token": {token},
			"$top": {fmt.Sprintf("%d", f.opt.ListChunk)},
		},
	}

	var result api.DeltaResponse
	err = f._listAll(ctx, "", false, false, fn, &opts, &result, &result.Value, &result.NextLink)
	if err != nil {
		return err
	}
	return list.Flush()
}
2023-12-08 05:33:51 +01:00
// Shutdown shuts down the fs, stopping the background token renewer.
func (f *Fs) Shutdown(ctx context.Context) error {
	f.tokenRenewer.Shutdown()
	return nil
}
2015-10-30 09:40:14 +01:00
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
2015-10-04 23:08:31 +02:00
//
2022-08-05 17:35:41 +02:00
// Returns the object, leaf, directoryID and error.
2015-10-04 23:08:31 +02:00
//
2015-10-30 09:40:14 +01:00
// Used to create new objects
2019-06-17 10:34:30 +02:00
func ( f * Fs ) createObject ( ctx context . Context , remote string , modTime time . Time , size int64 ) ( o * Object , leaf string , directoryID string , err error ) {
2015-10-04 23:08:31 +02:00
// Create the directory for the object if it doesn't exist
2020-05-11 18:24:37 +02:00
leaf , directoryID , err = f . dirCache . FindPath ( ctx , remote , true )
2015-10-04 23:08:31 +02:00
if err != nil {
2015-10-30 09:40:14 +01:00
return nil , leaf , directoryID , err
2015-10-04 23:08:31 +02:00
}
// Temporary Object under construction
2015-10-30 09:40:14 +01:00
o = & Object {
2015-10-04 23:08:31 +02:00
fs : f ,
remote : remote ,
}
2015-10-30 09:40:14 +01:00
return o , leaf , directoryID , nil
}
// Put the object into the container
//
2022-08-05 17:35:41 +02:00
// Copy the reader in to the new object which is returned.
2015-10-30 09:40:14 +01:00
//
// The new object may have been created if an error is returned
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Put ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
2016-02-18 12:35:25 +01:00
remote := src . Remote ( )
size := src . Size ( )
2019-06-17 10:34:30 +02:00
modTime := src . ModTime ( ctx )
2016-02-18 12:35:25 +01:00
2019-06-17 10:34:30 +02:00
o , _ , _ , err := f . createObject ( ctx , remote , modTime , size )
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
2019-06-17 10:34:30 +02:00
return o , o . Update ( ctx , in , src , options ... )
2015-10-04 23:08:31 +02:00
}
// Mkdir creates the container if it doesn't exist
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Mkdir ( ctx context . Context , dir string ) error {
2020-05-11 18:24:37 +02:00
_ , err := f . dirCache . FindDir ( ctx , dir , true )
2016-11-25 22:52:43 +01:00
return err
2015-10-04 23:08:31 +02:00
}
// deleteObject removes an object by ID
2019-09-04 21:00:37 +02:00
func ( f * Fs ) deleteObject ( ctx context . Context , id string ) error {
2021-01-29 21:04:21 +01:00
opts := f . newOptsCall ( id , "DELETE" , "" )
2018-07-11 19:48:59 +02:00
opts . NoResponse = true
2015-10-04 23:08:31 +02:00
return f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err := f . srv . Call ( ctx , & opts )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-04 23:08:31 +02:00
} )
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
2019-06-17 10:34:30 +02:00
func ( f * Fs ) purgeCheck ( ctx context . Context , dir string , check bool ) error {
2016-11-25 22:52:43 +01:00
root := path . Join ( f . root , dir )
if root == "" {
2016-06-12 16:06:02 +02:00
return errors . New ( "can't purge root directory" )
2015-10-04 23:08:31 +02:00
}
dc := f . dirCache
2019-06-17 10:34:30 +02:00
rootID , err := dc . FindDir ( ctx , dir , false )
2015-10-04 23:08:31 +02:00
if err != nil {
return err
}
2018-09-01 00:07:12 +02:00
if check {
// check to see if there are any items
2023-09-19 13:09:51 +02:00
err := f . listAll ( ctx , rootID , false , false , func ( item * api . Item ) error {
return fs . ErrorDirectoryNotEmpty
2018-09-01 00:07:12 +02:00
} )
if err != nil {
return err
}
2015-10-04 23:08:31 +02:00
}
2019-09-04 21:00:37 +02:00
err = f . deleteObject ( ctx , rootID )
2015-10-04 23:08:31 +02:00
if err != nil {
return err
}
2016-11-25 22:52:43 +01:00
f . dirCache . FlushDir ( dir )
2015-10-04 23:08:31 +02:00
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Rmdir ( ctx context . Context , dir string ) error {
return f . purgeCheck ( ctx , dir , true )
2015-10-04 23:08:31 +02:00
}
// Precision return the precision of this Fs.
func (f *Fs) Precision() time.Duration {
	return time.Second
}
2015-10-30 09:40:14 +01:00
// waitForJob waits for the job with status in url to complete
//
// It polls the status URL roughly once a second until the overall
// timeout (f.ci.TimeoutOrInfinite()) expires.
func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
	deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
	for time.Now().Before(deadline) {
		var resp *http.Response
		var err error
		var body []byte
		err = f.pacer.Call(func() (bool, error) {
			// NOTE(review): uses the default client (http.Get) rather than
			// f.srv - presumably the monitor URL must be fetched without
			// auth headers; confirm before changing. This also means the
			// request does not observe ctx cancellation.
			resp, err = http.Get(location)
			if err != nil {
				return fserrors.ShouldRetry(err), err
			}
			// rest.ReadBody reads and closes resp.Body
			body, err = rest.ReadBody(resp)
			return fserrors.ShouldRetry(err), err
		})
		if err != nil {
			return err
		}
		// Try to decode the body first as an api.AsyncOperationStatus
		var status api.AsyncOperationStatus
		err = json.Unmarshal(body, &status)
		if err != nil {
			return fmt.Errorf("async status result not JSON: %q: %w", body, err)
		}

		switch status.Status {
		case "failed":
			if strings.HasPrefix(status.ErrorCode, "AccessDenied_") {
				return errAsyncJobAccessDenied
			}
			fallthrough
		case "deleteFailed":
			return fmt.Errorf("%s: async operation returned %q", o.remote, status.Status)
		case "completed":
			// Refresh the object's metadata now the operation is done
			err = o.readMetaData(ctx)
			if err != nil {
				return fmt.Errorf("async operation completed but readMetaData failed: %w", err)
			}
			return nil
		}

		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
}
2020-10-13 23:43:40 +02:00
// Copy src to this remote using server-side copy operations.
2015-10-04 23:08:31 +02:00
//
2022-08-05 17:35:41 +02:00
// This is stored with the remote path given.
2015-10-04 23:08:31 +02:00
//
2022-08-05 17:35:41 +02:00
// It returns the destination Object and a possible error.
2015-10-04 23:08:31 +02:00
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Copy ( ctx context . Context , src fs . Object , remote string ) ( fs . Object , error ) {
2015-10-30 09:40:14 +01:00
srcObj , ok := src . ( * Object )
if ! ok {
2017-02-09 12:01:20 +01:00
fs . Debugf ( src , "Can't copy - not same remote type" )
2015-10-30 09:40:14 +01:00
return nil , fs . ErrorCantCopy
}
2021-01-05 14:26:00 +01:00
if f . driveType != srcObj . fs . driveType {
2021-01-05 19:57:51 +01:00
fs . Debugf ( src , "Can't server-side copy - drive types differ" )
return nil , fs . ErrorCantCopy
}
// For OneDrive Business, this is only supported within the same drive
if f . driveType != driveTypePersonal && srcObj . fs . driveID != f . driveID {
fs . Debugf ( src , "Can't server-side copy - cross-drive but not OneDrive Personal" )
2021-01-05 14:26:00 +01:00
return nil , fs . ErrorCantCopy
}
2019-06-17 10:34:30 +02:00
err := srcObj . readMetaData ( ctx )
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
2020-03-15 13:07:46 +01:00
// Check we aren't overwriting a file on the same remote
if srcObj . fs == f {
srcPath := srcObj . rootPath ( )
dstPath := f . rootPath ( remote )
2022-06-08 22:25:17 +02:00
if strings . EqualFold ( srcPath , dstPath ) {
2021-11-04 11:12:57 +01:00
return nil , fmt . Errorf ( "can't copy %q -> %q as are same name when lowercase" , srcPath , dstPath )
2020-03-15 13:07:46 +01:00
}
2017-02-22 20:28:22 +01:00
}
2015-10-30 09:40:14 +01:00
// Create temporary object
2019-06-17 10:34:30 +02:00
dstObj , leaf , directoryID , err := f . createObject ( ctx , remote , srcObj . modTime , srcObj . size )
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
// Copy the object
2021-01-06 03:50:37 +01:00
// The query param is a workaround for OneDrive Business for #4590
2021-01-29 21:04:21 +01:00
opts := f . newOptsCall ( srcObj . id , "POST" , "/copy?@microsoft.graph.conflictBehavior=replace" )
2018-07-11 19:48:59 +02:00
opts . ExtraHeaders = map [ string ] string { "Prefer" : "respond-async" }
opts . NoResponse = true
2021-01-29 21:04:21 +01:00
id , dstDriveID , _ := f . parseNormalizedID ( directoryID )
2018-07-11 19:48:59 +02:00
2020-01-14 18:33:35 +01:00
replacedLeaf := f . opt . Enc . FromStandardName ( leaf )
2018-08-04 12:16:43 +02:00
copyReq := api . CopyItemRequest {
2015-10-30 09:40:14 +01:00
Name : & replacedLeaf ,
ParentReference : api . ItemReference {
2019-01-09 06:11:00 +01:00
DriveID : dstDriveID ,
2018-08-18 12:06:22 +02:00
ID : id ,
2015-10-30 09:40:14 +01:00
} ,
}
var resp * http . Response
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & copyReq , nil )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-30 09:40:14 +01:00
} )
if err != nil {
return nil , err
}
// read location header
location := resp . Header . Get ( "Location" )
if location == "" {
2016-06-12 16:06:02 +02:00
return nil , errors . New ( "didn't receive location header in copy response" )
2015-10-30 09:40:14 +01:00
}
// Wait for job to finish
2019-06-17 10:34:30 +02:00
err = f . waitForJob ( ctx , location , dstObj )
2021-01-05 14:26:00 +01:00
if err == errAsyncJobAccessDenied {
fs . Debugf ( src , "Server-side copy failed - file not shared between drives" )
return nil , fs . ErrorCantCopy
}
2015-10-30 09:40:14 +01:00
if err != nil {
return nil , err
}
2018-03-15 08:06:17 +01:00
2018-03-15 08:06:17 +01:00
// Copy does NOT copy the modTime from the source and there seems to
// be no way to set date before
// This will create TWO versions on OneDrive
2019-06-17 10:34:30 +02:00
err = dstObj . SetModTime ( ctx , srcObj . ModTime ( ctx ) )
2018-03-15 08:06:17 +01:00
if err != nil {
return nil , err
}
2015-10-30 09:40:14 +01:00
return dstObj , nil
}
2015-10-04 23:08:31 +02:00
2020-06-04 23:25:14 +02:00
// Purge deletes all the files in the directory
2015-10-04 23:08:31 +02:00
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
2020-06-04 23:25:14 +02:00
func ( f * Fs ) Purge ( ctx context . Context , dir string ) error {
return f . purgeCheck ( ctx , dir , false )
2015-10-04 23:08:31 +02:00
}
2020-10-13 23:43:40 +02:00
// Move src to this remote using server-side move operations.
2017-03-14 16:35:10 +01:00
//
2022-08-05 17:35:41 +02:00
// This is stored with the remote path given.
2017-03-14 16:35:10 +01:00
//
2022-08-05 17:35:41 +02:00
// It returns the destination Object and a possible error.
2017-03-14 16:35:10 +01:00
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Move ( ctx context . Context , src fs . Object , remote string ) ( fs . Object , error ) {
2017-03-14 16:35:10 +01:00
srcObj , ok := src . ( * Object )
if ! ok {
fs . Debugf ( src , "Can't move - not same remote type" )
return nil , fs . ErrorCantMove
}
// Create temporary object
2019-06-17 10:34:30 +02:00
dstObj , leaf , directoryID , err := f . createObject ( ctx , remote , srcObj . modTime , srcObj . size )
2017-03-14 16:35:10 +01:00
if err != nil {
return nil , err
}
2021-01-29 21:04:21 +01:00
id , dstDriveID , _ := f . parseNormalizedID ( directoryID )
_ , srcObjDriveID , _ := f . parseNormalizedID ( srcObj . id )
2019-01-09 06:11:00 +01:00
2020-07-02 10:38:37 +02:00
if f . canonicalDriveID ( dstDriveID ) != srcObj . fs . canonicalDriveID ( srcObjDriveID ) {
2019-01-09 06:11:00 +01:00
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
2020-07-02 10:38:37 +02:00
fs . Debugf ( f , "Can't move files between drives (%q != %q)" , dstDriveID , srcObjDriveID )
2019-01-09 06:11:00 +01:00
return nil , fs . ErrorCantMove
}
2017-03-14 16:35:10 +01:00
// Move the object
2021-01-29 21:04:21 +01:00
opts := f . newOptsCall ( srcObj . id , "PATCH" , "" )
2018-07-11 19:48:59 +02:00
2017-03-14 16:35:10 +01:00
move := api . MoveItemRequest {
2020-01-14 18:33:35 +01:00
Name : f . opt . Enc . FromStandardName ( leaf ) ,
2017-03-14 16:35:10 +01:00
ParentReference : & api . ItemReference {
2019-01-09 06:11:00 +01:00
DriveID : dstDriveID ,
ID : id ,
2017-03-14 16:35:10 +01:00
} ,
// We set the mod time too as it gets reset otherwise
FileSystemInfo : & api . FileSystemInfoFacet {
CreatedDateTime : api . Timestamp ( srcObj . modTime ) ,
LastModifiedDateTime : api . Timestamp ( srcObj . modTime ) ,
} ,
}
var resp * http . Response
var info api . Item
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & move , & info )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2017-03-14 16:35:10 +01:00
} )
if err != nil {
return nil , err
}
err = dstObj . setMetaData ( & info )
if err != nil {
return nil , err
}
return dstObj , nil
}
2018-08-02 18:13:37 +02:00
// DirMove moves src, srcRemote to this remote at dstRemote
2020-10-13 23:43:40 +02:00
// using server-side move operations.
2018-08-02 18:13:37 +02:00
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
2019-06-17 10:34:30 +02:00
func ( f * Fs ) DirMove ( ctx context . Context , src fs . Fs , srcRemote , dstRemote string ) error {
2018-08-02 18:13:37 +02:00
srcFs , ok := src . ( * Fs )
if ! ok {
fs . Debugf ( srcFs , "Can't move directory - not same remote type" )
return fs . ErrorCantDirMove
}
2020-05-12 12:39:30 +02:00
srcID , _ , _ , dstDirectoryID , dstLeaf , err := f . dirCache . DirMove ( ctx , srcFs . dirCache , srcFs . root , srcRemote , f . root , dstRemote )
2018-08-02 18:13:37 +02:00
if err != nil {
return err
}
2019-01-09 06:11:00 +01:00
2021-01-29 21:04:21 +01:00
parsedDstDirID , dstDriveID , _ := f . parseNormalizedID ( dstDirectoryID )
_ , srcDriveID , _ := f . parseNormalizedID ( srcID )
2019-01-09 06:11:00 +01:00
2020-07-02 10:38:37 +02:00
if f . canonicalDriveID ( dstDriveID ) != srcFs . canonicalDriveID ( srcDriveID ) {
2019-01-09 06:11:00 +01:00
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
2020-07-02 10:38:37 +02:00
fs . Debugf ( f , "Can't move directories between drives (%q != %q)" , dstDriveID , srcDriveID )
2019-01-09 06:11:00 +01:00
return fs . ErrorCantDirMove
}
2018-08-02 18:13:37 +02:00
// Get timestamps of src so they can be preserved
2019-09-04 21:00:37 +02:00
srcInfo , _ , err := srcFs . readMetaDataForPathRelativeToID ( ctx , srcID , "" )
2018-08-02 18:13:37 +02:00
if err != nil {
return err
}
// Do the move
2021-01-29 21:04:21 +01:00
opts := f . newOptsCall ( srcID , "PATCH" , "" )
2018-08-02 18:13:37 +02:00
move := api . MoveItemRequest {
2020-05-12 12:39:30 +02:00
Name : f . opt . Enc . FromStandardName ( dstLeaf ) ,
2018-08-02 18:13:37 +02:00
ParentReference : & api . ItemReference {
2019-01-09 06:11:00 +01:00
DriveID : dstDriveID ,
ID : parsedDstDirID ,
2018-08-02 18:13:37 +02:00
} ,
// We set the mod time too as it gets reset otherwise
FileSystemInfo : & api . FileSystemInfoFacet {
CreatedDateTime : srcInfo . CreatedDateTime ,
LastModifiedDateTime : srcInfo . LastModifiedDateTime ,
} ,
}
var resp * http . Response
var info api . Item
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & move , & info )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2018-08-02 18:13:37 +02:00
} )
if err != nil {
return err
}
srcFs . dirCache . FlushDir ( srcRemote )
return nil
}
2016-12-09 16:39:29 +01:00
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func ( f * Fs ) DirCacheFlush ( ) {
f . dirCache . ResetRoot ( )
}
2018-04-16 23:19:25 +02:00
// About gets quota information
2019-06-17 10:34:30 +02:00
func ( f * Fs ) About ( ctx context . Context ) ( usage * fs . Usage , err error ) {
2018-04-16 23:19:25 +02:00
var drive api . Drive
opts := rest . Opts {
Method : "GET" ,
Path : "" ,
}
var resp * http . Response
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , nil , & drive )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2018-04-16 23:19:25 +02:00
} )
if err != nil {
2022-01-14 22:18:32 +01:00
return nil , err
2018-04-16 23:19:25 +02:00
}
q := drive . Quota
2020-10-07 11:58:04 +02:00
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q . Total == 0 && q . Used == 0 && q . Deleted == 0 && q . Remaining == 0 {
return & fs . Usage { } , nil
}
2018-04-16 23:19:25 +02:00
usage = & fs . Usage {
Total : fs . NewUsageValue ( q . Total ) , // quota of bytes that can be used
Used : fs . NewUsageValue ( q . Used ) , // bytes in use
Trashed : fs . NewUsageValue ( q . Deleted ) , // bytes in trash
Free : fs . NewUsageValue ( q . Remaining ) , // bytes which can be uploaded before reaching the quota
}
return usage , nil
}
2016-01-11 13:39:33 +01:00
// Hashes returns the supported hash sets.
2018-01-12 17:30:54 +01:00
func ( f * Fs ) Hashes ( ) hash . Set {
2023-03-01 13:02:01 +01:00
return hash . Set ( f . hashType )
2016-01-11 13:39:33 +01:00
}
2020-05-20 12:39:20 +02:00
// PublicLink returns a link for downloading without account.
2020-05-31 23:18:01 +02:00
func ( f * Fs ) PublicLink ( ctx context . Context , remote string , expire fs . Duration , unlink bool ) ( link string , err error ) {
2018-11-02 13:14:19 +01:00
info , _ , err := f . readMetaDataForPath ( ctx , f . rootPath ( remote ) )
2018-10-09 14:11:48 +02:00
if err != nil {
return "" , err
}
2021-01-29 21:04:21 +01:00
opts := f . newOptsCall ( info . GetID ( ) , "POST" , "/createLink" )
2018-10-09 14:11:48 +02:00
share := api . CreateShareLinkRequest {
2021-01-07 12:02:54 +01:00
Type : f . opt . LinkType ,
Scope : f . opt . LinkScope ,
Password : f . opt . LinkPassword ,
}
2021-04-06 17:21:32 +02:00
if expire < fs . DurationOff {
2021-01-07 12:02:54 +01:00
expiry := time . Now ( ) . Add ( time . Duration ( expire ) )
share . Expiry = & expiry
2018-10-09 14:11:48 +02:00
}
var resp * http . Response
var result api . CreateShareLinkResponse
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = f . srv . CallJSON ( ctx , & opts , & share , & result )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2018-10-09 14:11:48 +02:00
} )
if err != nil {
2021-07-27 11:55:57 +02:00
if resp != nil && resp . StatusCode == 400 && f . driveType != driveTypePersonal {
2021-11-04 11:12:57 +01:00
return "" , fmt . Errorf ( "%v (is making public links permitted by the org admin?)" , err )
2021-07-27 11:55:57 +02:00
}
2018-10-09 14:11:48 +02:00
return "" , err
}
2021-06-22 15:25:08 +02:00
shareURL := result . Link . WebURL
// Convert share link to direct download link if target is not a folder
// Not attempting to do the conversion for regional versions, just to be safe
if f . opt . Region != regionGlobal {
return shareURL , nil
}
if info . Folder != nil {
fs . Debugf ( nil , "Can't convert share link for folder to direct link - returning the link as is" )
return shareURL , nil
}
cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
directURL := ""
segments := strings . Split ( shareURL , "/" )
switch f . driveType {
case driveTypePersonal :
// Method: https://stackoverflow.com/questions/37951114/direct-download-link-to-onedrive-file
if len ( segments ) != 5 {
fs . Logf ( f , cnvFailMsg )
return shareURL , nil
}
enc := base64 . StdEncoding . EncodeToString ( [ ] byte ( shareURL ) )
enc = strings . ReplaceAll ( enc , "/" , "_" )
enc = strings . ReplaceAll ( enc , "+" , "-" )
enc = strings . ReplaceAll ( enc , "=" , "" )
directURL = fmt . Sprintf ( "https://api.onedrive.com/v1.0/shares/u!%s/root/content" , enc )
case driveTypeBusiness :
// Method: https://docs.microsoft.com/en-us/sharepoint/dev/spfx/shorter-share-link-format
// Example:
// https://{tenant}-my.sharepoint.com/:t:/g/personal/{user_email}/{Opaque_String}
// --convert to->
// https://{tenant}-my.sharepoint.com/personal/{user_email}/_layouts/15/download.aspx?share={Opaque_String}
if len ( segments ) != 8 {
fs . Logf ( f , cnvFailMsg )
return shareURL , nil
}
directURL = fmt . Sprintf ( "https://%s/%s/%s/_layouts/15/download.aspx?share=%s" ,
segments [ 2 ] , segments [ 5 ] , segments [ 6 ] , segments [ 7 ] )
case driveTypeSharepoint :
// Method: Similar to driveTypeBusiness
// Example:
// https://{tenant}.sharepoint.com/:t:/s/{site_name}/{Opaque_String}
// --convert to->
// https://{tenant}.sharepoint.com/sites/{site_name}/_layouts/15/download.aspx?share={Opaque_String}
//
// https://{tenant}.sharepoint.com/:t:/t/{team_name}/{Opaque_String}
// --convert to->
// https://{tenant}.sharepoint.com/teams/{team_name}/_layouts/15/download.aspx?share={Opaque_String}
//
// https://{tenant}.sharepoint.com/:t:/g/{Opaque_String}
// --convert to->
// https://{tenant}.sharepoint.com/_layouts/15/download.aspx?share={Opaque_String}
if len ( segments ) < 6 || len ( segments ) > 7 {
fs . Logf ( f , cnvFailMsg )
return shareURL , nil
}
pathPrefix := ""
switch segments [ 4 ] {
case "s" : // Site
pathPrefix = "/sites/" + segments [ 5 ]
case "t" : // Team
pathPrefix = "/teams/" + segments [ 5 ]
case "g" : // Root site
default :
fs . Logf ( f , cnvFailMsg )
return shareURL , nil
}
directURL = fmt . Sprintf ( "https://%s%s/_layouts/15/download.aspx?share=%s" ,
segments [ 2 ] , pathPrefix , segments [ len ( segments ) - 1 ] )
}
return directURL , nil
2018-10-09 14:11:48 +02:00
}
2020-08-03 21:45:37 +02:00
// CleanUp deletes all the hidden files.
func ( f * Fs ) CleanUp ( ctx context . Context ) error {
2020-11-05 12:33:32 +01:00
token := make ( chan struct { } , f . ci . Checkers )
2020-08-03 21:45:37 +02:00
var wg sync . WaitGroup
err := walk . Walk ( ctx , f , "" , true , - 1 , func ( path string , entries fs . DirEntries , err error ) error {
2023-03-25 17:28:37 +01:00
if err != nil {
fs . Errorf ( f , "Failed to list %q: %v" , path , err )
return nil
}
2020-08-03 21:45:37 +02:00
err = entries . ForObjectError ( func ( obj fs . Object ) error {
o , ok := obj . ( * Object )
if ! ok {
return errors . New ( "internal error: not a onedrive object" )
}
wg . Add ( 1 )
token <- struct { } { }
go func ( ) {
defer func ( ) {
<- token
wg . Done ( )
} ( )
err := o . deleteVersions ( ctx )
if err != nil {
fs . Errorf ( o , "Failed to remove versions: %v" , err )
}
} ( )
return nil
} )
wg . Wait ( )
return err
} )
return err
}
// Finds and removes any old versions for o
func ( o * Object ) deleteVersions ( ctx context . Context ) error {
2021-01-29 21:04:21 +01:00
opts := o . fs . newOptsCall ( o . id , "GET" , "/versions" )
2020-08-03 21:45:37 +02:00
var versions api . VersionsResponse
err := o . fs . pacer . Call ( func ( ) ( bool , error ) {
resp , err := o . fs . srv . CallJSON ( ctx , & opts , nil , & versions )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2020-08-03 21:45:37 +02:00
} )
if err != nil {
return err
}
if len ( versions . Versions ) < 2 {
return nil
}
for _ , version := range versions . Versions [ 1 : ] {
err = o . deleteVersion ( ctx , version . ID )
if err != nil {
return err
}
}
return nil
}
// Finds and removes any old versions for o
func ( o * Object ) deleteVersion ( ctx context . Context , ID string ) error {
if operations . SkipDestructive ( ctx , fmt . Sprintf ( "%s of %s" , ID , o . remote ) , "delete version" ) {
return nil
}
fs . Infof ( o , "removing version %q" , ID )
2021-01-29 21:04:21 +01:00
opts := o . fs . newOptsCall ( o . id , "DELETE" , "/versions/" + ID )
2020-08-03 21:45:37 +02:00
opts . NoResponse = true
return o . fs . pacer . Call ( func ( ) ( bool , error ) {
resp , err := o . fs . srv . Call ( ctx , & opts )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2020-08-03 21:45:37 +02:00
} )
}
2015-10-04 23:08:31 +02:00
// ------------------------------------------------------------
// Fs returns the parent Fs
2016-02-18 12:35:25 +01:00
func ( o * Object ) Fs ( ) fs . Info {
2015-10-04 23:08:31 +02:00
return o . fs
}
// String returns a description of the Object - its remote path, or
// "<nil>" for a nil receiver.
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
2018-11-02 13:14:19 +01:00
// rootPath returns a path for use in server given a remote
func (f *Fs) rootPath(remote string) string {
	return f.rootSlash() + remote
}
// rootPath returns a path for use in local functions
func (o *Object) rootPath() string {
	return o.fs.rootPath(o.remote)
}
2016-01-11 13:39:33 +01:00
// Hash returns the SHA-1 of an object returning a lowercase hex string
2019-06-17 10:34:30 +02:00
func ( o * Object ) Hash ( ctx context . Context , t hash . Type ) ( string , error ) {
2023-03-01 13:02:01 +01:00
if t == o . fs . hashType {
return o . hash , nil
2016-01-11 13:39:33 +01:00
}
2018-08-18 12:06:22 +02:00
return "" , hash . ErrUnsupported
2015-10-04 23:08:31 +02:00
}
// Size returns the size of an object in bytes
func ( o * Object ) Size ( ) int64 {
2019-06-17 10:34:30 +02:00
err := o . readMetaData ( context . TODO ( ) )
2015-10-04 23:08:31 +02:00
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read metadata: %v" , err )
2015-10-04 23:08:31 +02:00
return 0
}
return o . size
}
// setMetaData sets the metadata from info
2017-03-06 21:11:54 +01:00
func ( o * Object ) setMetaData ( info * api . Item ) ( err error ) {
2018-07-11 19:48:59 +02:00
if info . GetFolder ( ) != nil {
2021-09-06 14:54:08 +02:00
return fs . ErrorIsDir
2017-03-06 21:11:54 +01:00
}
2015-10-04 23:08:31 +02:00
o . hasMetaData = true
2018-07-11 19:48:59 +02:00
o . size = info . GetSize ( )
2016-01-11 13:39:33 +01:00
2018-10-03 06:46:25 +02:00
o . isOneNoteFile = info . GetPackageType ( ) == api . PackageTypeOneNote
2018-04-20 13:55:49 +02:00
// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
2016-01-17 11:45:17 +01:00
//
2018-04-20 13:55:49 +02:00
// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
2018-07-11 19:48:59 +02:00
file := info . GetFile ( )
if file != nil {
o . mimeType = file . MimeType
2023-03-01 13:02:01 +01:00
o . hash = ""
switch o . fs . hashType {
case QuickXorHashType :
if file . Hashes . QuickXorHash != "" {
h , err := base64 . StdEncoding . DecodeString ( file . Hashes . QuickXorHash )
if err != nil {
fs . Errorf ( o , "Failed to decode QuickXorHash %q: %v" , file . Hashes . QuickXorHash , err )
} else {
o . hash = hex . EncodeToString ( h )
}
2018-04-20 13:55:49 +02:00
}
2023-03-01 13:02:01 +01:00
case hash . SHA1 :
o . hash = strings . ToLower ( file . Hashes . Sha1Hash )
case hash . SHA256 :
o . hash = strings . ToLower ( file . Hashes . Sha256Hash )
case hash . CRC32 :
o . hash = strings . ToLower ( file . Hashes . Crc32Hash )
2018-04-20 13:55:49 +02:00
}
2016-01-11 13:39:33 +01:00
}
2018-07-11 19:48:59 +02:00
fileSystemInfo := info . GetFileSystemInfo ( )
if fileSystemInfo != nil {
o . modTime = time . Time ( fileSystemInfo . LastModifiedDateTime )
2015-10-04 23:08:31 +02:00
} else {
2018-07-11 19:48:59 +02:00
o . modTime = time . Time ( info . GetLastModifiedDateTime ( ) )
2015-10-04 23:08:31 +02:00
}
2018-07-11 19:48:59 +02:00
o . id = info . GetID ( )
2017-03-06 21:11:54 +01:00
return nil
2015-10-04 23:08:31 +02:00
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
2019-06-17 10:34:30 +02:00
func ( o * Object ) readMetaData ( ctx context . Context ) ( err error ) {
2015-10-04 23:08:31 +02:00
if o . hasMetaData {
return nil
}
2018-11-02 13:14:19 +01:00
info , _ , err := o . fs . readMetaDataForPath ( ctx , o . rootPath ( ) )
2015-10-04 23:08:31 +02:00
if err != nil {
2016-06-25 22:23:20 +02:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "itemNotFound" {
return fs . ErrorObjectNotFound
}
}
2015-10-04 23:08:31 +02:00
return err
}
2017-03-06 21:11:54 +01:00
return o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
2019-06-17 10:34:30 +02:00
func ( o * Object ) ModTime ( ctx context . Context ) time . Time {
err := o . readMetaData ( ctx )
2015-10-04 23:08:31 +02:00
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read metadata: %v" , err )
2015-10-04 23:08:31 +02:00
return time . Now ( )
}
return o . modTime
}
// setModTime sets the modification time of the local fs object
2019-06-17 10:34:30 +02:00
func ( o * Object ) setModTime ( ctx context . Context , modTime time . Time ) ( * api . Item , error ) {
2021-01-29 21:04:21 +01:00
opts := o . fs . newOptsCallWithPath ( ctx , o . remote , "PATCH" , "" )
2015-10-04 23:08:31 +02:00
update := api . SetFileSystemInfo {
FileSystemInfo : api . FileSystemInfoFacet {
CreatedDateTime : api . Timestamp ( modTime ) ,
LastModifiedDateTime : api . Timestamp ( modTime ) ,
} ,
}
var info * api . Item
err := o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err := o . fs . srv . CallJSON ( ctx , & opts , & update , & info )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-04 23:08:31 +02:00
} )
2020-08-06 18:59:26 +02:00
// Remove versions if required
if o . fs . opt . NoVersions {
err := o . deleteVersions ( ctx )
if err != nil {
fs . Errorf ( o , "Failed to remove versions: %v" , err )
}
}
2015-10-04 23:08:31 +02:00
return info , err
}
// SetModTime sets the modification time of the local fs object
2019-06-17 10:34:30 +02:00
func ( o * Object ) SetModTime ( ctx context . Context , modTime time . Time ) error {
info , err := o . setModTime ( ctx , modTime )
2015-10-04 23:08:31 +02:00
if err != nil {
2016-03-22 16:07:10 +01:00
return err
2015-10-04 23:08:31 +02:00
}
2017-03-06 21:11:54 +01:00
return o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
}
// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
	return true
}
// Open an object for read
2019-06-17 10:34:30 +02:00
func ( o * Object ) Open ( ctx context . Context , options ... fs . OpenOption ) ( in io . ReadCloser , err error ) {
2015-10-04 23:08:31 +02:00
if o . id == "" {
2016-06-12 16:06:02 +02:00
return nil , errors . New ( "can't download - no id" )
2015-10-04 23:08:31 +02:00
}
2018-10-03 06:46:25 +02:00
if o . isOneNoteFile {
return nil , errors . New ( "can't open a OneNote file" )
}
2018-01-22 18:05:00 +01:00
fs . FixRangeOption ( options , o . size )
2015-10-04 23:08:31 +02:00
var resp * http . Response
2021-01-29 21:04:21 +01:00
opts := o . fs . newOptsCall ( o . id , "GET" , "/content" )
2018-07-11 19:48:59 +02:00
opts . Options = options
2023-05-03 16:19:26 +02:00
if o . fs . opt . AVOverride {
opts . Parameters = url . Values { "AVOverride" : { "1" } }
}
2018-07-11 19:48:59 +02:00
2015-10-04 23:08:31 +02:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . Call ( ctx , & opts )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-04 23:08:31 +02:00
} )
if err != nil {
2023-05-03 16:19:26 +02:00
if resp != nil {
if virus := resp . Header . Get ( "X-Virus-Infected" ) ; virus != "" {
err = fmt . Errorf ( "server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w" , virus , err )
}
}
2015-10-04 23:08:31 +02:00
return nil , err
}
2018-02-16 14:21:26 +01:00
if resp . StatusCode == http . StatusOK && resp . ContentLength > 0 && resp . Header . Get ( "Content-Range" ) == "" {
//Overwrite size with actual size since size readings from Onedrive is unreliable.
o . size = resp . ContentLength
}
2015-10-04 23:08:31 +02:00
return resp . Body , err
}
// createUploadSession creates an upload session for the object
2019-06-17 10:34:30 +02:00
func ( o * Object ) createUploadSession ( ctx context . Context , modTime time . Time ) ( response * api . CreateUploadResponse , err error ) {
2021-01-29 21:04:21 +01:00
opts := o . fs . newOptsCallWithPath ( ctx , o . remote , "POST" , "/createUploadSession" )
2018-03-15 08:06:17 +01:00
createRequest := api . CreateUploadRequest { }
2018-03-16 20:18:51 +01:00
createRequest . Item . FileSystemInfo . CreatedDateTime = api . Timestamp ( modTime )
2018-03-15 08:06:17 +01:00
createRequest . Item . FileSystemInfo . LastModifiedDateTime = api . Timestamp ( modTime )
2015-10-04 23:08:31 +02:00
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . CallJSON ( ctx , & opts , & createRequest , & response )
2018-10-03 06:46:25 +02:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors . New ( err . Error ( ) + " (is it a OneNote file?)" )
}
}
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-04 23:08:31 +02:00
} )
2018-03-15 08:06:17 +01:00
return response , err
2015-10-04 23:08:31 +02:00
}
2020-01-25 11:41:20 +01:00
// getPosition gets the current position in a multipart upload
func ( o * Object ) getPosition ( ctx context . Context , url string ) ( pos int64 , err error ) {
2015-11-27 13:46:13 +01:00
opts := rest . Opts {
2020-01-25 11:41:20 +01:00
Method : "GET" ,
RootURL : url ,
}
var info api . UploadFragmentResponse
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
resp , err = o . fs . srv . CallJSON ( ctx , & opts , nil , & info )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2020-01-25 11:41:20 +01:00
} )
if err != nil {
return 0 , err
}
if len ( info . NextExpectedRanges ) != 1 {
2021-11-04 11:12:57 +01:00
return 0 , fmt . Errorf ( "bad number of ranges in upload position: %v" , info . NextExpectedRanges )
2020-01-25 11:41:20 +01:00
}
position := info . NextExpectedRanges [ 0 ]
i := strings . IndexByte ( position , '-' )
if i < 0 {
2021-11-04 11:12:57 +01:00
return 0 , fmt . Errorf ( "no '-' in next expected range: %q" , position )
2015-10-04 23:08:31 +02:00
}
2020-01-25 11:41:20 +01:00
position = position [ : i ]
pos , err = strconv . ParseInt ( position , 10 , 64 )
if err != nil {
2021-11-04 11:12:57 +01:00
return 0 , fmt . Errorf ( "bad expected range: %q: %w" , position , err )
2020-01-25 11:41:20 +01:00
}
return pos , nil
}
// uploadFragment uploads a part
2020-03-21 23:31:51 +01:00
func ( o * Object ) uploadFragment ( ctx context . Context , url string , start int64 , totalSize int64 , chunk io . ReadSeeker , chunkSize int64 , options ... fs . OpenOption ) ( info * api . Item , err error ) {
2018-03-15 08:06:17 +01:00
// var response api.UploadFragmentResponse
2015-10-04 23:08:31 +02:00
var resp * http . Response
2019-08-28 12:21:38 +02:00
var body [ ] byte
2020-01-25 11:41:20 +01:00
var skip = int64 ( 0 )
2015-10-04 23:08:31 +02:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2020-01-25 11:41:20 +01:00
toSend := chunkSize - skip
opts := rest . Opts {
Method : "PUT" ,
RootURL : url ,
ContentLength : & toSend ,
ContentRange : fmt . Sprintf ( "bytes %d-%d/%d" , start + skip , start + chunkSize - 1 , totalSize ) ,
Body : chunk ,
2020-03-21 23:31:51 +01:00
Options : options ,
2020-01-25 11:41:20 +01:00
}
_ , _ = chunk . Seek ( skip , io . SeekStart )
2024-01-05 13:43:19 +01:00
resp , err = o . fs . unAuth . Call ( ctx , & opts )
2020-01-25 11:41:20 +01:00
if err != nil && resp != nil && resp . StatusCode == http . StatusRequestedRangeNotSatisfiable {
fs . Debugf ( o , "Received 416 error - reading current position from server: %v" , err )
pos , posErr := o . getPosition ( ctx , url )
if posErr != nil {
fs . Debugf ( o , "Failed to read position: %v" , posErr )
return false , posErr
}
skip = pos - start
fs . Debugf ( o , "Read position %d, chunk is %d..%d, bytes to skip = %d" , pos , start , start + chunkSize , skip )
switch {
case skip < 0 :
2021-11-04 11:12:57 +01:00
return false , fmt . Errorf ( "sent block already (skip %d < 0), can't rewind: %w" , skip , err )
2020-01-25 11:41:20 +01:00
case skip > chunkSize :
2021-11-04 11:12:57 +01:00
return false , fmt . Errorf ( "position is in the future (skip %d > chunkSize %d), can't skip forward: %w" , skip , chunkSize , err )
2020-01-25 11:41:20 +01:00
case skip == chunkSize :
fs . Debugf ( o , "Skipping chunk as already sent (skip %d == chunkSize %d)" , skip , chunkSize )
return false , nil
}
2021-11-04 11:12:57 +01:00
return true , fmt . Errorf ( "retry this chunk skipping %d bytes: %w" , skip , err )
2020-01-25 11:41:20 +01:00
}
2019-08-28 12:21:38 +02:00
if err != nil {
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2018-03-26 18:17:56 +02:00
}
2019-08-28 12:21:38 +02:00
body , err = rest . ReadBody ( resp )
if err != nil {
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2019-08-28 12:21:38 +02:00
}
if resp . StatusCode == 200 || resp . StatusCode == 201 {
// we are done :)
// read the item
info = & api . Item { }
return false , json . Unmarshal ( body , info )
2018-03-15 08:06:17 +01:00
}
2019-08-28 12:21:38 +02:00
return false , nil
2015-10-04 23:08:31 +02:00
} )
2018-03-17 10:46:06 +01:00
return info , err
2015-10-04 23:08:31 +02:00
}
// cancelUploadSession cancels an upload session
2019-09-04 21:00:37 +02:00
func ( o * Object ) cancelUploadSession ( ctx context . Context , url string ) ( err error ) {
2015-11-27 13:46:13 +01:00
opts := rest . Opts {
2015-10-04 23:08:31 +02:00
Method : "DELETE" ,
2017-07-07 09:18:13 +02:00
RootURL : url ,
2015-10-04 23:08:31 +02:00
NoResponse : true ,
}
var resp * http . Response
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . Call ( ctx , & opts )
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2015-10-04 23:08:31 +02:00
} )
return
}
// uploadMultipart uploads a file using multipart upload
2020-03-21 23:31:51 +01:00
func ( o * Object ) uploadMultipart ( ctx context . Context , in io . Reader , size int64 , modTime time . Time , options ... fs . OpenOption ) ( info * api . Item , err error ) {
2018-09-04 17:57:47 +02:00
if size <= 0 {
2018-11-02 13:12:22 +01:00
return nil , errors . New ( "unknown-sized upload not supported" )
2018-09-04 17:57:47 +02:00
}
2015-10-04 23:08:31 +02:00
// Create upload session
2017-02-09 12:01:20 +01:00
fs . Debugf ( o , "Starting multipart upload" )
2019-06-17 10:34:30 +02:00
session , err := o . createUploadSession ( ctx , modTime )
2015-10-04 23:08:31 +02:00
if err != nil {
2018-03-17 10:46:06 +01:00
return nil , err
2015-10-04 23:08:31 +02:00
}
uploadURL := session . UploadURL
2020-06-25 15:35:41 +02:00
// Cancel the session if something went wrong
defer atexit . OnError ( & err , func ( ) {
fs . Debugf ( o , "Cancelling multipart upload: %v" , err )
cancelErr := o . cancelUploadSession ( ctx , uploadURL )
if cancelErr != nil {
2021-04-16 06:41:38 +02:00
fs . Logf ( o , "Failed to cancel multipart upload: %v (upload failed due to: %v)" , cancelErr , err )
2015-10-04 23:08:31 +02:00
}
2020-06-25 15:35:41 +02:00
} ) ( )
2015-10-04 23:08:31 +02:00
// Upload the chunks
remaining := size
position := int64 ( 0 )
for remaining > 0 {
2018-05-14 19:06:57 +02:00
n := int64 ( o . fs . opt . ChunkSize )
2015-10-04 23:08:31 +02:00
if remaining < n {
n = remaining
}
2018-01-12 17:30:54 +01:00
seg := readers . NewRepeatableReader ( io . LimitReader ( in , n ) )
2017-02-09 12:01:20 +01:00
fs . Debugf ( o , "Uploading segment %d/%d size %d" , position , size , n )
2020-03-21 23:31:51 +01:00
info , err = o . uploadFragment ( ctx , uploadURL , position , size , seg , n , options ... )
2015-10-04 23:08:31 +02:00
if err != nil {
2018-03-17 10:46:06 +01:00
return nil , err
2015-10-04 23:08:31 +02:00
}
remaining -= n
position += n
}
2018-03-17 10:46:06 +01:00
return info , nil
}
2021-03-02 20:11:57 +01:00
// Update the content of a remote file within 4 MiB size in one single request
2018-09-04 17:57:47 +02:00
// This function will set modtime after uploading, which will create a new version for the remote file
2020-03-21 23:31:51 +01:00
func ( o * Object ) uploadSinglepart ( ctx context . Context , in io . Reader , size int64 , modTime time . Time , options ... fs . OpenOption ) ( info * api . Item , err error ) {
2018-09-04 18:37:52 +02:00
if size < 0 || size > int64 ( fs . SizeSuffix ( 4 * 1024 * 1024 ) ) {
2021-03-02 20:11:57 +01:00
return nil , errors . New ( "size passed into uploadSinglepart must be >= 0 and <= 4 MiB" )
2018-09-04 17:57:47 +02:00
}
fs . Debugf ( o , "Starting singlepart upload" )
var resp * http . Response
2021-01-29 21:04:21 +01:00
opts := o . fs . newOptsCallWithPath ( ctx , o . remote , "PUT" , "/content" )
opts . ContentLength = & size
opts . Body = in
opts . Options = options
2018-09-04 17:57:47 +02:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2019-09-04 21:00:37 +02:00
resp , err = o . fs . srv . CallJSON ( ctx , & opts , nil , & info )
2018-10-03 06:46:25 +02:00
if apiErr , ok := err . ( * api . Error ) ; ok {
if apiErr . ErrorInfo . Code == "nameAlreadyExists" {
// Make the error more user-friendly
err = errors . New ( err . Error ( ) + " (is it a OneNote file?)" )
}
}
2021-03-16 16:50:02 +01:00
return shouldRetry ( ctx , resp , err )
2018-09-04 17:57:47 +02:00
} )
if err != nil {
return nil , err
}
err = o . setMetaData ( info )
if err != nil {
return nil , err
}
// Set the mod time now and read metadata
2019-06-17 10:34:30 +02:00
return o . setModTime ( ctx , modTime )
2018-09-04 17:57:47 +02:00
}
2015-10-04 23:08:31 +02:00
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
2019-06-17 10:34:30 +02:00
func ( o * Object ) Update ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( err error ) {
2018-10-03 06:46:25 +02:00
if o . hasMetaData && o . isOneNoteFile {
return errors . New ( "can't upload content to a OneNote file" )
}
2017-01-29 21:42:43 +01:00
o . fs . tokenRenewer . Start ( )
defer o . fs . tokenRenewer . Stop ( )
2016-02-18 12:35:25 +01:00
size := src . Size ( )
2019-06-17 10:34:30 +02:00
modTime := src . ModTime ( ctx )
2016-02-18 12:35:25 +01:00
2018-09-04 17:57:47 +02:00
var info * api . Item
if size > 0 {
2020-03-21 23:31:51 +01:00
info , err = o . uploadMultipart ( ctx , in , size , modTime , options ... )
2018-09-04 17:57:47 +02:00
} else if size == 0 {
2020-03-21 23:31:51 +01:00
info , err = o . uploadSinglepart ( ctx , in , size , modTime , options ... )
2018-09-04 17:57:47 +02:00
} else {
2019-02-02 09:37:33 +01:00
return errors . New ( "unknown-sized upload not supported" )
2018-09-04 17:57:47 +02:00
}
2015-10-04 23:08:31 +02:00
if err != nil {
return err
}
2018-09-04 17:57:47 +02:00
2020-08-06 18:59:26 +02:00
// If updating the file then remove versions
if o . fs . opt . NoVersions && o . hasMetaData {
err = o . deleteVersions ( ctx )
if err != nil {
fs . Errorf ( o , "Failed to remove versions: %v" , err )
}
}
2018-03-15 08:06:17 +01:00
return o . setMetaData ( info )
2015-10-04 23:08:31 +02:00
}
// Remove an object by deleting the item with this object's ID.
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.deleteObject(ctx, o.id)
}
2016-09-21 23:13:24 +02:00
// MimeType of an Object if known, "" otherwise.
// Returns the cached value; no API call is made.
func (o *Object) MimeType(ctx context.Context) string {
	return o.mimeType
}
2018-05-13 10:16:56 +02:00
// ID returns the ID of the Object if known, or "" if not.
// Returns the cached value; no API call is made.
func (o *Object) ID() string {
	return o.id
}
2021-01-29 21:04:21 +01:00
/ *
* URL Build routine area start
* 1. In this area , region - related URL rewrites are applied . As the API is blackbox ,
* we cannot thoroughly test this part . Please be extremely careful while changing them .
 * 2. If possible, please don't introduce region-related code elsewhere; patch these helper functions instead.
* 3. To avoid region - related issues , please don ' t manually build rest . Opts from scratch .
* Instead , use these helper function , and customize the URL afterwards if needed .
*
2022-07-17 18:07:23 +02:00
* currently , the Vnet Group ' s API differs in the following places :
2021-01-29 21:04:21 +01:00
* - https : //{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
* - this API doesn ' t work ( gives invalid request )
* - can be replaced with the following API :
* - https : //{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
* - however , this API does NOT support multi - level leaf like a / b / c
* - https : //{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'")
* - this API does support multi - level leaf like a / b / c
* - https : //{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
* - Same as above
* /
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func ( f * Fs ) parseNormalizedID ( ID string ) ( string , string , string ) {
rootURL := graphAPIEndpoint [ f . opt . Region ] + "/v1.0/drives"
2022-06-08 22:25:17 +02:00
if strings . Contains ( ID , "#" ) {
2021-01-29 21:04:21 +01:00
s := strings . Split ( ID , "#" )
return s [ 1 ] , s [ 0 ] , rootURL
}
return ID , "" , ""
}
// newOptsCall build the rest.Opts structure with *a normalizedID(driveID#fileID, or simply fileID)*
// using url template https://{Endpoint}/drives/{driveID}/items/{itemID}/{route}
func ( f * Fs ) newOptsCall ( normalizedID string , method string , route string ) ( opts rest . Opts ) {
id , drive , rootURL := f . parseNormalizedID ( normalizedID )
2018-07-11 19:48:59 +02:00
if drive != "" {
return rest . Opts {
Method : method ,
RootURL : rootURL ,
Path : "/" + drive + "/items/" + id + route ,
}
}
return rest . Opts {
Method : method ,
Path : "/items/" + id + route ,
}
}
2021-01-29 21:04:21 +01:00
// escapeSingleQuote doubles every single quote in str, as needed when the
// value is embedded inside a single-quoted children('...') URL segment.
func escapeSingleQuote(str string) string {
	var b strings.Builder
	for _, r := range str {
		if r == '\'' {
			b.WriteString("''")
		} else {
			b.WriteRune(r)
		}
	}
	return b.String()
}
// newOptsCallWithIDPath builds the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
// using url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
// or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for Vnet Group)
// if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept multi-level leaf)
// if isPath is true, multi-level leaf like a/b/c can be passed
//
// ok reports whether the normalizedID carried a drive ID; when false the
// returned opts lack a RootURL and the caller should fall back to the
// root-path variant.
func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
	// Multi-level leaves go through the path encoder, single names through the name encoder
	encoder := f.opt.Enc.FromStandardName
	if isPath {
		encoder = f.opt.Enc.FromStandardPath
	}
	trueDirID, drive, rootURL := f.parseNormalizedID(normalizedID)
	if drive == "" {
		trueDirID = normalizedID
	}
	// Default (international) URL form: /items/{id}:/{leaf}:/{route}
	entity := "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(encoder(leaf))) + route
	if f.opt.Region == regionCN {
		// The Vnet Group (regionCN) endpoint rejects the ':'-style addressing,
		// so use the children('...') forms described above instead
		if isPath {
			entity = "/items/" + trueDirID + "/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+encoder(escapeSingleQuote(leaf))+"'")
		} else {
			entity = "/items/" + trueDirID + "/children('" + rest.URLPathEscape(encoder(escapeSingleQuote(leaf))) + "')" + route
		}
	}
	if drive == "" {
		// No drive ID available - return incomplete opts and ok=false
		ok = false
		opts = rest.Opts{
			Method: method,
			Path:   entity,
		}
		return
	}
	ok = true
	opts = rest.Opts{
		Method:  method,
		RootURL: rootURL,
		Path:    "/" + drive + entity,
	}
	return
}
// newOptsCallWithRootPath builds the rest.Opts structure with an *absolute path starting from root*
// using url template https://{Endpoint}/drives/{driveID}/root:/{path}:/{route}
// or https://{Endpoint}/drives/{driveID}/root/children('@a1')/{route}?@a1=URLEncode({path})
// (the doc comment previously named newOptsCallWithIDPath by mistake)
func (f *Fs) newOptsCallWithRootPath(path string, method string, route string) (opts rest.Opts) {
	// A trailing slash would produce an invalid ':'-addressed URL
	path = strings.TrimSuffix(path, "/")
	newURL := "/root:/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(path))) + route
	if f.opt.Region == regionCN {
		// The Vnet Group (regionCN) endpoint rejects ':'-style addressing - use children('@a1') instead
		newURL = "/root/children('@a1')" + route + "?@a1=" + url.QueryEscape("'"+escapeSingleQuote(f.opt.Enc.FromStandardPath(path))+"'")
	}
	return rest.Opts{
		Method: method,
		Path:   newURL,
	}
}
2021-01-29 21:04:21 +01:00
// newOptsCallWithPath builds the rest.Opts intelligently.
// It will first try to resolve the path using dircache, which enables support
// for "Shared with me" files. If present in cache, it uses the ID + Path
// variant, else it falls back to the RootPath variant.
func (f *Fs) newOptsCallWithPath(ctx context.Context, path string, method string, route string) (opts rest.Opts) {
	// The remote root needs no leaf addressing.
	// (Previously the path string was held in a local named `url`, shadowing
	// the imported net/url package - inlined to avoid the shadow.)
	if path == "" {
		return rest.Opts{
			Method: method,
			Path:   "/root" + route,
		}
	}
	// Look the parent directory up in the dircache. The error is deliberately
	// ignored: on failure directoryID is empty, the ID+path variant below
	// reports !ok, and we fall back to the root-path variant.
	leaf, directoryID, _ := f.dirCache.FindPath(ctx, path, false)
	// try to use the ID+path variant first
	if opts, ok := f.newOptsCallWithIDPath(directoryID, leaf, false, method, route); ok {
		return opts
	}
	// fallback to the root-path variant
	return f.newOptsCallWithRootPath(path, method, route)
}
/ *
* URL Build routine area end
* /
2020-07-02 10:38:37 +02:00
// Returns the canonical form of the driveID: the configured drive ID is
// substituted for an empty input, and the result is lower-cased.
func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
	if driveID == "" {
		// Fall back to the configured drive ID
		driveID = f.opt.DriveID
	}
	return strings.ToLower(driveID)
}
2022-04-29 15:46:06 +02:00
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
//
// The Onedrive implementation gives the whole hierarchy up to the top when
// an object is changed. For instance, if a/b/c is changed, this function
// will call notifyFunc with a, a/b and a/b/c.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartPageToken early so all changes from now on get processed
		nextDeltaToken, err := f.changeNotifyStartPageToken(ctx)
		if err != nil {
			fs.Errorf(f, "Could not get first deltaLink: %s", err)
			return
		}
		fs.Debugf(f, "Next delta token is: %s", nextDeltaToken)
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				if !ok {
					// channel closed - stop polling and exit the goroutine
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				// interval changed - drop any existing ticker before
				// (possibly) creating a new one
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				// a zero interval disables polling (tickerC stays nil, so
				// that select case never fires)
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				fs.Debugf(f, "Checking for changes on remote")
				nextDeltaToken, err = f.changeNotifyRunner(ctx, notifyFunc, nextDeltaToken)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
func ( f * Fs ) changeNotifyStartPageToken ( ctx context . Context ) ( nextDeltaToken string , err error ) {
delta , err := f . changeNotifyNextChange ( ctx , "latest" )
2022-06-08 22:25:17 +02:00
if err != nil {
return
}
2022-04-29 15:46:06 +02:00
parsedURL , err := url . Parse ( delta . DeltaLink )
if err != nil {
return
}
nextDeltaToken = parsedURL . Query ( ) . Get ( "token" )
return
}
2023-09-19 10:42:12 +02:00
// changeNotifyNextChange queries the drive delta endpoint with the given
// token and returns the decoded delta response.
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta api.DeltaResponse, err error) {
	opts := f.buildDriveDeltaOpts(token)
	_, err = f.srv.CallJSON(ctx, &opts, nil, &delta)
	return
}
// buildDriveDeltaOpts assembles the request options for the drive's
// /root/delta endpoint, using the region-specific Graph endpoint.
func (f *Fs) buildDriveDeltaOpts(token string) rest.Opts {
	return rest.Opts{
		Method:     "GET",
		RootURL:    graphAPIEndpoint[f.opt.Region] + "/v1.0/drives",
		Path:       "/" + f.driveID + "/root/delta",
		Parameters: map[string][]string{"token": {token}},
	}
}
// changeNotifyRunner fetches the next batch of changes after deltaToken,
// calls notifyFunc for each changed file or folder that lies inside f.root
// (with a path relative to f.root), and returns the token for the next poll.
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), deltaToken string) (nextDeltaToken string, err error) {
	delta, err := f.changeNotifyNextChange(ctx, deltaToken)
	if err != nil {
		return
	}
	// the token for the next round trip is carried in the DeltaLink URL
	parsedURL, err := url.Parse(delta.DeltaLink)
	if err != nil {
		return
	}
	nextDeltaToken = parsedURL.Query().Get("token")
	for _, item := range delta.Value {
		// the drive's root folder has no parent ID - skip it
		isDriveRootFolder := item.GetParentReference().ID == ""
		if isDriveRootFolder {
			continue
		}
		fullPath, err := getItemFullPath(&item)
		if err != nil {
			fs.Errorf(f, "Could not get item full path: %s", err)
			continue
		}
		// don't notify for the root of this remote itself
		if fullPath == f.root {
			continue
		}
		// only report changes inside this remote's root
		relName, insideRoot := getRelativePathInsideBase(f.root, fullPath)
		if !insideRoot {
			continue
		}
		// classify the entry; items that are neither file nor folder are ignored
		if item.GetFile() != nil {
			notifyFunc(relName, fs.EntryObject)
		} else if item.GetFolder() != nil {
			notifyFunc(relName, fs.EntryDirectory)
		}
	}
	return
}
// getItemFullPath reconstructs the drive-relative path of item from its name
// and its parent reference path (which has the form "...:<dir path>").
func getItemFullPath(item *api.Item) (string, error) {
	name := item.GetName()
	parent := item.GetParentReference()
	if parent == nil || parent.Path == "" {
		return name, nil
	}
	// everything after the first ":" is the directory part
	idx := strings.Index(parent.Path, ":")
	if idx < 0 {
		return name, fmt.Errorf("invalid parent path: %s", parent.Path)
	}
	if dir := parent.Path[idx+1:]; dir != "" {
		name = strings.TrimPrefix(dir, "/") + "/" + name
	}
	return name, nil
}
2019-01-09 06:11:00 +01:00
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false. A `target` equal to `base` is inside it with
// an empty relative path.
func getRelativePathInsideBase(base, target string) (string, bool) {
	// an empty base contains everything
	if base == "" {
		return target, true
	}
	baseSlash := base + "/"
	if strings.HasPrefix(target, baseSlash) {
		return target[len(baseSlash):], true
	}
	// The previous implementation checked HasPrefix(target+"/", baseSlash)
	// and then sliced target[len(baseSlash):], which panicked with an
	// out-of-range index when target == base. Handle that case explicitly.
	if target == base {
		return "", true
	}
	return "", false
}
2019-03-11 09:30:38 +01:00
// Adds a ":" at the end of `remotePath` in a proper manner.
// If `remotePath` already ends with "/", change it to ":/"
// If `remotePath` is "", return "".
// A workaround for #2720 and #3039
func withTrailingColon(remotePath string) string {
	switch {
	case remotePath == "":
		return ""
	case strings.HasSuffix(remotePath, "/"):
		// move the colon in front of the trailing slash
		return strings.TrimSuffix(remotePath, "/") + ":/"
	default:
		return remotePath + ":"
	}
}
2015-10-04 23:08:31 +02:00
// Check the interfaces are satisfied
var (
2018-08-19 17:22:51 +02:00
_ fs . Fs = ( * Fs ) ( nil )
_ fs . Purger = ( * Fs ) ( nil )
_ fs . Copier = ( * Fs ) ( nil )
_ fs . Mover = ( * Fs ) ( nil )
_ fs . DirMover = ( * Fs ) ( nil )
2016-12-09 16:39:29 +01:00
_ fs . DirCacheFlusher = ( * Fs ) ( nil )
2018-04-16 23:19:25 +02:00
_ fs . Abouter = ( * Fs ) ( nil )
2018-10-09 14:11:48 +02:00
_ fs . PublicLinker = ( * Fs ) ( nil )
2020-08-03 21:45:37 +02:00
_ fs . CleanUpper = ( * Fs ) ( nil )
2023-09-19 13:09:51 +02:00
_ fs . ListRer = ( * Fs ) ( nil )
2023-12-08 05:33:51 +01:00
_ fs . Shutdowner = ( * Fs ) ( nil )
2016-12-09 16:39:29 +01:00
_ fs . Object = ( * Object ) ( nil )
_ fs . MimeTyper = & Object { }
2018-05-13 10:16:56 +02:00
_ fs . IDer = & Object { }
2015-10-04 23:08:31 +02:00
)