mirror of
https://github.com/rclone/rclone.git
synced 2024-11-22 00:13:49 +01:00
Add support for Microsoft One Drive - fixes #10
* Still to do * Copy * Move * MoveDir
This commit is contained in:
parent
0ab3f020ab
commit
2fcb8f5db7
@ -479,7 +479,7 @@ func (f *FsAcd) purgeCheck(check bool) error {
|
|||||||
if check {
|
if check {
|
||||||
// check directory is empty
|
// check directory is empty
|
||||||
empty := true
|
empty := true
|
||||||
_, err := f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
|
_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
|
||||||
switch *node.Kind {
|
switch *node.Kind {
|
||||||
case folderKind:
|
case folderKind:
|
||||||
empty = false
|
empty = false
|
||||||
|
@ -19,6 +19,7 @@ Rclone is a command line program to sync files and directories to and from
|
|||||||
* Dropbox
|
* Dropbox
|
||||||
* Google Cloud Storage
|
* Google Cloud Storage
|
||||||
* Amazon Cloud Drive
|
* Amazon Cloud Drive
|
||||||
|
* Microsoft One Drive
|
||||||
* The local filesystem
|
* The local filesystem
|
||||||
|
|
||||||
Features
|
Features
|
||||||
|
111
docs/content/onedrive.md
Normal file
111
docs/content/onedrive.md
Normal file
@ -0,0 +1,111 @@
|
|||||||
|
---
|
||||||
|
title: "Microsoft One Drive"
|
||||||
|
description: "Rclone docs for Microsoft One Drive"
|
||||||
|
date: "2015-10-14"
|
||||||
|
---
|
||||||
|
|
||||||
|
<i class="fa fa-windows"></i> Microsoft One Drive
|
||||||
|
-----------------------------------------
|
||||||
|
|
||||||
|
Paths are specified as `remote:path`
|
||||||
|
|
||||||
|
Paths may be as deep as required, eg `remote:directory/subdirectory`.
|
||||||
|
|
||||||
|
The initial setup for One Drive involves getting a token from
|
||||||
|
Microsoft which you need to do in your browser. `rclone config` walks
|
||||||
|
you through it.
|
||||||
|
|
||||||
|
Here is an example of how to make a remote called `remote`. First run:
|
||||||
|
|
||||||
|
rclone config
|
||||||
|
|
||||||
|
This will guide you through an interactive setup process:
|
||||||
|
|
||||||
|
```
|
||||||
|
n) New remote
|
||||||
|
d) Delete remote
|
||||||
|
q) Quit config
|
||||||
|
e/n/d/q> n
|
||||||
|
name> remote
|
||||||
|
What type of source is it?
|
||||||
|
Choose a number from below
|
||||||
|
1) amazon cloud drive
|
||||||
|
2) drive
|
||||||
|
3) dropbox
|
||||||
|
4) google cloud storage
|
||||||
|
5) local
|
||||||
|
6) onedrive
|
||||||
|
7) s3
|
||||||
|
8) swift
|
||||||
|
type> 6
|
||||||
|
Microsoft App Client Id - leave blank normally.
|
||||||
|
client_id>
|
||||||
|
Microsoft App Client Secret - leave blank normally.
|
||||||
|
client_secret>
|
||||||
|
Remote config
|
||||||
|
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
|
||||||
|
Log in and authorize rclone for access
|
||||||
|
Waiting for code...
|
||||||
|
Got code
|
||||||
|
--------------------
|
||||||
|
[remote]
|
||||||
|
client_id =
|
||||||
|
client_secret =
|
||||||
|
token = {"access_token":"XXXXXX"}
|
||||||
|
--------------------
|
||||||
|
y) Yes this is OK
|
||||||
|
e) Edit this remote
|
||||||
|
d) Delete this remote
|
||||||
|
y/e/d> y
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that rclone runs a webserver on your local machine to collect the
|
||||||
|
token as returned from Microsoft. This only runs from the moment it
|
||||||
|
opens your browser to the moment you get back the verification
|
||||||
|
code. This is on `http://127.0.0.1:53682/` and this it may require
|
||||||
|
you to unblock it temporarily if you are running a host firewall.
|
||||||
|
|
||||||
|
Once configured you can then use `rclone` like this,
|
||||||
|
|
||||||
|
List directories in top level of your One Drive
|
||||||
|
|
||||||
|
rclone lsd remote:
|
||||||
|
|
||||||
|
List all the files in your One Drive
|
||||||
|
|
||||||
|
rclone ls remote:
|
||||||
|
|
||||||
|
To copy a local directory to an One Drive directory called backup
|
||||||
|
|
||||||
|
rclone copy /home/source remote:backup
|
||||||
|
|
||||||
|
### Modified time and MD5SUMs ###
|
||||||
|
|
||||||
|
One Drive allows modification times to be set on objects accurate to 1
|
||||||
|
second. These will be used to detect whether objects need syncing or
|
||||||
|
not.
|
||||||
|
|
||||||
|
One drive does not support MD5SUMs. This means the `--checksum` flag
|
||||||
|
will be equivalent to the `--size-only` flag.
|
||||||
|
|
||||||
|
### Deleting files ###
|
||||||
|
|
||||||
|
Any files you delete with rclone will end up in the trash. Microsoft
|
||||||
|
doesn't provide an API to permanently delete files, nor to empty the
|
||||||
|
trash, so you will have to do that with one of Microsoft's apps or via
|
||||||
|
the One Drive website.
|
||||||
|
|
||||||
|
### Limitations ###
|
||||||
|
|
||||||
|
Note that One Drive is case sensitive so you can't have a
|
||||||
|
file called "Hello.doc" and one called "hello.doc".
|
||||||
|
|
||||||
|
Rclone only supports your default One Drive, and doesn't work with One
|
||||||
|
Drive for business. Both these issues may be fixed at some point
|
||||||
|
depending on user demand!
|
||||||
|
|
||||||
|
There are quite a few characters that can't be in One Drive file
|
||||||
|
names. These can't occur on Windows platforms, but on non-Windows
|
||||||
|
platforms they are common. Rclone will map these names to and from an
|
||||||
|
identical looking unicode equivalent. For example if a file has a `?`
|
||||||
|
in it will be mapped to `?` instead.
|
@ -23,6 +23,7 @@ Here is an overview of the major features of each cloud storage system.
|
|||||||
| Dropbox | No | No | Yes | No |
|
| Dropbox | No | No | Yes | No |
|
||||||
| Google Cloud Storage | Yes | Yes | No | No |
|
| Google Cloud Storage | Yes | Yes | No | No |
|
||||||
| Amazon Cloud Drive | Yes | No | Yes | No |
|
| Amazon Cloud Drive | Yes | No | Yes | No |
|
||||||
|
| Microsoft One Drive | No | Yes | Yes | No |
|
||||||
| The local filesystem | Yes | Yes | Depends | No |
|
| The local filesystem | Yes | Yes | Depends | No |
|
||||||
|
|
||||||
### MD5SUM ###
|
### MD5SUM ###
|
||||||
|
@ -37,6 +37,7 @@
|
|||||||
<li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
|
<li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
|
||||||
<li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
|
<li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
|
||||||
<li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Cloud Drive</a></li>
|
<li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Cloud Drive</a></li>
|
||||||
|
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft One Drive</a></li>
|
||||||
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
|
<li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
|
||||||
</ul>
|
</ul>
|
||||||
</li>
|
</li>
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
_ "github.com/ncw/rclone/dropbox"
|
_ "github.com/ncw/rclone/dropbox"
|
||||||
_ "github.com/ncw/rclone/googlecloudstorage"
|
_ "github.com/ncw/rclone/googlecloudstorage"
|
||||||
_ "github.com/ncw/rclone/local"
|
_ "github.com/ncw/rclone/local"
|
||||||
|
_ "github.com/ncw/rclone/onedrive"
|
||||||
_ "github.com/ncw/rclone/s3"
|
_ "github.com/ncw/rclone/s3"
|
||||||
_ "github.com/ncw/rclone/swift"
|
_ "github.com/ncw/rclone/swift"
|
||||||
)
|
)
|
||||||
|
@ -65,7 +65,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
fstests.NilObject = fs.Object((*{{ .FsName }}.FsObject{{ .ObjectName }})(nil))
|
fstests.NilObject = fs.Object((*{{ .FsName }}.{{ .ObjectName }})(nil))
|
||||||
fstests.RemoteName = "{{ .TestName }}"
|
fstests.RemoteName = "{{ .TestName }}"
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -126,12 +126,13 @@ func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName
|
|||||||
func main() {
|
func main() {
|
||||||
fns := findTestFunctions()
|
fns := findTestFunctions()
|
||||||
t := template.Must(template.New("main").Parse(testProgram))
|
t := template.Must(template.New("main").Parse(testProgram))
|
||||||
generateTestProgram(t, fns, "Local", "Local")
|
generateTestProgram(t, fns, "Local", "FsObjectLocal")
|
||||||
generateTestProgram(t, fns, "Swift", "Swift")
|
generateTestProgram(t, fns, "Swift", "FsObjectSwift")
|
||||||
generateTestProgram(t, fns, "S3", "S3")
|
generateTestProgram(t, fns, "S3", "FsObjectS3")
|
||||||
generateTestProgram(t, fns, "Drive", "Drive")
|
generateTestProgram(t, fns, "Drive", "FsObjectDrive")
|
||||||
generateTestProgram(t, fns, "GoogleCloudStorage", "Storage")
|
generateTestProgram(t, fns, "GoogleCloudStorage", "FsObjectStorage")
|
||||||
generateTestProgram(t, fns, "Dropbox", "Dropbox")
|
generateTestProgram(t, fns, "Dropbox", "FsObjectDropbox")
|
||||||
generateTestProgram(t, fns, "AmazonCloudDrive", "Acd")
|
generateTestProgram(t, fns, "AmazonCloudDrive", "FsObjectAcd")
|
||||||
|
generateTestProgram(t, fns, "OneDrive", "Object")
|
||||||
log.Printf("Done")
|
log.Printf("Done")
|
||||||
}
|
}
|
||||||
|
BIN
graphics/rclone-50x50.png
Normal file
BIN
graphics/rclone-50x50.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 5.7 KiB |
129
onedrive/api/api.go
Normal file
129
onedrive/api/api.go
Normal file
@ -0,0 +1,129 @@
|
|||||||
|
// Package api implements the API for one drive
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/ncw/rclone/fs"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
rootURL = "https://api.onedrive.com/v1.0" // root URL for requests
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client contains the info to sustain the API
|
||||||
|
type Client struct {
|
||||||
|
c *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient takes an oauth http.Client and makes a new api instance
|
||||||
|
func NewClient(c *http.Client) *Client {
|
||||||
|
return &Client{
|
||||||
|
c: c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Opts contains parameters for Call, CallJSON etc
|
||||||
|
type Opts struct {
|
||||||
|
Method string
|
||||||
|
Path string
|
||||||
|
Absolute bool // Path is absolute
|
||||||
|
Body io.Reader
|
||||||
|
NoResponse bool // set to close Body
|
||||||
|
ContentType string
|
||||||
|
ContentLength *int64
|
||||||
|
ContentRange string
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkClose is a utility function used to check the return from
|
||||||
|
// Close in a defer statement.
|
||||||
|
func checkClose(c io.Closer, err *error) {
|
||||||
|
cerr := c.Close()
|
||||||
|
if *err == nil {
|
||||||
|
*err = cerr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeJSON decodes resp.Body into json
|
||||||
|
func (api *Client) decodeJSON(resp *http.Response, result interface{}) (err error) {
|
||||||
|
defer checkClose(resp.Body, &err)
|
||||||
|
decoder := json.NewDecoder(resp.Body)
|
||||||
|
return decoder.Decode(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call makes the call and returns the http.Response
|
||||||
|
//
|
||||||
|
// if err != nil then resp.Body will need to be closed
|
||||||
|
//
|
||||||
|
// it will return resp if at all possible, even if err is set
|
||||||
|
func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
|
||||||
|
if opts == nil {
|
||||||
|
return nil, fmt.Errorf("call() called with nil opts")
|
||||||
|
}
|
||||||
|
var url string
|
||||||
|
if opts.Absolute {
|
||||||
|
url = opts.Path
|
||||||
|
} else {
|
||||||
|
url = rootURL + opts.Path
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest(opts.Method, url, opts.Body)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if opts.ContentType != "" {
|
||||||
|
req.Header.Add("Content-Type", opts.ContentType)
|
||||||
|
}
|
||||||
|
if opts.ContentLength != nil {
|
||||||
|
req.ContentLength = *opts.ContentLength
|
||||||
|
}
|
||||||
|
if opts.ContentRange != "" {
|
||||||
|
req.Header.Add("Content-Range", opts.ContentRange)
|
||||||
|
}
|
||||||
|
req.Header.Add("User-Agent", fs.UserAgent)
|
||||||
|
resp, err = api.c.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||||
|
// Decode error response
|
||||||
|
errResponse := new(Error)
|
||||||
|
err = api.decodeJSON(resp, &errResponse)
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
return resp, errResponse
|
||||||
|
}
|
||||||
|
if opts.NoResponse {
|
||||||
|
return resp, resp.Body.Close()
|
||||||
|
}
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallJSON runs Call and decodes the body as a JSON object into result
|
||||||
|
//
|
||||||
|
// If request is not nil then it will be JSON encoded as the body of the request
|
||||||
|
//
|
||||||
|
// It will return resp if at all possible, even if err is set
|
||||||
|
func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
||||||
|
// Set the body up as a JSON object if required
|
||||||
|
if opts.Body == nil && request != nil {
|
||||||
|
body, err := json.Marshal(request)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var newOpts = *opts
|
||||||
|
newOpts.Body = bytes.NewBuffer(body)
|
||||||
|
newOpts.ContentType = "application/json"
|
||||||
|
opts = &newOpts
|
||||||
|
}
|
||||||
|
resp, err = api.Call(opts)
|
||||||
|
if err != nil {
|
||||||
|
return resp, err
|
||||||
|
}
|
||||||
|
err = api.decodeJSON(resp, response)
|
||||||
|
return resp, err
|
||||||
|
}
|
191
onedrive/api/types.go
Normal file
191
onedrive/api/types.go
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
// Types passed and returned to and from the API
|
||||||
|
|
||||||
|
package api
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
const (
|
||||||
|
timeFormat = `"` + time.RFC3339 + `"`
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error is returned from one drive when things go wrong
|
||||||
|
type Error struct {
|
||||||
|
ErrorInfo struct {
|
||||||
|
Code string `json:"code"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
InnerError struct {
|
||||||
|
Code string `json:"code"`
|
||||||
|
} `json:"innererror"`
|
||||||
|
} `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns a string for the error and statistifes the error interface
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
out := e.ErrorInfo.Code
|
||||||
|
if e.ErrorInfo.InnerError.Code != "" {
|
||||||
|
out += ": " + e.ErrorInfo.InnerError.Code
|
||||||
|
}
|
||||||
|
out += ": " + e.ErrorInfo.Message
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check Error statisfies the error interface
|
||||||
|
var _ error = (*Error)(nil)
|
||||||
|
|
||||||
|
// Identity represents an identity of an actor. For example, and actor
|
||||||
|
// can be a user, device, or application.
|
||||||
|
type Identity struct {
|
||||||
|
DisplayName string `json:"displayName"`
|
||||||
|
ID string `json:"id"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdentitySet is a keyed collection of Identity objects. It is used
|
||||||
|
// to represent a set of identities associated with various events for
|
||||||
|
// an item, such as created by or last modified by.
|
||||||
|
type IdentitySet struct {
|
||||||
|
User Identity `json:"user"`
|
||||||
|
Application Identity `json:"application"`
|
||||||
|
Device Identity `json:"device"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Quota groups storage space quota-related information on OneDrive into a single structure.
|
||||||
|
type Quota struct {
|
||||||
|
Total int `json:"total"`
|
||||||
|
Used int `json:"used"`
|
||||||
|
Remaining int `json:"remaining"`
|
||||||
|
Deleted int `json:"deleted"`
|
||||||
|
State string `json:"state"` // normal | nearing | critical | exceeded
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drive is a representation of a drive resource
|
||||||
|
type Drive struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
DriveType string `json:"driveType"`
|
||||||
|
Owner IdentitySet `json:"owner"`
|
||||||
|
Quota Quota `json:"quota"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timestamp represents represents date and time information for the
|
||||||
|
// OneDrive API, by using ISO 8601 and is always in UTC time.
|
||||||
|
type Timestamp time.Time
|
||||||
|
|
||||||
|
// MarshalJSON turns a Timestamp into JSON (in UTC)
|
||||||
|
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
|
||||||
|
out = (*time.Time)(t).UTC().AppendFormat(out, timeFormat)
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON turns JSON into a Timestamp
|
||||||
|
func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||||
|
newT, err := time.Parse(timeFormat, string(data))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*t = Timestamp(newT)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ItemReference groups data needed to reference a OneDrive item
|
||||||
|
// across the service into a single structure.
|
||||||
|
type ItemReference struct {
|
||||||
|
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
|
||||||
|
ID string `json:"id"` // Unique identifier for the item. Read/Write.
|
||||||
|
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
|
||||||
|
}
|
||||||
|
|
||||||
|
// FolderFacet groups folder-related data on OneDrive into a single structure
|
||||||
|
type FolderFacet struct {
|
||||||
|
ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
|
||||||
|
}
|
||||||
|
|
||||||
|
// HashesType groups different types of hashes into a single structure, for an item on OneDrive.
|
||||||
|
type HashesType struct {
|
||||||
|
Sha1Hash string `json:"sha1Hash"` // base64 encoded SHA1 hash for the contents of the file (if available)
|
||||||
|
Crc32Hash string `json:"crc32Hash"` // base64 encoded CRC32 value of the file (if available)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileFacet groups file-related data on OneDrive into a single structure.
|
||||||
|
type FileFacet struct {
|
||||||
|
MimeType string `json:"mimeType"` // The MIME type for the file. This is determined by logic on the server and might not be the value provided when the file was uploaded.
|
||||||
|
Hashes HashesType `json:"hashes"` // Hashes of the file's binary content, if available.
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileSystemInfoFacet contains properties that are reported by the
|
||||||
|
// device's local file system for the local version of an item. This
|
||||||
|
// facet can be used to specify the last modified date or created date
|
||||||
|
// of the item as it was on the local device.
|
||||||
|
type FileSystemInfoFacet struct {
|
||||||
|
CreatedDateTime Timestamp `json:"createdDateTime"` // The UTC date and time the file was created on a client.
|
||||||
|
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // The UTC date and time the file was last modified on a client.
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedFacet indicates that the item on OneDrive has been
|
||||||
|
// deleted. In this version of the API, the presence (non-null) of the
|
||||||
|
// facet value indicates that the file was deleted. A null (or
|
||||||
|
// missing) value indicates that the file is not deleted.
|
||||||
|
type DeletedFacet struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// Item represents metadata for an item in OneDrive
|
||||||
|
type Item struct {
|
||||||
|
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
|
||||||
|
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
|
||||||
|
ETag string `json:"eTag"` // eTag for the entire item (metadata + content). Read-only.
|
||||||
|
CTag string `json:"cTag"` // An eTag for the content of the item. This eTag is not changed if only the metadata is changed. Read-only.
|
||||||
|
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
|
||||||
|
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
|
||||||
|
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
|
||||||
|
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
|
||||||
|
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
|
||||||
|
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
|
||||||
|
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
|
||||||
|
Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
|
||||||
|
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||||
|
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||||
|
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||||
|
// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
|
||||||
|
// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
|
||||||
|
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
|
||||||
|
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
|
||||||
|
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
|
||||||
|
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
|
||||||
|
}
|
||||||
|
|
||||||
|
// ViewDeltaResponse is the response to the view delta method
|
||||||
|
type ViewDeltaResponse struct {
|
||||||
|
Value []Item `json:"value"` // An array of Item objects which have been created, modified, or deleted.
|
||||||
|
NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of changes.
|
||||||
|
DeltaLink string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future.
|
||||||
|
DeltaToken string `json:"@delta.token"` // A token value that can be used in the query string on manually-crafted calls to view.delta. Not needed if you're using nextLink and deltaLink.
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListChildrenResponse is the response to the list children method
|
||||||
|
type ListChildrenResponse struct {
|
||||||
|
Value []Item `json:"value"` // An array of Item objects
|
||||||
|
NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of items.
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateItemRequest is the request to create an item object
|
||||||
|
type CreateItemRequest struct {
|
||||||
|
Name string `json:"name"` // Name of the folder to be created.
|
||||||
|
Folder FolderFacet `json:"folder"` // Empty Folder facet to indicate that folder is the type of resource to be created.
|
||||||
|
ConflictBehavior string `json:"@name.conflictBehavior"` // Determines what to do if an item with a matching name already exists in this folder. Accepted values are: rename, replace, and fail (the default).
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetFileSystemInfo is used to Update an object's FileSystemInfo.
|
||||||
|
type SetFileSystemInfo struct {
|
||||||
|
FileSystemInfo FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateUploadResponse is the response from creating an upload session
|
||||||
|
type CreateUploadResponse struct {
|
||||||
|
UploadURL string `json:"uploadUrl"` // "https://sn3302.up.1drv.com/up/fe6987415ace7X4e1eF866337",
|
||||||
|
ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
|
||||||
|
NextExpectedRanges []string `json:"nextExpectedRanges"` // ["0-"]
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFragmentResponse is the response from uploading a fragment
|
||||||
|
type UploadFragmentResponse struct {
|
||||||
|
ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
|
||||||
|
NextExpectedRanges []string `json:"nextExpectedRanges"` // ["0-"]
|
||||||
|
}
|
852
onedrive/onedrive.go
Normal file
852
onedrive/onedrive.go
Normal file
@ -0,0 +1,852 @@
|
|||||||
|
// Package onedrive provides an interface to the Microsoft One Drive
|
||||||
|
// object storage system.
|
||||||
|
package onedrive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ncw/rclone/dircache"
|
||||||
|
"github.com/ncw/rclone/fs"
|
||||||
|
"github.com/ncw/rclone/oauthutil"
|
||||||
|
"github.com/ncw/rclone/onedrive/api"
|
||||||
|
"github.com/ncw/rclone/pacer"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
rcloneClientID = "0000000044165769"
|
||||||
|
rcloneClientSecret = "0+be4+jYw+7018HY6P3t/Izo+pTc+Yvt8+fy8NHU094="
|
||||||
|
minSleep = 10 * time.Millisecond
|
||||||
|
maxSleep = 2 * time.Second
|
||||||
|
decayConstant = 2 // bigger for slower decay, exponential
|
||||||
|
)
|
||||||
|
|
||||||
|
// Globals
|
||||||
|
var (
|
||||||
|
// Description of how to auth for this app
|
||||||
|
oauthConfig = &oauth2.Config{
|
||||||
|
Scopes: []string{
|
||||||
|
"wl.signin", // Allow single sign-on capabilities
|
||||||
|
"wl.offline_access", // Allow receiving a refresh token
|
||||||
|
"onedrive.readwrite", // r/w perms to all of a user's OneDrive files
|
||||||
|
},
|
||||||
|
Endpoint: oauth2.Endpoint{
|
||||||
|
AuthURL: "https://login.live.com/oauth20_authorize.srf",
|
||||||
|
TokenURL: "https://login.live.com/oauth20_token.srf",
|
||||||
|
},
|
||||||
|
ClientID: rcloneClientID,
|
||||||
|
ClientSecret: fs.Reveal(rcloneClientSecret),
|
||||||
|
RedirectURL: oauthutil.RedirectPublicURL,
|
||||||
|
}
|
||||||
|
chunkSize = fs.SizeSuffix(10 * 1024 * 1024)
|
||||||
|
uploadCutoff = fs.SizeSuffix(10 * 1024 * 1024)
|
||||||
|
)
|
||||||
|
|
||||||
|
// Register with Fs
|
||||||
|
func init() {
|
||||||
|
fs.Register(&fs.Info{
|
||||||
|
Name: "onedrive",
|
||||||
|
NewFs: NewFs,
|
||||||
|
Config: func(name string) {
|
||||||
|
err := oauthutil.Config(name, oauthConfig)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Options: []fs.Option{{
|
||||||
|
Name: oauthutil.ConfigClientID,
|
||||||
|
Help: "Microsoft App Client Id - leave blank normally.",
|
||||||
|
}, {
|
||||||
|
Name: oauthutil.ConfigClientSecret,
|
||||||
|
Help: "Microsoft App Client Secret - leave blank normally.",
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
pflag.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
|
||||||
|
pflag.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fs represents a remote one drive
|
||||||
|
type Fs struct {
|
||||||
|
name string // name of this remote
|
||||||
|
srv *api.Client // the connection to the one drive server
|
||||||
|
root string // the path we are working on
|
||||||
|
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||||
|
pacer *pacer.Pacer // pacer for API calls
|
||||||
|
}
|
||||||
|
|
||||||
|
// Object describes a one drive object
|
||||||
|
//
|
||||||
|
// Will definitely have info but maybe not meta
|
||||||
|
type Object struct {
|
||||||
|
fs *Fs // what this object is part of
|
||||||
|
remote string // The remote path
|
||||||
|
hasMetaData bool // whether info below has been set
|
||||||
|
size int64 // size of the object
|
||||||
|
modTime time.Time // modification time of the object
|
||||||
|
id string // ID of the object
|
||||||
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
|
// Name of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// Root of the remote (as passed into NewFs)
|
||||||
|
func (f *Fs) Root() string {
|
||||||
|
return f.root
|
||||||
|
}
|
||||||
|
|
||||||
|
// String converts this Fs to a string
|
||||||
|
func (f *Fs) String() string {
|
||||||
|
return fmt.Sprintf("One drive root '%s'", f.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pattern to match a one drive path
|
||||||
|
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||||
|
|
||||||
|
// parsePath parses an one drive 'url'
|
||||||
|
func parsePath(path string) (root string) {
|
||||||
|
root = strings.Trim(path, "/")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryErrorCodes is a slice of error codes that we will retry
|
||||||
|
var retryErrorCodes = []int{
|
||||||
|
429, // Too Many Requests.
|
||||||
|
500, // Internal Server Error
|
||||||
|
502, // Bad Gateway
|
||||||
|
503, // Service Unavailable
|
||||||
|
504, // Gateway Timeout
|
||||||
|
509, // Bandwidth Limit Exceeded
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldRetry returns a boolean as to whether this resp and err
|
||||||
|
// deserve to be retried. It returns the err as a convenience
|
||||||
|
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||||
|
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMetaDataForPath reads the metadata from the path
|
||||||
|
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
|
||||||
|
opts := api.Opts{
|
||||||
|
Method: "GET",
|
||||||
|
Path: "/drive/root:/" + replaceReservedChars(path),
|
||||||
|
}
|
||||||
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = f.srv.CallJSON(&opts, nil, &info)
|
||||||
|
return shouldRetry(resp, err)
|
||||||
|
})
|
||||||
|
return info, resp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path, container:path
//
// If root points to a file rather than a directory, a Fs limited to
// that single object is returned instead.
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, err := oauthutil.NewClient(name, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure One Drive: %v", err)
	}

	f := &Fs{
		name:  name,
		root:  root,
		srv:   api.NewClient(oAuthClient),
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}

	// Get rootID - the ID of the drive root is needed to seed the
	// directory cache
	rootInfo, _, err := f.readMetaDataForPath("")
	if err != nil || rootInfo.ID == "" {
		return nil, fmt.Errorf("Failed to get root: %v", err)
	}

	f.dirCache = dircache.New(root, rootInfo.ID, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file: split off the leaf and retry with the
		// parent directory as the root
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj := newF.newObjectWithInfo(remote, nil)
		if obj == nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return a Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	return f, nil
}
|
||||||
|
|
||||||
|
// rootSlash returns root with a slash on if it is empty, otherwise empty string
|
||||||
|
func (f *Fs) rootSlash() string {
|
||||||
|
if f.root == "" {
|
||||||
|
return f.root
|
||||||
|
}
|
||||||
|
return f.root + "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
// newObjectWithInfo returns an Object from a path
//
// If info is supplied it is used to set the metadata, otherwise the
// metadata is fetched from the remote.
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) fs.Object {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info
		o.setMetaData(info)
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			// logged already FsDebug("Failed to read info: %s", err)
			return nil
		}
	}
	return o
}
|
||||||
|
|
||||||
|
// NewFsObject returns an Object from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
	// Delegate to newObjectWithInfo with no info so the metadata is
	// read from the remote
	return f.newObjectWithInfo(remote, nil)
}
|
||||||
|
|
||||||
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
//
// Implements the dircache.DirCacher interface.  Returns the ID of the
// found directory, whether it was found, and any error.
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	// fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
	// Look the parent path up from its ID so we can build the full
	// server-side path for the metadata lookup
	parent, ok := f.dirCache.GetInv(pathID)
	if !ok {
		return "", false, fmt.Errorf("Couldn't find parent ID")
	}
	path := leaf
	if parent != "" {
		path = parent + "/" + path
	}
	if f.dirCache.FoundRoot() {
		path = f.rootSlash() + path
	}
	info, resp, err := f.readMetaDataForPath(path)
	if err != nil {
		// A 404 just means "not found" rather than a real error
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return "", false, nil
		}
		return "", false, err
	}
	if info.Folder == nil {
		return "", false, fmt.Errorf("Found file when looking for folder")
	}
	return info.ID, true, nil
}
|
||||||
|
|
||||||
|
// CreateDir makes a directory with pathID as parent and name leaf
//
// Implements the dircache.DirCacher interface.  Returns the ID of the
// new directory.
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
	// fs.Debug(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var resp *http.Response
	var info *api.Item
	opts := api.Opts{
		Method: "POST",
		Path:   "/drive/items/" + pathID + "/children",
	}
	mkdir := api.CreateItemRequest{
		Name: leaf,
		// "fail" makes the request error rather than rename if the
		// directory already exists
		ConflictBehavior: "fail",
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
		return "", err
	}
	//fmt.Printf("...Id %q\n", *info.Id)
	return info.ID, nil
}
|
||||||
|
|
||||||
|
// listAllFn is the user function called to process a File item from
// listAll
//
// Should return true to finish processing early
type listAllFn func(*api.Item) bool
|
||||||
|
|
||||||
|
// listAll lists the directory required calling the user function on
// each item found
//
// If directoriesOnly is set only directories are sent; if filesOnly
// is set only files are sent.  Deleted items are always skipped.
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := api.Opts{
		Method: "GET",
		Path:   "/drive/items/" + dirID + "/children?top=1000",
	}
OUTER:
	for {
		var result api.ListChildrenResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't list files: %v", err)
			break
		}
		if len(result.Value) == 0 {
			break
		}
		for i := range result.Value {
			// take a pointer into the slice rather than copying the item
			item := &result.Value[i]
			isFolder := item.Folder != nil
			if isFolder {
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			if item.Deleted != nil {
				continue
			}
			// Undo the reserved character substitution done on upload
			item.Name = restoreReservedChars(item.Name)
			if fn(item) {
				found = true
				break OUTER
			}
		}
		// Follow the pagination link if there is one
		if result.NextLink == "" {
			break
		}
		opts.Path = result.NextLink
		opts.Absolute = true
	}
	return
}
|
||||||
|
|
||||||
|
// Path should be directory path either "" or "path/"
|
||||||
|
//
|
||||||
|
// List the directory using a recursive list from the root
|
||||||
|
//
|
||||||
|
// This fetches the minimum amount of stuff but does more API calls
|
||||||
|
// which makes it slow
|
||||||
|
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
|
||||||
|
var subError error
|
||||||
|
// Make the API request
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
_, err := f.listAll(dirID, false, false, func(info *api.Item) bool {
|
||||||
|
// Recurse on directories
|
||||||
|
if info.Folder != nil {
|
||||||
|
wg.Add(1)
|
||||||
|
folder := path + info.Name + "/"
|
||||||
|
fs.Debug(f, "Reading %s", folder)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
err := f.listDirRecursive(info.ID, folder, out)
|
||||||
|
if err != nil {
|
||||||
|
subError = err
|
||||||
|
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
} else {
|
||||||
|
if fs := f.newObjectWithInfo(path+info.Name, info); fs != nil {
|
||||||
|
out <- fs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
})
|
||||||
|
wg.Wait()
|
||||||
|
fs.Debug(f, "Finished reading %s", path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if subError != nil {
|
||||||
|
return subError
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List walks the path returning a channel of Objects
//
// The listing runs in a background goroutine; the channel is closed
// when the listing finishes.  Errors are counted in fs.Stats and
// logged rather than returned.
func (f *Fs) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			err = f.listDirRecursive(f.dirCache.RootID(), "", out)
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "List failed: %s", err)
			}
		}
	}()
	return out
}
|
||||||
|
|
||||||
|
// ListDir lists the directories in the root, returning a channel of
// fs.Dir
//
// The listing runs in a background goroutine; the channel is closed
// when the listing finishes.  Errors are counted in fs.Stats and
// logged rather than returned.
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	go func() {
		defer close(out)
		err := f.dirCache.FindRoot(false)
		if err != nil {
			fs.Stats.Error()
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			_, err := f.listAll(f.dirCache.RootID(), true, false, func(item *api.Item) bool {
				dir := &fs.Dir{
					Name: item.Name,
					// Size and count are unknown by default
					Bytes: -1,
					Count: -1,
					When:  time.Time(item.LastModifiedDateTime),
				}
				if item.Folder != nil {
					dir.Count = item.Folder.ChildCount
				}
				out <- dir
				return false
			})
			if err != nil {
				fs.Stats.Error()
				fs.ErrorLog(f, "ListDir failed: %s", err)
			}
		}
	}()
	return out
}
|
||||||
|
|
||||||
|
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Create the directory for the object if it doesn't exist
	_, _, err := f.dirCache.FindPath(remote, true)
	if err != nil {
		return nil, err
	}
	// Temporary Object under construction - Update does the real work
	o := &Object{
		fs:     f,
		remote: remote,
	}
	return o, o.Update(in, modTime, size)
}
|
||||||
|
|
||||||
|
// Mkdir creates the container (the root directory) if it doesn't exist
func (f *Fs) Mkdir() error {
	// FindRoot(true) creates the directory hierarchy if needed
	return f.dirCache.FindRoot(true)
}
|
||||||
|
|
||||||
|
// deleteObject removes an object (file or folder) by ID
func (f *Fs) deleteObject(id string) error {
	opts := api.Opts{
		Method: "DELETE",
		Path:   "/drive/items/" + id,
		// DELETE returns no body
		NoResponse: true,
	}
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
}
|
||||||
|
|
||||||
|
// purgeCheck removes the root directory, if check is set then it
|
||||||
|
// refuses to do so if it has anything in
|
||||||
|
func (f *Fs) purgeCheck(check bool) error {
|
||||||
|
if f.root == "" {
|
||||||
|
return fmt.Errorf("Can't purge root directory")
|
||||||
|
}
|
||||||
|
dc := f.dirCache
|
||||||
|
err := dc.FindRoot(false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rootID := dc.RootID()
|
||||||
|
item, _, err := f.readMetaDataForPath(f.root)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if item.Folder == nil {
|
||||||
|
return fmt.Errorf("Not a folder")
|
||||||
|
}
|
||||||
|
if check && item.Folder.ChildCount != 0 {
|
||||||
|
return fmt.Errorf("Folder not empty")
|
||||||
|
}
|
||||||
|
err = f.deleteObject(rootID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.dirCache.ResetRoot()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
	// check=true refuses to delete a non-empty folder
	return f.purgeCheck(true)
}
|
||||||
|
|
||||||
|
// Precision returns the precision of modification times supported by
// this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}
|
||||||
|
|
||||||
|
// Copy src to this remote using server side copy operations.
|
||||||
|
//
|
||||||
|
// This is stored with the remote path given
|
||||||
|
//
|
||||||
|
// It returns the destination Object and a possible error
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
|
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
// srcObj, ok := src.(*Object)
|
||||||
|
// if !ok {
|
||||||
|
// fs.Debug(src, "Can't copy - not same remote type")
|
||||||
|
// return nil, fs.ErrorCantCopy
|
||||||
|
// }
|
||||||
|
// srcFs := srcObj.acd
|
||||||
|
// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
|
||||||
|
// if err != nil {
|
||||||
|
// return nil, err
|
||||||
|
// }
|
||||||
|
// return f.NewFsObject(remote), nil
|
||||||
|
//}
|
||||||
|
|
||||||
|
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	// check=false deletes the folder even if it isn't empty
	return f.purgeCheck(false)
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
|
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
	return o.fs
}
|
||||||
|
|
||||||
|
// String returns a string version of the object for logging
//
// Safe to call on a nil Object.
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
|
||||||
|
|
||||||
|
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
|
||||||
|
|
||||||
|
// srvPath returns the full path of the object for use on the server,
// with reserved characters substituted
func (o *Object) srvPath() string {
	return replaceReservedChars(o.fs.rootSlash() + o.remote)
}
|
||||||
|
|
||||||
|
// Md5sum returns the Md5sum of an object returning a lowercase hex string
//
// Returns "" as MD5 checksums are not supported by one drive.
func (o *Object) Md5sum() (string, error) {
	return "", nil // not supported by one drive
}
|
||||||
|
|
||||||
|
// Size returns the size of an object in bytes
//
// Returns 0 if the metadata can't be read.
func (o *Object) Size() int64 {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
		return 0
	}
	return o.size
}
|
||||||
|
|
||||||
|
// setMetaData sets the size, modification time and id of the object
// from info
func (o *Object) setMetaData(info *api.Item) {
	o.hasMetaData = true
	o.size = info.Size
	// Prefer the client-set FileSystemInfo time over the server-side
	// LastModifiedDateTime when it is available
	if info.FileSystemInfo != nil {
		o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
	} else {
		o.modTime = time.Time(info.LastModifiedDateTime)
	}
	o.id = info.ID
}
|
||||||
|
|
||||||
|
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info.  A no-op if the metadata has already been
// read.
func (o *Object) readMetaData() (err error) {
	if o.hasMetaData {
		return nil
	}
	// leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
	// if err != nil {
	// 	return err
	// }
	info, _, err := o.fs.readMetaDataForPath(o.srvPath())
	if err != nil {
		fs.Debug(o, "Failed to read info: %s", err)
		return err
	}
	o.setMetaData(info)
	return nil
}
|
||||||
|
|
||||||
|
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers.  Returns time.Now() if
// the metadata can't be read.
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
		return time.Now()
	}
	return o.modTime
}
|
||||||
|
|
||||||
|
// setModTime sets the modification time of the remote object via a
// PATCH request, returning the updated item info
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
	opts := api.Opts{
		Method: "PATCH",
		Path:   "/drive/root:/" + o.srvPath(),
	}
	update := api.SetFileSystemInfo{
		FileSystemInfo: api.FileSystemInfoFacet{
			CreatedDateTime:      api.Timestamp(modTime),
			LastModifiedDateTime: api.Timestamp(modTime),
		},
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
		return shouldRetry(resp, err)
	})
	// info is nil if err is non-nil
	return info, err
}
|
||||||
|
|
||||||
|
// SetModTime sets the modification time of the local fs object
|
||||||
|
func (o *Object) SetModTime(modTime time.Time) {
|
||||||
|
info, err := o.setModTime(modTime)
|
||||||
|
if err != nil {
|
||||||
|
fs.Stats.Error()
|
||||||
|
fs.ErrorLog(o, "Failed to update remote mtime: %v", err)
|
||||||
|
}
|
||||||
|
o.setMetaData(info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}
|
||||||
|
|
||||||
|
// Open an object for read
//
// The caller is responsible for closing the returned ReadCloser.
func (o *Object) Open() (in io.ReadCloser, err error) {
	// Can't download an object which hasn't been uploaded yet
	if o.id == "" {
		return nil, fmt.Errorf("Can't download no id")
	}
	var resp *http.Response
	opts := api.Opts{
		Method: "GET",
		Path:   "/drive/items/" + o.id + "/content",
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}
|
||||||
|
|
||||||
|
// createUploadSession creates an upload session for the object,
// returning the session info containing the upload URL
func (o *Object) createUploadSession() (response *api.CreateUploadResponse, err error) {
	opts := api.Opts{
		Method: "POST",
		Path:   "/drive/root:/" + o.srvPath() + ":/upload.createSession",
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
		return shouldRetry(resp, err)
	})
	return
}
|
||||||
|
|
||||||
|
// uploadFragment uploads one part of a multipart upload session
//
// url is the session upload URL, start is the byte offset of this
// fragment, totalSize is the size of the whole file and buf holds the
// fragment data.
func (o *Object) uploadFragment(url string, start int64, totalSize int64, buf []byte) (err error) {
	bufSize := int64(len(buf))
	opts := api.Opts{
		Method: "PUT",
		Path:   url,
		// url is a full URL returned by the server, not a path
		Absolute:      true,
		ContentLength: &bufSize,
		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start, start+bufSize-1, totalSize),
		Body:          bytes.NewReader(buf),
	}
	var response api.UploadFragmentResponse
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
		return shouldRetry(resp, err)
	})
	return err
}
|
||||||
|
|
||||||
|
// cancelUploadSession cancels an upload session given its upload URL
func (o *Object) cancelUploadSession(url string) (err error) {
	opts := api.Opts{
		Method: "DELETE",
		Path:   url,
		// url is a full URL returned by the server, not a path
		Absolute:   true,
		NoResponse: true,
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		return shouldRetry(resp, err)
	})
	return
}
|
||||||
|
|
||||||
|
// uploadMultipart uploads a file using multipart upload
|
||||||
|
func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
|
||||||
|
if chunkSize%(320*1024) != 0 {
|
||||||
|
return fmt.Errorf("Chunk size %d is not a multiple of 320k", chunkSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create upload session
|
||||||
|
fs.Debug(o, "Starting multipart upload")
|
||||||
|
session, err := o.createUploadSession()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
uploadURL := session.UploadURL
|
||||||
|
|
||||||
|
// Cancel the session if something went wrong
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
fs.Debug(o, "Cancelling multipart upload")
|
||||||
|
cancelErr := o.cancelUploadSession(uploadURL)
|
||||||
|
if cancelErr != nil {
|
||||||
|
fs.Log(o, "Failed to cancel multipart upload: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Upload the chunks
|
||||||
|
remaining := size
|
||||||
|
position := int64(0)
|
||||||
|
buf := make([]byte, int64(chunkSize))
|
||||||
|
for remaining > 0 {
|
||||||
|
n := int64(chunkSize)
|
||||||
|
if remaining < n {
|
||||||
|
n = remaining
|
||||||
|
buf = buf[:n]
|
||||||
|
}
|
||||||
|
_, err = io.ReadFull(in, buf)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fs.Debug(o, "Uploading segment %d/%d size %d", position, size, n)
|
||||||
|
err = o.uploadFragment(uploadURL, position, size, buf)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
remaining -= n
|
||||||
|
position += n
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the object with the contents of the io.Reader, modTime and size
//
// Small files are uploaded with a single PUT; larger ones use a
// multipart upload session.  Afterwards the modification time is set
// and the metadata refreshed.
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) (err error) {
	var info *api.Item
	if size <= int64(uploadCutoff) {
		// This is for less than 100 MB of content
		var resp *http.Response
		opts := api.Opts{
			Method: "PUT",
			Path:   "/drive/root:/" + o.srvPath() + ":/content",
			Body:   in,
		}
		// CallNoRetry because the request body can't be rewound for a
		// retry
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
			return shouldRetry(resp, err)
		})
		if err != nil {
			return err
		}
		o.setMetaData(info)
	} else {
		err = o.uploadMultipart(in, size)
		if err != nil {
			return err
		}
	}
	// Set the mod time now and read metadata
	info, err = o.setModTime(modTime)
	if err != nil {
		return err
	}
	o.setMetaData(info)
	return nil
}
|
||||||
|
|
||||||
|
// Remove an object from the remote by its ID
func (o *Object) Remove() error {
	return o.fs.deleteObject(o.id)
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs     = (*Fs)(nil)
	_ fs.Purger = (*Fs)(nil)
	// Copy, Move and DirMove are not implemented yet
	// _ fs.Copier = (*Fs)(nil)
	// _ fs.Mover = (*Fs)(nil)
	// _ fs.DirMover = (*Fs)(nil)
	_ fs.Object = (*Object)(nil)
)
|
56
onedrive/onedrive_test.go
Normal file
56
onedrive/onedrive_test.go
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
// Test OneDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package onedrive_test

import (
	"testing"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/ncw/rclone/onedrive"
)

// init tells the generic test framework which Object type and which
// configured remote name to use
func init() {
	fstests.NilObject = fs.Object((*onedrive.Object)(nil))
	fstests.RemoteName = "TestOneDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                { fstests.TestInit(t) }
func TestFsString(t *testing.T)            { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)        { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)     { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)             { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)         { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)      { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)          { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)          { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)      { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)       { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)          { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)         { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)       { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)     { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)              { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T)              { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T)           { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T)         { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)         { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)        { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)            { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)        { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)        { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)       { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)    { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)          { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)          { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)        { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)      { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)           { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)   { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)        { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)         { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)            { fstests.TestFinalise(t) }
|
91
onedrive/replace.go
Normal file
91
onedrive/replace.go
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
/*
|
||||||
|
Translate file names for one drive
|
||||||
|
|
||||||
|
OneDrive reserved characters
|
||||||
|
|
||||||
|
The following characters are OneDrive reserved characters, and can't
|
||||||
|
be used in OneDrive folder and file names.
|
||||||
|
|
||||||
|
onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
|
||||||
|
onedrive-business-reserved
|
||||||
|
= "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"
|
||||||
|
|
||||||
|
Note: Folder names can't end with a period (.).
|
||||||
|
|
||||||
|
Note: OneDrive for Business file or folder names cannot begin with a
|
||||||
|
tilde ('~').
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
package onedrive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '\', // FULLWIDTH REVERSE SOLIDUS
		'*':  '*', // FULLWIDTH ASTERISK
		'<':  '<', // FULLWIDTH LESS-THAN SIGN
		'>':  '>', // FULLWIDTH GREATER-THAN SIGN
		'?':  '?', // FULLWIDTH QUESTION MARK
		':':  ':', // FULLWIDTH COLON
		'|':  '|', // FULLWIDTH VERTICAL LINE
		'#':  '#', // FULLWIDTH NUMBER SIGN
		'%':  '%', // FULLWIDTH PERCENT SIGN
		'"':  '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		'.':  '.', // FULLWIDTH FULL STOP
		'~':  '~', // FULLWIDTH TILDE
		' ':  '␠', // SYMBOL FOR SPACE
	}
	// invCharMap is the inverse of charMap, built in init()
	invCharMap map[rune]rune
	// Patterns matching positions where '.', '~' and ' ' are not
	// allowed and must be substituted
	fixEndingInPeriod    = regexp.MustCompile(`\.(/|$)`)
	fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
|
||||||
|
|
||||||
|
// init builds invCharMap as the inverse of charMap so substituted
// names can be restored on download
func init() {
	// Create inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}
|
||||||
|
|
||||||
|
// replaceReservedChars takes a path and substitutes any reserved
// characters in it with their FULLWIDTH equivalents
//
// '.', '~' and ' ' are only substituted in the positions where
// OneDrive forbids them (trailing '.', leading '~' and ' ').
func replaceReservedChars(in string) string {
	// Folder names can't end with a period '.'
	in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
	// OneDrive for Business file or folder names cannot begin with a tilde '~'
	in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
	// Apparently file names can't start with space either
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Replace reserved characters everywhere else ('.', '~' and ' '
	// were already handled positionally above)
	return strings.Map(func(c rune) rune {
		if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
			return replacement
		}
		return c
	}, in)
}
|
||||||
|
|
||||||
|
// restoreReservedChars takes a path and undoes any substitutions
|
||||||
|
// made by replaceReservedChars
|
||||||
|
func restoreReservedChars(in string) string {
|
||||||
|
return strings.Map(func(c rune) rune {
|
||||||
|
if replacement, ok := invCharMap[c]; ok {
|
||||||
|
return replacement
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}, in)
|
||||||
|
}
|
30
onedrive/replace_test.go
Normal file
30
onedrive/replace_test.go
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
package onedrive

import "testing"

// TestReplace checks that replaceReservedChars substitutes reserved
// characters as expected and that restoreReservedChars is its exact
// inverse for every case.
func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{`\*<>?:|#%".~`, `\*<>?:|#%".~`},
		{`\*<>?:|#%".~/\*<>?:|#%".~`, `\*<>?:|#%".~/\*<>?:|#%".~`},
		{" leading space", "␠leading space"},
		{"~leading tilde", "~leading tilde"},
		{"trailing dot.", "trailing dot."},
		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
		{"~leading tilde/~leading tilde/~leading tilde", "~leading tilde/~leading tilde/~leading tilde"},
		{"trailing dot./trailing dot./trailing dot.", "trailing dot./trailing dot./trailing dot."},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		// Round-trip: restoring must give back the original input
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}
|
@ -21,6 +21,7 @@ import (
|
|||||||
_ "github.com/ncw/rclone/dropbox"
|
_ "github.com/ncw/rclone/dropbox"
|
||||||
_ "github.com/ncw/rclone/googlecloudstorage"
|
_ "github.com/ncw/rclone/googlecloudstorage"
|
||||||
_ "github.com/ncw/rclone/local"
|
_ "github.com/ncw/rclone/local"
|
||||||
|
_ "github.com/ncw/rclone/onedrive"
|
||||||
_ "github.com/ncw/rclone/s3"
|
_ "github.com/ncw/rclone/s3"
|
||||||
_ "github.com/ncw/rclone/swift"
|
_ "github.com/ncw/rclone/swift"
|
||||||
)
|
)
|
||||||
|
Loading…
Reference in New Issue
Block a user