filefabric: Implement the Enterprise File Fabric backend

Missing features:
- M-Stream support
- OAuth-like flow (soon being changed to OAuth)

parent dfeae0e70a · commit 979bb07c86
```
@@ -30,6 +30,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
```
```
@@ -14,6 +14,7 @@ import (
    _ "github.com/rclone/rclone/backend/drive"
    _ "github.com/rclone/rclone/backend/dropbox"
    _ "github.com/rclone/rclone/backend/fichier"
    _ "github.com/rclone/rclone/backend/filefabric"
    _ "github.com/rclone/rclone/backend/ftp"
    _ "github.com/rclone/rclone/backend/googlecloudstorage"
    _ "github.com/rclone/rclone/backend/googlephotos"
```
backend/filefabric/api/types.go (new file, 391 lines)
```go
// Package api has type definitions for filefabric
//
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
package api

import (
    "bytes"
    "fmt"
    "reflect"
    "strings"
    "time"
)

const (
    // TimeFormat for parameters (UTC)
    timeFormatParameters = `2006-01-02 15:04:05`
    // "2020-08-11 10:10:04" for JSON parsing
    timeFormatJSON = `"` + timeFormatParameters + `"`
)

// Time represents date and time information for the
// filefabric API
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
    timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
    return []byte(timeString), nil
}

var zeroTime = []byte(`"0000-00-00 00:00:00"`)

// UnmarshalJSON turns JSON into a Time (in UTC)
func (t *Time) UnmarshalJSON(data []byte) error {
    // Set a Zero time.Time if we receive a zero time input
    if bytes.Equal(data, zeroTime) {
        *t = Time(time.Time{})
        return nil
    }
    newT, err := time.Parse(timeFormatJSON, string(data))
    if err != nil {
        return err
    }
    *t = Time(newT)
    return nil
}

// String turns a Time into a string in UTC suitable for the API
// parameters
func (t Time) String() string {
    return time.Time(t).UTC().Format(timeFormatParameters)
}
```
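A quick illustration (not from the commit itself) of how this custom `Time` type round-trips through `encoding/json`, assuming the `api` package is imported from the path added above:

```go
// Illustrative only - not part of types.go
package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/rclone/rclone/backend/filefabric/api"
)

func main() {
    var t api.Time

    // The API's zero timestamp decodes to Go's zero time.Time.
    if err := json.Unmarshal([]byte(`"0000-00-00 00:00:00"`), &t); err != nil {
        panic(err)
    }
    fmt.Println(time.Time(t).IsZero()) // true

    // A real timestamp decodes and re-encodes unchanged.
    if err := json.Unmarshal([]byte(`"2020-08-11 10:10:04"`), &t); err != nil {
        panic(err)
    }
    out, err := json.Marshal(&t)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // "2020-08-11 10:10:04"
    fmt.Println(t)           // 2020-08-11 10:10:04 (parameter format via String)
}
```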
```go
// Status is returned in all status responses
type Status struct {
    Code    string `json:"status"`
    Message string `json:"statusmessage"`
    TaskID  string `json:"taskid"`
    // Warning string `json:"warning"` // obsolete
}

// Status satisfies the error interface
func (e *Status) Error() string {
    return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

// OK returns true if the status is all good
func (e *Status) OK() bool {
    return e.Code == "ok"
}

// GetCode returns the status code if any
func (e *Status) GetCode() string {
    return e.Code
}

// OKError defines an interface for items which can be OK or be an error
type OKError interface {
    error
    OK() bool
    GetCode() string
}

// Check Status satisfies the OKError interface
var _ OKError = (*Status)(nil)
```
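Since every response type below embeds `Status`, a caller can treat any decoded response as an `OKError` and turn it into a Go error. A minimal sketch of that pattern (the real client code lives in `filefabric.go`, whose diff is suppressed further down; `checkStatus` is made up for illustration):

```go
// Illustrative only - not part of types.go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/rclone/rclone/backend/filefabric/api"
)

// checkStatus returns nil if the response reports "ok", otherwise the
// response itself, which satisfies error through the embedded Status.
func checkStatus(resp api.OKError) error {
    if resp.OK() {
        return nil
    }
    return resp
}

func main() {
    var resp api.EmptyResponse
    body := []byte(`{"status":"error","statusmessage":"something went wrong"}`)
    if err := json.Unmarshal(body, &resp); err != nil {
        panic(err)
    }
    fmt.Println(checkStatus(&resp)) // something went wrong (error)
}
```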
```go
// EmptyResponse is a response which just returns the error condition
type EmptyResponse struct {
    Status
}

// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
type GetTokenByAuthTokenResponse struct {
    Status
    Token              string `json:"token"`
    UserID             string `json:"userid"`
    AllowLoginRemember string `json:"allowloginremember"`
    LastLogin          Time   `json:"lastlogin"`
    AutoLoginCode      string `json:"autologincode"`
}

// ApplianceInfo is the response to getApplianceInfo
type ApplianceInfo struct {
    Status
    Sitetitle            string `json:"sitetitle"`
    OauthLoginSupport    string `json:"oauthloginsupport"`
    IsAppliance          string `json:"isappliance"`
    SoftwareVersion      string `json:"softwareversion"`
    SoftwareVersionLabel string `json:"softwareversionlabel"`
}

// GetFolderContentsResponse is returned from getFolderContents
type GetFolderContentsResponse struct {
    Status
    Total  int    `json:"total,string"`
    Items  []Item `json:"filelist"`
    Folder Item   `json:"folder"`
    From   int    `json:"from,string"`
    //Count int `json:"count"`
    Pid           string `json:"pid"`
    RefreshResult Status `json:"refreshresult"`
    // Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
    Parents           []Item            `json:"parents"`
    CustomPermissions CustomPermissions `json:"custompermissions"`
}

// ItemType determines whether it is a file or a folder
type ItemType uint8

// Types of things in Item
const (
    ItemTypeFile   ItemType = 0
    ItemTypeFolder ItemType = 1
)

// Item is a File or a Folder
type Item struct {
    ID  string `json:"fi_id"`
    PID string `json:"fi_pid"`
    // UID string `json:"fi_uid"`
    Name string `json:"fi_name"`
    // S3Name string `json:"fi_s3name"`
    // Extension string `json:"fi_extension"`
    // Description string `json:"fi_description"`
    Type ItemType `json:"fi_type,string"`
    // Created Time `json:"fi_created"`
    Size        int64  `json:"fi_size,string"`
    ContentType string `json:"fi_contenttype"`
    // Tags string `json:"fi_tags"`
    // MainCode string `json:"fi_maincode"`
    // Public int `json:"fi_public,string"`
    // Provider string `json:"fi_provider"`
    // ProviderFolder string `json:"fi_providerfolder"` // folder
    // Encrypted int `json:"fi_encrypted,string"`
    // StructType string `json:"fi_structtype"`
    // Bname string `json:"fi_bname"` // folder
    // OrgID string `json:"fi_orgid"`
    // Favorite int `json:"fi_favorite,string"`
    // IspartOf string `json:"fi_ispartof"` // folder
    Modified Time `json:"fi_modified"`
    // LastAccessed Time `json:"fi_lastaccessed"`
    // Hits int64 `json:"fi_hits,string"`
    // IP string `json:"fi_ip"` // folder
    // BigDescription string `json:"fi_bigdescription"`
    LocalTime Time `json:"fi_localtime"`
    // OrgfolderID string `json:"fi_orgfolderid"`
    // StorageIP string `json:"fi_storageip"` // folder
    // RemoteTime Time `json:"fi_remotetime"`
    // ProviderOptions string `json:"fi_provideroptions"`
    // Access string `json:"fi_access"`
    // Hidden string `json:"fi_hidden"` // folder
    // VersionOf string `json:"fi_versionof"`
    Trash bool `json:"trash"`
    // Isbucket string `json:"isbucket"` // filelist
    SubFolders int64 `json:"subfolders"` // folder
}

// ItemFields is a | separated list of fields in Item
var ItemFields = mustFields(Item{})

// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
    var tags []string
    def := reflect.ValueOf(opt)
    defType := def.Type()
    for i := 0; i < def.NumField(); i++ {
        field := defType.Field(i)
        tag, ok := field.Tag.Lookup("json")
        if !ok {
            continue
        }
        if comma := strings.IndexRune(tag, ','); comma >= 0 {
            // Strip options such as ",string", keeping just the field name
            tag = tag[:comma]
        }
        if tag == "" {
            continue
        }
        tags = append(tags, tag)
    }
    return strings.Join(tags, "|"), nil
}

// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
    tags, err := fields(opt)
    if err != nil {
        panic(err)
    }
    return tags
}
```
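For reference, with only the fields left uncommented in `Item`, `ItemFields` works out to the pipe-separated list below (a quick check, assuming the struct stays as shown):

```go
// Illustrative only - not part of types.go
package main

import (
    "fmt"

    "github.com/rclone/rclone/backend/filefabric/api"
)

func main() {
    fmt.Println(api.ItemFields)
    // fi_id|fi_pid|fi_name|fi_type|fi_size|fi_contenttype|fi_modified|fi_localtime|trash|subfolders
}
```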
```go
// CustomPermissions is returned as part of GetFolderContentsResponse
type CustomPermissions struct {
    Upload            string `json:"upload"`
    CreateSubFolder   string `json:"createsubfolder"`
    Rename            string `json:"rename"`
    Delete            string `json:"delete"`
    Move              string `json:"move"`
    ManagePermissions string `json:"managepermissions"`
    ListOnly          string `json:"listonly"`
    VisibleInTrash    string `json:"visibleintrash"`
}

// DoCreateNewFolderResponse is the response from doCreateNewFolder
type DoCreateNewFolderResponse struct {
    Status
    Item Item `json:"file"`
}

// DoInitUploadResponse is the response from doInitUpload
type DoInitUploadResponse struct {
    Status
    ProviderID          string `json:"providerid"`
    UploadCode          string `json:"uploadcode"`
    FileType            string `json:"filetype"`
    DirectUploadSupport string `json:"directuploadsupport"`
    ResumeAllowed       string `json:"resumeallowed"`
}

// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
//
// Sometimes the response is returned as XML and sometimes as JSON
type UploaderResponse struct {
    FileSize int64  `xml:"filesize" json:"filesize,string"`
    MD5      string `xml:"md5" json:"md5"`
    Success  string `xml:"success" json:"success"`
}

// UploadStatus is returned from getUploadStatus
type UploadStatus struct {
    Status
    UploadCode     string `json:"uploadcode"`
    Metafile       string `json:"metafile"`
    Percent        int    `json:"percent,string"`
    Uploaded       int64  `json:"uploaded,string"`
    Size           int64  `json:"size,string"`
    Filename       string `json:"filename"`
    Nofile         string `json:"nofile"`
    Completed      string `json:"completed"`
    Completsuccess string `json:"completsuccess"`
    Completerror   string `json:"completerror"`
}

// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
    Status
    UploadedSize int64  `json:"uploadedsize,string"`
    StorageIP    string `json:"storageip"`
    UploadedName string `json:"uploadedname"`
    // Versioned []interface{} `json:"versioned"`
    // VersionedID int `json:"versionedid"`
    // Comment interface{} `json:"comment"`
    File Item `json:"file"`
    // UsSize string `json:"us_size"`
    // PaSize string `json:"pa_size"`
    // SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// Providers is returned as part of UploadResponse
type Providers struct {
    Max     string `json:"max"`
    Used    string `json:"used"`
    ID      string `json:"id"`
    Private string `json:"private"`
    Limit   string `json:"limit"`
    Percent int    `json:"percent"`
}

// Total is returned as part of UploadResponse
type Total struct {
    Max        string `json:"max"`
    Used       string `json:"used"`
    ID         string `json:"id"`
    Priused    string `json:"priused"`
    Primax     string `json:"primax"`
    Limit      string `json:"limit"`
    Percent    int    `json:"percent"`
    Pripercent int    `json:"pripercent"`
}

// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
    Providers []Providers `json:"providers"`
    Total     Total       `json:"total"`
}

// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
    Response UploadResponse `json:"response"`
    Status   string         `json:"status"`
}

// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
    Status
    Deleted        []string      `json:"deleted"`
    Errors         []interface{} `json:"errors"`
    ID             string        `json:"fi_id"`
    BackgroundTask int           `json:"backgroundtask"`
    UsSize         string        `json:"us_size"`
    PaSize         string        `json:"pa_size"`
    //SpaceInfo SpaceInfo `json:"spaceinfo"`
}

// FileResponse is returned from doRenameFile
type FileResponse struct {
    Status
    Item   Item   `json:"file"`
    Exists string `json:"exists"`
}

// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
    Status
    Filesleft         string   `json:"filesleft"`
    Addedtobackground string   `json:"addedtobackground"`
    Moved             string   `json:"moved"`
    Item              Item     `json:"file"`
    IDs               []string `json:"fi_ids"`
    Length            int      `json:"length"`
    DirID             string   `json:"dir_id"`
    MovedObjects      []Item   `json:"movedobjects"`
    // FolderTasks []interface{} `json:"foldertasks"`
}

// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
    Status
    Tasks []Task `json:"tasks"`
    Total string `json:"total"`
}

// BtData is part of TasksResponse
type BtData struct {
    Callback string `json:"callback"`
}

// Task describes a task returned in TasksResponse
type Task struct {
    BtID             string `json:"bt_id"`
    UsID             string `json:"us_id"`
    BtType           string `json:"bt_type"`
    BtData           BtData `json:"bt_data"`
    BtStatustext     string `json:"bt_statustext"`
    BtStatusdata     string `json:"bt_statusdata"`
    BtMessage        string `json:"bt_message"`
    BtProcent        string `json:"bt_procent"`
    BtAdded          string `json:"bt_added"`
    BtStatus         string `json:"bt_status"`
    BtCompleted      string `json:"bt_completed"`
    BtTitle          string `json:"bt_title"`
    BtCredentials    string `json:"bt_credentials"`
    BtHidden         string `json:"bt_hidden"`
    BtAutoremove     string `json:"bt_autoremove"`
    BtDevsite        string `json:"bt_devsite"`
    BtPriority       string `json:"bt_priority"`
    BtReport         string `json:"bt_report"`
    BtSitemarker     string `json:"bt_sitemarker"`
    BtExecuteafter   string `json:"bt_executeafter"`
    BtCompletestatus string `json:"bt_completestatus"`
    BtSubtype        string `json:"bt_subtype"`
    BtCanceled       string `json:"bt_canceled"`
    Callback         string `json:"callback"`
    CanBeCanceled    bool   `json:"canbecanceled"`
    CanBeRestarted   bool   `json:"canberestarted"`
    Type             string `json:"type"`
    Status           string `json:"status"`
    Settings         string `json:"settings"`
}
```
backend/filefabric/filefabric.go (new file, 1347 lines)
File diff suppressed because it is too large.
backend/filefabric/filefabric_test.go (new file, 17 lines)
```go
// Test filefabric filesystem interface
package filefabric_test

import (
    "testing"

    "github.com/rclone/rclone/backend/filefabric"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFileFabric:",
        NilObject:  (*filefabric.Object)(nil),
    })
}
```
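Assuming a `TestFileFabric:` remote is configured, these integration tests would typically be run from the backend directory with something like:

    go test -v -remote TestFileFabric: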
```
@@ -36,6 +36,7 @@ docs = [
    "sharefile.md",
    "crypt.md",
    "dropbox.md",
    "filefabric.md",
    "ftp.md",
    "googlecloudstorage.md",
    "drive.md",
```
```
@@ -115,6 +115,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
{{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
{{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
```
```
@@ -30,6 +30,7 @@ See the following for detailed instructions for
* [Crypt](/crypt/) - to encrypt other remotes
* [DigitalOcean Spaces](/s3/#digitalocean-spaces)
* [Dropbox](/dropbox/)
* [Enterprise File Fabric](/filefabric/)
* [FTP](/ftp/)
* [Google Cloud Storage](/googlecloudstorage/)
* [Google Drive](/drive/)
```
docs/content/filefabric.md (new file, 260 lines)
---
title: "Enterprise File Fabric"
description: "Rclone docs for the Enterprise File Fabric backend"
---

{{< icon "fa fa-cloud" >}} Enterprise File Fabric
-----------------------------------------

This backend supports [Storage Made Easy's Enterprise File
Fabric™](https://storagemadeeasy.com/about/) which provides a software
solution to integrate and unify File and Object Storage accessible
through a global file system.

The initial setup for the Enterprise File Fabric backend involves
getting a token from the Enterprise File Fabric which you need to
do in your browser. `rclone config` walks you through it.

Here is an example of how to make a remote called `remote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[snip]
XX / Enterprise File Fabric
   \ "filefabric"
[snip]
Storage> filefabric
** See help for filefabric backend at: https://rclone.org/filefabric/ **

URL of the Enterprise File Fabric to connect to
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
 1 / Storage Made Easy US
   \ "https://storagemadeeasy.com"
 2 / Storage Made Easy EU
   \ "https://eu.storagemadeeasy.com"
 3 / Connect to your Enterprise File Fabric
   \ "https://yourfabric.smestorage.com"
url> https://yourfabric.smestorage.com/
ID of the root folder
Leave blank normally.

Fill in to make rclone start with directory of a given ID.

Enter a string value. Press Enter for the default ("").
root_folder_id> 
Permanent Authentication Token

A Permanent Authentication Token can be created in the Enterprise File
Fabric: on the user's Dashboard under Security there is an entry
called "My Authentication Tokens". Click the Manage button
to create one.

These tokens are normally valid for several years.

For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens

Enter a string value. Press Enter for the default ("").
permanent_token> xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[remote]
type = filefabric
url = https://yourfabric.smestorage.com/
permanent_token = xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Once configured you can then use `rclone` like this,

List directories in the top level of your Enterprise File Fabric

    rclone lsd remote:

List all the files in your Enterprise File Fabric

    rclone ls remote:

To copy a local directory to an Enterprise File Fabric directory called backup

    rclone copy /home/source remote:backup

### Modified time and hashes

The Enterprise File Fabric allows modification times to be set on
files accurate to 1 second. These will be used to detect whether
objects need syncing or not.

The Enterprise File Fabric does not support any data hashes at this time.
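Since change detection therefore relies on size and modification time rather than checksums, a plain sync works as you would expect, for example

    rclone sync /home/source remote:backup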
### Restricted filename characters

The [default restricted characters set](/overview/#restricted-characters)
will be replaced.

Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.

### Empty files

Empty files aren't supported by the Enterprise File Fabric. Rclone will therefore
upload an empty file as a single space with a mime type of
`application/vnd.rclone.empty.file` and files with that mime type are
treated as empty.
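To make the convention concrete, here is a minimal sketch of the translation just described (this is not the backend's actual code, which is elided from this diff, and the function names are made up for illustration):

```go
// Illustrative only - a sketch of the empty-file convention above.
package main

import "fmt"

// emptyMimeType marks placeholder objects that stand in for empty files.
const emptyMimeType = "application/vnd.rclone.empty.file"

// toUpload returns the body and MIME type actually stored for a file.
func toUpload(data []byte, mimeType string) ([]byte, string) {
    if len(data) == 0 {
        return []byte(" "), emptyMimeType // a single space stands in for an empty file
    }
    return data, mimeType
}

// fromDownload reverses the translation when reading the file back.
func fromDownload(data []byte, mimeType string) []byte {
    if mimeType == emptyMimeType {
        return nil
    }
    return data
}

func main() {
    body, mime := toUpload(nil, "text/plain")
    fmt.Printf("stored %q as %s\n", body, mime)
    fmt.Printf("read back %d bytes\n", len(fromDownload(body, mime)))
}
```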
### Root folder ID

You can set the `root_folder_id` for rclone. This is the directory
(identified by its `Folder ID`) that rclone considers to be the root
of your Enterprise File Fabric.

Normally you will leave this blank and rclone will determine the
correct root to use itself.

However you can set this to restrict rclone to a specific folder
hierarchy.

In order to do this you will have to find the `Folder ID` of the
directory you wish rclone to display. These aren't displayed in the
web interface, but you can use `rclone lsf` to find them, for example

```
$ rclone lsf --dirs-only -Fip --csv filefabric:
120673758,Burnt PDFs/
120673759,My Quick Uploads/
120673755,My Syncs/
120673756,My backups/
120673757,My contacts/
120673761,S3 Storage/
```

The ID for "S3 Storage" would be `120673761`.
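So, to restrict the remote from the walkthrough above to the "S3 Storage" folder, its config would gain one extra line (token redacted as before):

```
[remote]
type = filefabric
url = https://yourfabric.smestorage.com/
permanent_token = xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx
root_folder_id = 120673761
```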
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/filefabric/filefabric.go then run make backenddocs" >}}
### Standard Options

Here are the standard options specific to filefabric (Enterprise File Fabric).

#### --filefabric-url

URL of the Enterprise File Fabric to connect to

- Config: url
- Env Var: RCLONE_FILEFABRIC_URL
- Type: string
- Default: ""
- Examples:
    - "https://storagemadeeasy.com"
        - Storage Made Easy US
    - "https://eu.storagemadeeasy.com"
        - Storage Made Easy EU
    - "https://yourfabric.smestorage.com"
        - Connect to your Enterprise File Fabric

#### --filefabric-root-folder-id

ID of the root folder
Leave blank normally.

Fill in to make rclone start with directory of a given ID.

- Config: root_folder_id
- Env Var: RCLONE_FILEFABRIC_ROOT_FOLDER_ID
- Type: string
- Default: ""

#### --filefabric-permanent-token

Permanent Authentication Token

A Permanent Authentication Token can be created in the Enterprise File
Fabric: on the user's Dashboard under Security there is an entry
called "My Authentication Tokens". Click the Manage button
to create one.

These tokens are normally valid for several years.

For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens

- Config: permanent_token
- Env Var: RCLONE_FILEFABRIC_PERMANENT_TOKEN
- Type: string
- Default: ""

### Advanced Options

Here are the advanced options specific to filefabric (Enterprise File Fabric).

#### --filefabric-token

Session Token

This is a session token which rclone caches in the config file. It is
usually valid for 1 hour.

Don't set this value - rclone will set it automatically.

- Config: token
- Env Var: RCLONE_FILEFABRIC_TOKEN
- Type: string
- Default: ""

#### --filefabric-token-expiry

Token expiry time

Don't set this value - rclone will set it automatically.

- Config: token_expiry
- Env Var: RCLONE_FILEFABRIC_TOKEN_EXPIRY
- Type: string
- Default: ""

#### --filefabric-version

Version read from the file fabric

Don't set this value - rclone will set it automatically.

- Config: version
- Env Var: RCLONE_FILEFABRIC_VERSION
- Type: string
- Default: ""

#### --filefabric-encoding

This sets the encoding for the backend.

See: the [encoding section in the overview](/overview/#encoding) for more info.

- Config: encoding
- Env Var: RCLONE_FILEFABRIC_ENCODING
- Type: MultiEncoder
- Default: Slash,Del,Ctl,InvalidUtf8,Dot

{{< rem autogenerated options stop >}}
```
@@ -23,6 +23,7 @@ Here is an overview of the major features of each cloud storage system.
| Box                    | SHA1     | Yes | Yes | No  | -   |
| Citrix ShareFile       | MD5      | Yes | Yes | No  | -   |
| Dropbox                | DBHASH ¹ | Yes | Yes | No  | -   |
| Enterprise File Fabric | -        | Yes | Yes | No  | R/W |
| FTP                    | -        | No  | No  | No  | -   |
| Google Cloud Storage   | MD5      | Yes | No  | No  | R/W |
| Google Drive           | MD5      | Yes | No  | Yes | R/W |
```
```
@@ -334,6 +335,7 @@ upon backend specific capabilities.
| Box                    | Yes | Yes | Yes | Yes | Yes ‡‡ | No  | Yes | Yes | No  | Yes |
| Citrix ShareFile       | Yes | Yes | Yes | Yes | No     | No  | Yes | No  | No  | Yes |
| Dropbox                | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | Yes | Yes | Yes | Yes |
| Enterprise File Fabric | Yes | Yes | Yes | Yes | No     | No  | No  | No  | No  | Yes |
| FTP                    | No  | No  | Yes | Yes | No     | No  | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | Yes |
| Google Cloud Storage   | Yes | Yes | No  | No  | No     | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
| Google Drive           | Yes | Yes | Yes | Yes | Yes    | Yes | Yes | Yes | Yes | Yes |
```
```
@@ -72,6 +72,7 @@
<a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square"></i> Citrix ShareFile</a>
<a class="dropdown-item" href="/crypt/"><i class="fa fa-lock"></i> Crypt (encrypts the others)</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file"></i> FTP</a>
<a class="dropdown-item" href="/googlecloudstorage/"><i class="fab fa-google"></i> Google Cloud Storage</a>
<a class="dropdown-item" href="/drive/"><i class="fab fa-google"></i> Google Drive</a>
```
```
@@ -94,6 +94,9 @@ backends:
 - backend: "dropbox"
   remote: "TestDropbox:"
   fastlist: false
 - backend: "filefabric"
   remote: "TestFileFabric:"
   fastlist: false
 - backend: "googlecloudstorage"
   remote: "TestGoogleCloudStorage:"
   fastlist: true
```
```
@@ -291,3 +294,6 @@ backends:
 - backend: "tardigrade"
   remote: "TestTardigrade:"
   fastlist: true
 - backend: "filefabric"
   remote: "TestFileFabric:"
   fastlist: false
```