seafile: New backend for seafile server

Author: Fred
Date: 2020-04-25 18:55:18 +01:00
Committed by: Nick Craig-Wood
Parent: 62cfe3f384
Commit: c754e89906
20 changed files with 3291 additions and 0 deletions


@@ -31,6 +31,7 @@ import (
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"


@@ -0,0 +1,153 @@
package api
// Some API objects are duplicated with only small differences, because
// the JSON objects returned are very inconsistent between API calls
// AuthenticationRequest contains user credentials
type AuthenticationRequest struct {
Username string `json:"username"`
Password string `json:"password"`
}
// AuthenticationResult is returned by a call to the authentication api
type AuthenticationResult struct {
Token string `json:"token"`
Errors []string `json:"non_field_errors"`
}
// AccountInfo contains simple user properties
type AccountInfo struct {
Usage int64 `json:"usage"`
Total int64 `json:"total"`
Email string `json:"email"`
Name string `json:"name"`
}
// ServerInfo contains server information
type ServerInfo struct {
Version string `json:"version"`
}
// DefaultLibrary is the library to use when none is specified
type DefaultLibrary struct {
ID string `json:"repo_id"`
Exists bool `json:"exists"`
}
// CreateLibraryRequest contains the information needed to create a library
type CreateLibraryRequest struct {
Name string `json:"name"`
Description string `json:"desc"`
Password string `json:"passwd"`
}
// Library properties. Note that not all properties are useful for rclone
type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}
// CreateLibrary properties. Seafile is not consistent and returns different types for different API calls
type CreateLibrary struct {
ID string `json:"repo_id"`
Name string `json:"repo_name"`
}
// FileType is either "dir" or "file"
type FileType string
// File types
var (
FileTypeDir FileType = "dir"
FileTypeFile FileType = "file"
)
// FileDetail contains file properties (for older api v2.0)
type FileDetail struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Parent string `json:"parent_dir"`
Modified string `json:"last_modified"`
}
// DirEntries contains a list of DirEntry
type DirEntries struct {
Entries []DirEntry `json:"dirent_list"`
}
// DirEntry contains a directory entry
type DirEntry struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"parent_dir"`
Modified int64 `json:"mtime"`
}
// Operation is move, copy or rename
type Operation string
// Operations
var (
CopyFileOperation Operation = "copy"
MoveFileOperation Operation = "move"
RenameFileOperation Operation = "rename"
)
// FileOperationRequest is sent to the api to copy, move or rename a file
type FileOperationRequest struct {
Operation Operation `json:"operation"`
DestinationLibraryID string `json:"dst_repo"` // For copy/move operation
DestinationPath string `json:"dst_dir"` // For copy/move operation
NewName string `json:"newname"` // Only to be used by the rename operation
}
// FileInfo is returned by a server file copy/move/rename (new api v2.1)
type FileInfo struct {
Type string `json:"type"`
LibraryID string `json:"repo_id"`
Path string `json:"parent_dir"`
Name string `json:"obj_name"`
ID string `json:"obj_id"`
Size int64 `json:"size"`
}
// CreateDirRequest only contains an operation field
type CreateDirRequest struct {
Operation string `json:"operation"`
}
// DirectoryDetail contains the directory details specific to the getDirectoryDetails call
type DirectoryDetail struct {
ID string `json:"repo_id"`
Name string `json:"name"`
Path string `json:"path"`
}
// ShareLinkRequest contains the information needed to create or list shared links
type ShareLinkRequest struct {
LibraryID string `json:"repo_id"`
Path string `json:"path"`
}
// SharedLink contains the information returned by a call to shared link creation
type SharedLink struct {
Link string `json:"link"`
IsExpired bool `json:"is_expired"`
}
// BatchSourceDestRequest contains JSON parameters for sending a batch copy or move operation
type BatchSourceDestRequest struct {
SrcLibraryID string `json:"src_repo_id"`
SrcParentDir string `json:"src_parent_dir"`
SrcItems []string `json:"src_dirents"`
DstLibraryID string `json:"dst_repo_id"`
DstParentDir string `json:"dst_parent_dir"`
}
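For reference, the json struct tags above define the exact wire format the client sends and receives. The short, self-contained sketch below (two of the types are redeclared locally so the example compiles on its own, and the field values are invented) shows the JSON produced for an authentication request and for a rename operation.

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of two of the api types above, redeclared for illustration only.
type AuthenticationRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

type FileOperationRequest struct {
	Operation            string `json:"operation"`
	DestinationLibraryID string `json:"dst_repo"`
	DestinationPath      string `json:"dst_dir"`
	NewName              string `json:"newname"`
}

func main() {
	auth, _ := json.Marshal(AuthenticationRequest{Username: "user@example.com", Password: "secret"})
	fmt.Println(string(auth)) // {"username":"user@example.com","password":"secret"}

	// dst_repo and dst_dir are still emitted (empty) because the tags have no omitempty.
	rename, _ := json.Marshal(FileOperationRequest{Operation: "rename", NewName: "new-name.txt"})
	fmt.Println(string(rename)) // {"operation":"rename","dst_repo":"","dst_dir":"","newname":"new-name.txt"}
}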

backend/seafile/object.go (new file, 127 lines)

@@ -0,0 +1,127 @@
package seafile
import (
"context"
"io"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// Object describes a seafile object (also commonly called a file)
type Object struct {
fs *Fs // what this object is part of
id string // internal ID of object
remote string // The remote path (includes the library name when the Fs points at the server root)
pathInLibrary string // Path of the object without the library name
size int64 // size of the object
modTime time.Time // modification time of the object
libraryID string // Needed to download the file
}
// ==================== Interface fs.DirEntry ====================
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote string
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns last modified time
func (o *Object) ModTime(context.Context) time.Time {
return o.modTime
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ==================== Interface fs.ObjectInfo ====================
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// ==================== Interface fs.Object ====================
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
return fs.ErrorCantSetModTime
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
downloadLink, err := o.fs.getDownloadLink(ctx, o.libraryID, o.pathInLibrary)
if err != nil {
return nil, err
}
reader, err := o.fs.download(ctx, downloadLink, o.Size(), options...)
if err != nil {
return nil, err
}
return reader, nil
}
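As the comment above says, callers must close the returned reader. A minimal sketch of typical caller-side usage follows (saveObject is a hypothetical helper, not part of this commit; it works with any fs.Object and uses only the packages already imported above).

// saveObject drains an fs.Object into dst and closes the reader afterwards.
func saveObject(ctx context.Context, obj fs.Object, dst io.Writer) error {
	rc, err := obj.Open(ctx)
	if err != nil {
		return err
	}
	defer func() { _ = rc.Close() }()
	_, err = io.Copy(dst, rc)
	return err
}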
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The upload sometimes returns a temporary 500 error
// We cannot use the pacer to retry uploading the file as the upload link is single use only
for retry := 0; retry <= 3; retry++ {
uploadLink, err := o.fs.getUploadLink(ctx, o.libraryID)
if err != nil {
return err
}
uploaded, err := o.fs.upload(ctx, in, uploadLink, o.pathInLibrary)
if err == ErrorInternalDuringUpload {
// This is a temporary error, try again with a new upload link
continue
}
if err != nil {
return err
}
// Set the properties from the upload back to the object
o.size = uploaded.Size
o.id = uploaded.ID
return nil
}
return ErrorInternalDuringUpload
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteFile(ctx, o.libraryID, o.pathInLibrary)
}
// ==================== Optional Interface fs.IDer ====================
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}

backend/seafile/pacer.go (new file, 67 lines)

@@ -0,0 +1,67 @@
package seafile
import (
"fmt"
"net/url"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/pacer"
)
const (
minSleep = 100 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Use only one pacer per server URL
var (
pacers map[string]*fs.Pacer
pacerMutex sync.Mutex
)
func init() {
pacers = make(map[string]*fs.Pacer, 0)
}
// getPacer returns the unique pacer for that remote URL
func getPacer(remote string) *fs.Pacer {
pacerMutex.Lock()
defer pacerMutex.Unlock()
remote = parseRemote(remote)
if existing, found := pacers[remote]; found {
return existing
}
pacers[remote] = fs.NewPacer(
pacer.NewDefault(
pacer.MinSleep(minSleep),
pacer.MaxSleep(maxSleep),
pacer.DecayConstant(decayConstant),
),
)
return pacers[remote]
}
// parseRemote formats a remote url into "hostname:port"
func parseRemote(remote string) string {
remoteURL, err := url.Parse(remote)
if err != nil {
// Return a default value in the very unlikely event that we cannot parse the remote URL
fs.Infof(nil, "Cannot parse remote %s", remote)
return "default"
}
host := remoteURL.Hostname()
port := remoteURL.Port()
if port == "" {
if remoteURL.Scheme == "https" {
port = "443"
} else {
port = "80"
}
}
return fmt.Sprintf("%s:%s", host, port)
}
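Because the pacer cache is keyed on the string returned by parseRemote, the port defaulting matters: two remotes pointing at the same server share one pacer. An illustrative table-driven test (not part of this commit) restating that behaviour:

package seafile

import "testing"

// TestParseRemote is illustrative only; the expected values simply restate
// the port defaulting (443 for https, 80 otherwise) implemented above.
func TestParseRemote(t *testing.T) {
	tests := map[string]string{
		"https://cloud.example.com/":      "cloud.example.com:443",
		"http://cloud.example.com/":       "cloud.example.com:80",
		"https://cloud.example.com:8443/": "cloud.example.com:8443",
	}
	for input, expected := range tests {
		if got := parseRemote(input); got != expected {
			t.Errorf("parseRemote(%q) = %q, want %q", input, got, expected)
		}
	}
}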

backend/seafile/seafile.go (new file, 1247 lines): diff suppressed because it is too large


@@ -0,0 +1,123 @@
package seafile
import (
"path"
"testing"
"github.com/stretchr/testify/assert"
)
type pathData struct {
configLibrary string // Library specified in the config
configRoot string // Root directory specified in the config
argumentPath string // Path given as an argument in the command line
expectedLibrary string
expectedPath string
}
// Test the method to split a library name and a path
// from a mix of configuration data and path command line argument
func TestSplitPath(t *testing.T) {
testData := []pathData{
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "",
expectedLibrary: "",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "Library",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: path.Join("Library", "path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: "path",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "root",
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: "",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: path.Join("subpath", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "subpath", "to", "file"),
},
}
for _, test := range testData {
fs := &Fs{
libraryName: test.configLibrary,
rootDirectory: test.configRoot,
}
libraryName, path := fs.splitPath(test.argumentPath)
assert.Equal(t, test.expectedLibrary, libraryName)
assert.Equal(t, test.expectedPath, path)
}
}
func TestSplitPathIntoSlice(t *testing.T) {
testData := map[string][]string{
"1": {"1"},
"/1": {"1"},
"/1/": {"1"},
"1/2/3": {"1", "2", "3"},
}
for input, expected := range testData {
output := splitPath(input)
assert.Equal(t, expected, output)
}
}
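The splitPath implementation itself is in the suppressed seafile.go diff; for orientation, a minimal version consistent with the cases above could look like the sketch below (splitPathSketch is a hypothetical name used to avoid claiming this is the committed code; it needs only the standard strings package).

// splitPathSketch mirrors the behaviour pinned down by TestSplitPathIntoSlice:
// leading and trailing slashes are dropped, then the remainder is split on "/".
func splitPathSketch(dir string) []string {
	dir = strings.Trim(dir, "/")
	if dir == "" {
		return nil
	}
	return strings.Split(dir, "/")
}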


@@ -0,0 +1,17 @@
// Test Seafile filesystem interface
package seafile_test
import (
"testing"
"github.com/rclone/rclone/backend/seafile"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSeafile:",
NilObject: (*seafile.Object)(nil),
})
}

backend/seafile/webapi.go (new file, 1083 lines): diff suppressed because it is too large