mirror of
https://github.com/rclone/rclone.git
synced 2024-11-25 09:54:44 +01:00
The memory backend
This is a bucket-based remote
This commit is contained in:
parent
277d94feac
commit
c789436580
@ -46,6 +46,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
|||||||
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||||
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||||
|
* Memory [:page_facing_up:](https://rclone.org/memory/)
|
||||||
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||||
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
|
||||||
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
|
||||||
|
@ -23,6 +23,7 @@ import (
|
|||||||
_ "github.com/rclone/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
_ "github.com/rclone/rclone/backend/mailru"
|
_ "github.com/rclone/rclone/backend/mailru"
|
||||||
_ "github.com/rclone/rclone/backend/mega"
|
_ "github.com/rclone/rclone/backend/mega"
|
||||||
|
_ "github.com/rclone/rclone/backend/memory"
|
||||||
_ "github.com/rclone/rclone/backend/onedrive"
|
_ "github.com/rclone/rclone/backend/onedrive"
|
||||||
_ "github.com/rclone/rclone/backend/opendrive"
|
_ "github.com/rclone/rclone/backend/opendrive"
|
||||||
_ "github.com/rclone/rclone/backend/pcloud"
|
_ "github.com/rclone/rclone/backend/pcloud"
|
||||||
|
624
backend/memory/memory.go
Normal file
624
backend/memory/memory.go
Normal file
@ -0,0 +1,624 @@
|
|||||||
|
// Package memory provides an interface to an in memory object storage system
|
||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/md5"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/walk"
|
||||||
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// hashType is the only hash algorithm supported by this backend
	hashType = hash.MD5
	// the object storage is persistent
	//
	// NOTE: "persistent" here means shared between every Fs instance
	// in this process - nothing survives process exit.
	buckets = newBucketsInfo()
)
|
||||||
|
|
||||||
|
// Register with Fs
//
// Runs at package load time so the "memory" backend appears in the
// rclone backend registry. The backend takes no options.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "memory",
		Description: "In memory object storage system.",
		NewFs:       NewFs,
		Options:     []fs.Option{},
	})
}
|
||||||
|
|
||||||
|
// Options defines the configuration for this backend
//
// Currently empty - the struct is kept so configstruct.Set in NewFs
// has a target and options can be added later without an interface
// change.
type Options struct {
}
|
||||||
|
|
||||||
|
// Fs represents a remote memory server
//
// All Fs instances share the package-level buckets store.
type Fs struct {
	name          string       // name of this remote
	root          string       // the path we are working on if any
	opt           Options      // parsed config options
	rootBucket    string       // bucket part of root (if any)
	rootDirectory string       // directory part of root (if any)
	features      *fs.Features // optional features
}
|
||||||
|
|
||||||
|
// bucketsInfo holds info about all the buckets
//
// A single package-level instance (buckets) backs every Fs.
type bucketsInfo struct {
	mu      sync.RWMutex           // protects buckets
	buckets map[string]*bucketInfo // bucket name => contents
}
|
||||||
|
|
||||||
|
func newBucketsInfo() *bucketsInfo {
|
||||||
|
return &bucketsInfo{
|
||||||
|
buckets: make(map[string]*bucketInfo, 16),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getBucket gets a names bucket or nil
|
||||||
|
func (bi *bucketsInfo) getBucket(name string) (b *bucketInfo) {
|
||||||
|
bi.mu.RLock()
|
||||||
|
b = bi.buckets[name]
|
||||||
|
bi.mu.RUnlock()
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeBucket returns the bucket or makes it
|
||||||
|
func (bi *bucketsInfo) makeBucket(name string) (b *bucketInfo) {
|
||||||
|
bi.mu.Lock()
|
||||||
|
defer bi.mu.Unlock()
|
||||||
|
b = bi.buckets[name]
|
||||||
|
if b != nil {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
b = newBucketInfo()
|
||||||
|
bi.buckets[name] = b
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteBucket deleted the bucket or returns an error
|
||||||
|
func (bi *bucketsInfo) deleteBucket(name string) error {
|
||||||
|
bi.mu.Lock()
|
||||||
|
defer bi.mu.Unlock()
|
||||||
|
b := bi.buckets[name]
|
||||||
|
if b == nil {
|
||||||
|
return fs.ErrorDirNotFound
|
||||||
|
}
|
||||||
|
if !b.isEmpty() {
|
||||||
|
return fs.ErrorDirectoryNotEmpty
|
||||||
|
}
|
||||||
|
delete(bi.buckets, name)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getObjectData gets an object from (bucketName, bucketPath) or nil
|
||||||
|
func (bi *bucketsInfo) getObjectData(bucketName, bucketPath string) (od *objectData) {
|
||||||
|
b := bi.getBucket(bucketName)
|
||||||
|
if b == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return b.getObjectData(bucketPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateObjectData updates an object from (bucketName, bucketPath)
|
||||||
|
func (bi *bucketsInfo) updateObjectData(bucketName, bucketPath string, od *objectData) {
|
||||||
|
b := bi.makeBucket(bucketName)
|
||||||
|
b.mu.Lock()
|
||||||
|
b.objects[bucketPath] = od
|
||||||
|
b.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeObjectData removes an object from (bucketName, bucketPath) returning true if removed
|
||||||
|
func (bi *bucketsInfo) removeObjectData(bucketName, bucketPath string) (removed bool) {
|
||||||
|
b := bi.getBucket(bucketName)
|
||||||
|
if b != nil {
|
||||||
|
b.mu.Lock()
|
||||||
|
od := b.objects[bucketPath]
|
||||||
|
if od != nil {
|
||||||
|
delete(b.objects, bucketPath)
|
||||||
|
removed = true
|
||||||
|
}
|
||||||
|
b.mu.Unlock()
|
||||||
|
}
|
||||||
|
return removed
|
||||||
|
}
|
||||||
|
|
||||||
|
// bucketInfo holds info about a single bucket
type bucketInfo struct {
	mu      sync.RWMutex           // protects objects
	objects map[string]*objectData // bucketPath => object
}
|
||||||
|
|
||||||
|
func newBucketInfo() *bucketInfo {
|
||||||
|
return &bucketInfo{
|
||||||
|
objects: make(map[string]*objectData, 16),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getObjectData returns the object stored under name, or nil if
// there isn't one.
func (bi *bucketInfo) getObjectData(name string) (od *objectData) {
	bi.mu.RLock()
	od = bi.objects[name]
	bi.mu.RUnlock()
	return od
}
|
||||||
|
|
||||||
|
// isEmpty reports whether the bucket contains no objects.
func (bi *bucketInfo) isEmpty() (empty bool) {
	bi.mu.RLock()
	empty = len(bi.objects) == 0
	bi.mu.RUnlock()
	return empty
}
|
||||||
|
|
||||||
|
// the object data and metadata
type objectData struct {
	modTime  time.Time // last modification time
	hash     string    // cached lowercase hex MD5, "" until computed by Hash
	mimeType string    // mime type detected at upload time
	data     []byte    // the object contents
}
|
||||||
|
|
||||||
|
// Object describes a memory object
type Object struct {
	fs     *Fs         // what this object is part of
	remote string      // The remote path
	od     *objectData // the object data
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string for display in logs
func (f *Fs) String() string {
	return fmt.Sprintf("Memory root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
||||||
|
|
||||||
|
// parsePath parses a remote 'url' by removing any leading and
// trailing "/" characters.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
|
||||||
|
|
||||||
|
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}

// setRoot changes the root of the Fs
//
// It also recalculates the cached rootBucket and rootDirectory parts.
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
|
||||||
|
|
||||||
|
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	root = strings.Trim(root, "/")
	f := &Fs{
		name: name,
		root: root,
		opt:  *opt,
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)
	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check whether the root actually points at an existing object
		od := buckets.getObjectData(f.rootBucket, f.rootDirectory)
		if od != nil {
			// The root is a file - reset the root to its parent directory
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			err = fs.ErrorIsFile
		}
	}
	return f, err
}
|
||||||
|
|
||||||
|
// newObject makes an object from a remote and an objectData
//
// The objectData pointer is stored as-is, not copied.
func (f *Fs) newObject(remote string, od *objectData) *Object {
	return &Object{fs: f, remote: remote, od: od}
}
|
||||||
|
|
||||||
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
|
bucket, bucketPath := f.split(remote)
|
||||||
|
od := buckets.getObjectData(bucket, bucketPath)
|
||||||
|
if od == nil {
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
return f.newObject(remote, od), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// listFn is called from list to handle an object.
//
// remote is the path of the entry relative to the listing root,
// entry is the object or directory entry, and isDirectory
// distinguishes the two.
type listFn func(remote string, entry fs.DirEntry, isDirectory bool) error
|
||||||
|
|
||||||
|
// list the buckets to fn
//
// directory is the directory within the bucket to list ("" for the
// bucket root), prefix is stripped from the front of each remote
// passed to fn, addBucket prepends the bucket name to each remote,
// and recurse controls whether synthetic directory entries are sent
// instead of descending entries.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	b := buckets.getBucket(bucket)
	if b == nil {
		return fs.ErrorDirNotFound
	}
	// Hold the read lock for the whole scan so fn sees a consistent
	// snapshot of the bucket's objects.
	b.mu.RLock()
	defer b.mu.RUnlock()
	// dirs records synthetic directories already sent so each is only
	// emitted once
	dirs := make(map[string]struct{})
	for absPath, od := range b.objects {
		if strings.HasPrefix(absPath, directory) {
			remote := absPath[len(prefix):]
			if !recurse {
				localPath := absPath[len(directory):]
				slash := strings.IndexRune(localPath, '/')
				if slash >= 0 {
					// send a directory if have a slash
					dir := directory + localPath[:slash]
					if addBucket {
						dir = path.Join(bucket, dir)
					}
					_, found := dirs[dir]
					if !found {
						err = fn(dir, fs.NewDir(dir, time.Time{}), true)
						if err != nil {
							return err
						}
						dirs[dir] = struct{}{}
					}
					continue // don't send this file if not recursing
				}
			}
			// send an object
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			err = fn(remote, f.newObject(remote, od), false)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// listDir lists the bucket to the entries
|
||||||
|
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
|
||||||
|
// List the objects and directories
|
||||||
|
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
|
||||||
|
entries = append(entries, entry)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return entries, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// listBuckets lists the buckets to entries
|
||||||
|
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||||
|
buckets.mu.RLock()
|
||||||
|
defer buckets.mu.RUnlock()
|
||||||
|
for name := range buckets.buckets {
|
||||||
|
entries = append(entries, fs.NewDir(name, time.Time{}))
|
||||||
|
}
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
	bucket, directory := f.split(dir)
	if bucket == "" {
		// Listing the root: a directory without a bucket makes no
		// sense for a bucket-based remote.
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
|
||||||
|
|
||||||
|
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	// listR recursively lists one bucket into the helper
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {
			return list.Add(entry)
		})
	}
	if bucket == "" {
		// No bucket in the path: list every bucket and its contents
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
	}
	return list.Flush()
}
|
||||||
|
|
||||||
|
// Put the object into the bucket
|
||||||
|
//
|
||||||
|
// Copy the reader in to the new object which is returned
|
||||||
|
//
|
||||||
|
// The new object may have been created if an error is returned
|
||||||
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
|
// Temporary Object under construction
|
||||||
|
fs := &Object{
|
||||||
|
fs: f,
|
||||||
|
remote: src.Remote(),
|
||||||
|
od: &objectData{
|
||||||
|
modTime: src.ModTime(ctx),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return fs, fs.Update(ctx, in, src, options...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// Since everything is buffered in memory anyway this just calls Put.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
|
||||||
|
|
||||||
|
// Mkdir creates the bucket if it doesn't exist
|
||||||
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
|
bucket, _ := f.split(dir)
|
||||||
|
buckets.makeBucket(bucket)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucket, directory := f.split(dir)
	// Directories within a bucket don't really exist in a bucket
	// based remote, so only deleting a bucket itself does anything -
	// everything else is a no-op.
	if bucket == "" || directory != "" {
		return nil
	}
	return buckets.deleteBucket(bucket)
}
|
||||||
|
|
||||||
|
// Precision of the remote
//
// Modification times are stored as time.Time in memory, so full
// nanosecond precision is available.
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}
|
||||||
|
|
||||||
|
// Copy src to this remote using server side copy operations.
|
||||||
|
//
|
||||||
|
// This is stored with the remote path given
|
||||||
|
//
|
||||||
|
// It returns the destination Object and a possible error
|
||||||
|
//
|
||||||
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
|
//
|
||||||
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
|
dstBucket, dstPath := f.split(remote)
|
||||||
|
_ = buckets.makeBucket(dstBucket)
|
||||||
|
srcObj, ok := src.(*Object)
|
||||||
|
if !ok {
|
||||||
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
|
return nil, fs.ErrorCantCopy
|
||||||
|
}
|
||||||
|
srcBucket, srcPath := srcObj.split()
|
||||||
|
od := buckets.getObjectData(srcBucket, srcPath)
|
||||||
|
if od == nil {
|
||||||
|
return nil, fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
buckets.updateObjectData(dstBucket, dstPath, od)
|
||||||
|
return f.NewObject(ctx, remote)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hashes returns the supported hash sets.
//
// Only MD5 is supported (see hashType).
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hashType)
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	// Guard against being called on a nil *Object (e.g. from logging)
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
|
||||||
|
|
||||||
|
// Hash returns the hash of an object returning a lowercase hex string
//
// Only hash.MD5 is supported; any other type gets hash.ErrUnsupported.
// The MD5 is computed lazily on first request and cached on the
// objectData. NOTE(review): the cache write is not protected by a
// lock, so concurrent Hash calls on objects sharing the same
// objectData could race - confirm whether concurrent access happens.
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hashType {
		return "", hash.ErrUnsupported
	}
	if o.od.hash == "" {
		sum := md5.Sum(o.od.data)
		o.od.hash = hex.EncodeToString(sum[:])
	}
	return o.od.hash, nil
}
|
||||||
|
|
||||||
|
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return int64(len(o.od.data))
}
|
||||||
|
|
||||||
|
// ModTime returns the modification time of the object
//
// The time is stored in memory on the objectData; it is set at
// upload time (Put/Update) or by SetModTime.
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
	return o.od.modTime
}
|
||||||
|
|
||||||
|
// SetModTime sets the modification time of the local fs object
//
// NOTE(review): this mutates the objectData in place, so any other
// Object holding the same objectData pointer sees the change too -
// confirm that is intended.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	o.od.modTime = modTime
	return nil
}
|
||||||
|
|
||||||
|
// Storable returns if this object is storable
//
// All in memory objects are storable.
func (o *Object) Storable() bool {
	return true
}
|
||||||
|
|
||||||
|
// Open an object for read
|
||||||
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
|
var offset, limit int64 = 0, -1
|
||||||
|
for _, option := range options {
|
||||||
|
switch x := option.(type) {
|
||||||
|
case *fs.RangeOption:
|
||||||
|
offset, limit = x.Decode(int64(len(o.od.data)))
|
||||||
|
case *fs.SeekOption:
|
||||||
|
offset = x.Offset
|
||||||
|
default:
|
||||||
|
if option.Mandatory() {
|
||||||
|
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if offset > int64(len(o.od.data)) {
|
||||||
|
offset = int64(len(o.od.data))
|
||||||
|
}
|
||||||
|
data := o.od.data[offset:]
|
||||||
|
if limit >= 0 {
|
||||||
|
if limit > int64(len(data)) {
|
||||||
|
limit = int64(len(data))
|
||||||
|
}
|
||||||
|
data = data[:limit]
|
||||||
|
}
|
||||||
|
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	bucket, bucketPath := o.split()
	// Read the whole body into memory - this is an in memory backend
	// so the data has to live in RAM anyway.
	data, err := ioutil.ReadAll(in)
	if err != nil {
		return errors.Wrap(err, "failed to update memory object")
	}
	// Build a fresh objectData rather than mutating the old one; the
	// hash cache is reset to "" so it gets recomputed lazily by Hash.
	o.od = &objectData{
		data:     data,
		hash:     "",
		modTime:  src.ModTime(ctx),
		mimeType: fs.MimeType(ctx, o),
	}
	buckets.updateObjectData(bucket, bucketPath, o.od)
	return nil
}
|
||||||
|
|
||||||
|
// Remove an object
|
||||||
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
|
bucket, bucketPath := o.split()
|
||||||
|
removed := buckets.removeObjectData(bucket, bucketPath)
|
||||||
|
if !removed {
|
||||||
|
return fs.ErrorObjectNotFound
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MimeType of an Object if known, "" otherwise
//
// The mime type is recorded on the objectData at upload time.
func (o *Object) MimeType(ctx context.Context) string {
	return o.od.mimeType
}
|
||||||
|
|
||||||
|
// Check the interfaces are satisfied
//
// These compile time assertions guarantee Fs and Object implement
// the optional rclone interfaces they claim to support.
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)
|
16
backend/memory/memory_test.go
Normal file
16
backend/memory/memory_test.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
// Test memory filesystem interface
|
||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestIntegration runs integration tests against the remote
//
// The ":memory:" remote needs no configuration, so the standard
// fstests suite can run against it directly.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: ":memory:",
		NilObject:  (*Object)(nil),
	})
}
|
@ -45,6 +45,7 @@ docs = [
|
|||||||
"koofr.md",
|
"koofr.md",
|
||||||
"mailru.md",
|
"mailru.md",
|
||||||
"mega.md",
|
"mega.md",
|
||||||
|
"memory.md",
|
||||||
"azureblob.md",
|
"azureblob.md",
|
||||||
"onedrive.md",
|
"onedrive.md",
|
||||||
"opendrive.md",
|
"opendrive.md",
|
||||||
|
@ -34,6 +34,7 @@ Rclone is a command line program to sync files and directories to and from:
|
|||||||
* {{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
|
* {{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
|
||||||
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
|
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
|
||||||
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
|
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
|
||||||
|
* {{< provider name="Memory" home="/memory/" config="/memory/" >}}
|
||||||
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
|
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
|
||||||
* {{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
|
* {{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
|
||||||
* {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
* {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
||||||
|
@ -41,6 +41,7 @@ See the following for detailed instructions for
|
|||||||
* [Koofr](/koofr/)
|
* [Koofr](/koofr/)
|
||||||
* [Mail.ru Cloud](/mailru/)
|
* [Mail.ru Cloud](/mailru/)
|
||||||
* [Mega](/mega/)
|
* [Mega](/mega/)
|
||||||
|
* [Memory](/memory/)
|
||||||
* [Microsoft Azure Blob Storage](/azureblob/)
|
* [Microsoft Azure Blob Storage](/azureblob/)
|
||||||
* [Microsoft OneDrive](/onedrive/)
|
* [Microsoft OneDrive](/onedrive/)
|
||||||
* [Openstack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
|
* [Openstack Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
|
||||||
|
66
docs/content/memory.md
Normal file
66
docs/content/memory.md
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
---
|
||||||
|
title: "Memory"
|
||||||
|
description: "Rclone docs for Memory backend"
|
||||||
|
date: "2020-01-18"
|
||||||
|
---
|
||||||
|
|
||||||
|
<i class="fas fa-memory"></i> Memory
|
||||||
|
-----------------------------------------
|
||||||
|
|
||||||
|
The memory backend is an in-RAM backend. It does not persist its
|
||||||
|
data - use the local backend for that.
|
||||||
|
|
||||||
|
The memory backend behaves like a bucket-based remote (e.g. like
|
||||||
|
s3). Because it has no parameters you can just use it with the
|
||||||
|
`:memory:` remote name.
|
||||||
|
|
||||||
|
You can configure it as a remote like this with `rclone config` too if
|
||||||
|
you want to:
|
||||||
|
|
||||||
|
```
|
||||||
|
No remotes found - make a new one
|
||||||
|
n) New remote
|
||||||
|
s) Set configuration password
|
||||||
|
q) Quit config
|
||||||
|
n/s/q> n
|
||||||
|
name> remote
|
||||||
|
Type of storage to configure.
|
||||||
|
Enter a string value. Press Enter for the default ("").
|
||||||
|
Choose a number from below, or type in your own value
|
||||||
|
[snip]
|
||||||
|
XX / Memory
|
||||||
|
\ "memory"
|
||||||
|
[snip]
|
||||||
|
Storage> memory
|
||||||
|
** See help for memory backend at: https://rclone.org/memory/ **
|
||||||
|
|
||||||
|
Remote config
|
||||||
|
|
||||||
|
--------------------
|
||||||
|
[remote]
|
||||||
|
type = memory
|
||||||
|
--------------------
|
||||||
|
y) Yes this is OK (default)
|
||||||
|
e) Edit this remote
|
||||||
|
d) Delete this remote
|
||||||
|
y/e/d> y
|
||||||
|
```
|
||||||
|
|
||||||
|
Because the memory backend isn't persistent it is most useful for
|
||||||
|
testing or with an rclone server or rclone mount, eg
|
||||||
|
|
||||||
|
rclone mount :memory: /mnt/tmp
|
||||||
|
rclone serve webdav :memory:
|
||||||
|
rclone serve sftp :memory:
|
||||||
|
|
||||||
|
### Modified time and hashes ###
|
||||||
|
|
||||||
|
The memory backend supports MD5 hashes and modification times accurate to 1 ns.
|
||||||
|
|
||||||
|
#### Restricted filename characters
|
||||||
|
|
||||||
|
The memory backend replaces the [default restricted characters
|
||||||
|
set](/overview/#restricted-characters).
|
||||||
|
|
||||||
|
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/memory/memory.go then run make backenddocs -->
|
||||||
|
<!--- autogenerated options stop -->
|
@ -34,6 +34,7 @@ Here is an overview of the major features of each cloud storage system.
|
|||||||
| Koofr | MD5 | No | Yes | No | - |
|
| Koofr | MD5 | No | Yes | No | - |
|
||||||
| Mail.ru Cloud | Mailru ‡‡‡ | Yes | Yes | No | - |
|
| Mail.ru Cloud | Mailru ‡‡‡ | Yes | Yes | No | - |
|
||||||
| Mega | - | No | No | Yes | - |
|
| Mega | - | No | No | Yes | - |
|
||||||
|
| Memory | MD5 | Yes | No | No | - |
|
||||||
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
|
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
|
||||||
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |
|
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |
|
||||||
| OpenDrive | MD5 | Yes | Yes | No | - |
|
| OpenDrive | MD5 | Yes | Yes | No | - |
|
||||||
@ -332,6 +333,7 @@ operations more efficient.
|
|||||||
| Jottacloud | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
|
| Jottacloud | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
|
||||||
| Mail.ru Cloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
|
| Mail.ru Cloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
|
||||||
| Mega | Yes | No | Yes | Yes | Yes | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes |
|
| Mega | Yes | No | Yes | Yes | Yes | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes |
|
||||||
|
| Memory | No | Yes | No | No | No | Yes | Yes | No | No | No |
|
||||||
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
|
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
|
||||||
| Microsoft OneDrive | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | No | Yes | Yes | Yes |
|
| Microsoft OneDrive | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | No | Yes | Yes | Yes |
|
||||||
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
|
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
|
||||||
|
@ -77,6 +77,7 @@
|
|||||||
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
|
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
|
||||||
<li><a href="/mailru/"><i class="fa fa-at"></i> Mail.ru Cloud</a></li>
|
<li><a href="/mailru/"><i class="fa fa-at"></i> Mail.ru Cloud</a></li>
|
||||||
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
|
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
|
||||||
|
<li><a href="/memory/"><i class="fas fa-memory"></i> Memory</a></li>
|
||||||
<li><a href="/azureblob/"><i class="fab fa-windows"></i> Microsoft Azure Blob Storage</a></li>
|
<li><a href="/azureblob/"><i class="fab fa-windows"></i> Microsoft Azure Blob Storage</a></li>
|
||||||
<li><a href="/onedrive/"><i class="fab fa-windows"></i> Microsoft OneDrive</a></li>
|
<li><a href="/onedrive/"><i class="fab fa-windows"></i> Microsoft OneDrive</a></li>
|
||||||
<li><a href="/opendrive/"><i class="fa fa-space-shuttle"></i> OpenDrive</a></li>
|
<li><a href="/opendrive/"><i class="fa fa-space-shuttle"></i> OpenDrive</a></li>
|
||||||
|
@ -99,6 +99,9 @@ backends:
|
|||||||
- backend: "jottacloud"
|
- backend: "jottacloud"
|
||||||
remote: "TestJottacloud:"
|
remote: "TestJottacloud:"
|
||||||
fastlist: true
|
fastlist: true
|
||||||
|
- backend: "memory"
|
||||||
|
remote: ":memory:"
|
||||||
|
fastlist: true
|
||||||
- backend: "onedrive"
|
- backend: "onedrive"
|
||||||
remote: "TestOneDrive:"
|
remote: "TestOneDrive:"
|
||||||
fastlist: false
|
fastlist: false
|
||||||
|
Loading…
Reference in New Issue
Block a user