rclone/backend/cache/object.go

381 lines
9.7 KiB
Go
Raw Normal View History

// +build !plan9
2017-11-12 18:54:25 +01:00
package cache
import (
"context"
2017-11-12 18:54:25 +01:00
"io"
"path"
"sync"
"time"
2018-01-29 23:05:04 +01:00
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
2018-01-29 23:05:04 +01:00
)
// Values stored in Object.CacheType: objectInCache marks an object fully
// backed by the wrapped remote, objectPendingUpload one still sitting in
// the temp-write area waiting to be uploaded.
const (
	objectInCache       = "Object"
	objectPendingUpload = "TempObject"
)
// Object is a generic file like object that stores basic information about it
type Object struct {
	fs.Object `json:"-"` // the wrapped source object (nil until refreshed)

	ParentFs      fs.Fs     `json:"-"`         // parent fs
	CacheFs       *Fs       `json:"-"`         // cache fs
	Name          string    `json:"name"`      // name of the directory
	Dir           string    `json:"dir"`       // abs path of the object
	CacheModTime  int64     `json:"modTime"`   // modification or creation time - IsZero for unknown
	CacheSize     int64     `json:"size"`      // size of directory and contents or -1 if unknown
	CacheStorable bool      `json:"storable"`  // says whether this object can be stored
	CacheType     string    `json:"cacheType"` // objectInCache or objectPendingUpload
	CacheTs       time.Time `json:"cacheTs"`   // when the cached info was last refreshed

	cacheHashesMu sync.Mutex           // guards CacheHashes
	CacheHashes   map[hash.Type]string // all supported hashes cached

	refreshMutex sync.Mutex // serializes refreshFromSource
}
// NewObject builds one from a generic fs.Object
2018-01-29 23:05:04 +01:00
func NewObject(f *Fs, remote string) *Object {
2017-11-12 18:54:25 +01:00
fullRemote := path.Join(f.Root(), remote)
dir, name := path.Split(fullRemote)
2018-01-29 23:05:04 +01:00
cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
2018-01-29 23:05:04 +01:00
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
2017-11-12 18:54:25 +01:00
co := &Object{
2018-01-29 23:05:04 +01:00
ParentFs: parentFs,
2017-11-12 18:54:25 +01:00
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheStorable: false,
2018-01-29 23:05:04 +01:00
CacheType: cacheType,
CacheTs: time.Now(),
2017-11-12 18:54:25 +01:00
}
return co
}
// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
2017-11-12 18:54:25 +01:00
var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote)
2018-01-29 23:05:04 +01:00
cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
2018-01-29 23:05:04 +01:00
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
2017-11-12 18:54:25 +01:00
co = &Object{
2018-01-29 23:05:04 +01:00
ParentFs: parentFs,
2017-11-12 18:54:25 +01:00
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
2018-01-29 23:05:04 +01:00
CacheType: cacheType,
CacheTs: time.Now(),
2017-11-12 18:54:25 +01:00
}
co.updateData(ctx, o)
2017-11-12 18:54:25 +01:00
return co
}
func (o *Object) updateData(ctx context.Context, source fs.Object) {
2017-11-12 18:54:25 +01:00
o.Object = source
o.CacheModTime = source.ModTime(ctx).UnixNano()
2017-11-12 18:54:25 +01:00
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.cacheHashesMu.Lock()
2018-01-29 23:05:04 +01:00
o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
2017-11-12 18:54:25 +01:00
}
// Fs returns its FS info
func (o *Object) Fs() fs.Info {
	return o.CacheFs
}
// String returns a human friendly name for this object
func (o *Object) String() string {
	if o != nil {
		return o.Remote()
	}
	return "<nil>"
}
// Remote returns the remote path
func (o *Object) Remote() string {
p := path.Join(o.Dir, o.Name)
2018-01-29 23:05:04 +01:00
return o.CacheFs.cleanRootFromPath(p)
2017-11-12 18:54:25 +01:00
}
// abs returns the absolute path to the object (used as the cache key)
func (o *Object) abs() string {
	return path.Join(o.Dir, o.Name)
}
// ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time {
	// best-effort refresh: on failure we fall back to the cached value
	_ = o.refresh(ctx)
	return time.Unix(0, o.CacheModTime)
}
// Size returns the cached Size
func (o *Object) Size() int64 {
	// best-effort refresh; no context available through this interface method
	_ = o.refresh(context.TODO())
	return o.CacheSize
}
// Storable returns the cached Storable
func (o *Object) Storable() bool {
	// best-effort refresh; no context available through this interface method
	_ = o.refresh(context.TODO())
	return o.CacheStorable
}
// refresh will check if the object info is expired and request the info from source if it is
// all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh(ctx context.Context) error {
	notified := o.CacheFs.isNotifiedRemote(o.Remote())
	expired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
	if notified || expired {
		return o.refreshFromSource(ctx, true)
	}
	return nil
}
2017-11-12 18:54:25 +01:00
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
2017-11-12 18:54:25 +01:00
o.refreshMutex.Lock()
defer o.refreshMutex.Unlock()
2018-01-29 23:05:04 +01:00
var err error
var liveObject fs.Object
2017-11-12 18:54:25 +01:00
2018-01-29 23:05:04 +01:00
if o.Object != nil && !force {
2017-11-12 18:54:25 +01:00
return nil
}
2018-01-29 23:05:04 +01:00
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
2018-01-29 23:05:04 +01:00
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
2018-01-29 23:05:04 +01:00
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
2017-11-12 18:54:25 +01:00
if err != nil {
2018-01-29 23:05:04 +01:00
fs.Errorf(o, "error refreshing object in : %v", err)
2017-11-12 18:54:25 +01:00
return err
}
o.updateData(ctx, liveObject)
2017-11-12 18:54:25 +01:00
o.persist()
return nil
}
// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
if err := o.refreshFromSource(ctx, false); err != nil {
2017-11-12 18:54:25 +01:00
return err
}
err := o.Object.SetModTime(ctx, t)
2017-11-12 18:54:25 +01:00
if err != nil {
return err
}
o.CacheModTime = t.UnixNano()
o.persist()
2018-01-29 23:05:04 +01:00
fs.Debugf(o, "updated ModTime: %v", t)
2017-11-12 18:54:25 +01:00
return nil
}
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var err error
if o.Object == nil {
err = o.refreshFromSource(ctx, true)
} else {
err = o.refresh(ctx)
}
if err != nil {
2017-11-12 18:54:25 +01:00
return nil, err
}
cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
var offset, limit int64 = 0, -1
2017-11-12 18:54:25 +01:00
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
2018-01-22 20:44:55 +01:00
offset, limit = x.Decode(o.Size())
}
_, err = cacheReader.Seek(offset, io.SeekStart)
if err != nil {
2018-01-22 20:44:55 +01:00
return nil, err
2017-11-12 18:54:25 +01:00
}
}
2018-01-22 20:44:55 +01:00
return readers.NewLimitedReadCloser(cacheReader, limit), nil
2017-11-12 18:54:25 +01:00
}
// Update will change the object data
//
// The source object is updated first; only on success are the cached
// chunks invalidated and the cached metadata replaced. Background uploads
// are paused for the duration so the temp file can't be picked up mid-write.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't update", o)
		}
	}
	fs.Debugf(o, "updating object contents with size %v", src.Size())

	// FIXME use reliable upload
	err := o.Object.Update(ctx, in, src, options...)
	if err != nil {
		fs.Errorf(o, "error updating source: %v", err)
		return err
	}

	// deleting cached chunks and info to be replaced with new ones
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)

	// mirror the new source metadata in the cache and drop stale hashes
	o.CacheModTime = src.ModTime(ctx).UnixNano()
	o.CacheSize = src.Size()
	o.cacheHashesMu.Lock()
	o.CacheHashes = make(map[hash.Type]string)
	o.cacheHashesMu.Unlock()
	o.CacheTs = time.Now()
	o.persist()

	return nil
}
// Remove deletes the object from both the cache and the source
//
// The source delete happens first; only on success are the cached chunks,
// any queued upload and the parent directory listing invalidated.
// Background uploads are paused for the duration.
func (o *Object) Remove(ctx context.Context) error {
	if err := o.refreshFromSource(ctx, false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.opt.TempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't delete", o)
		}
	}
	err := o.Object.Remove(ctx)
	if err != nil {
		return err
	}

	fs.Debugf(o, "removing object")
	// best-effort cache cleanup: errors here don't fail the remove
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	_ = o.CacheFs.cache.removePendingUpload(o.abs())
	parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
	_ = o.CacheFs.cache.ExpireDir(parentCd)
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}
// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	// best-effort refresh; a failure still lets us serve a cached hash
	_ = o.refresh(ctx)
	o.cacheHashesMu.Lock()
	if o.CacheHashes == nil {
		o.CacheHashes = make(map[hash.Type]string)
	}
	cachedHash, found := o.CacheHashes[ht]
	o.cacheHashesMu.Unlock()
	if found {
		return cachedHash, nil
	}
	// cache miss: make sure we have a live source object, then ask it
	if err := o.refreshFromSource(ctx, false); err != nil {
		return "", err
	}
	liveHash, err := o.Object.Hash(ctx, ht)
	if err != nil {
		return "", err
	}
	// re-acquire the lock: refreshFromSource may have swapped the map,
	// but it always leaves a non-nil one behind
	o.cacheHashesMu.Lock()
	o.CacheHashes[ht] = liveHash
	o.cacheHashesMu.Unlock()
	o.persist()
	fs.Debugf(o, "object hash cached: %v", liveHash)
	return liveHash, nil
}
// persist adds this object to the persistent cache; failures are logged
// but not returned so callers can chain off the receiver
func (o *Object) persist() *Object {
	if err := o.CacheFs.cache.AddObject(o); err != nil {
		fs.Errorf(o, "failed to cache object: %v", err)
	}
	return o
}
2018-01-29 23:05:04 +01:00
// isTempFile reports whether this path has a pending upload queued and
// keeps CacheType in sync with the answer
func (o *Object) isTempFile() bool {
	if _, err := o.CacheFs.cache.SearchPendingUpload(o.abs()); err == nil {
		o.CacheType = objectPendingUpload
		return true
	}
	o.CacheType = objectInCache
	return false
}
// tempFileStartedUpload reports whether a queued upload of this path has
// already been started by the background uploader
func (o *Object) tempFileStartedUpload() bool {
	started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	return err == nil && started
}
2018-07-26 12:53:46 +02:00
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
	return o.Object
}
2017-11-12 18:54:25 +01:00
// Compile-time checks that Object satisfies the required interfaces.
var (
	_ fs.Object         = (*Object)(nil)
	_ fs.ObjectUnWrapper = (*Object)(nil)
)