//go:build !plan9 && !js

package cache

import (
	"bytes"
	"context"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/walk"
	bolt "go.etcd.io/bbolt"
)

// Constants
const (
	// RootBucket is the bucket that holds the cached directory/object tree
	RootBucket = "root"
	// RootTsBucket is the bucket that holds timestamps for file entries (timestamp key -> entry name)
	RootTsBucket = "rootTs"
	// DataTsBucket is the bucket that holds timestamps for cached chunks (timestamp key -> chunkInfo)
	DataTsBucket = "dataTs"
	// tempBucket is the bucket that holds the queue of pending uploads, keyed by destination path
	tempBucket = "pending"
)

// Features contains the feature flags for this storage type
type Features struct {
	PurgeDb    bool          // purge the db before starting
	DbWaitTime time.Duration // time to wait for DB to be available
}
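
// boltMap caches a single Persistent instance per DB file path; access to it is guarded by boltMapMx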
var boltMap = make(map[string]*Persistent)
var boltMapMx sync.Mutex

// GetPersistent returns a single instance for the specific store
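//
// A minimal usage sketch (the paths and wait time below are illustrative only, not defaults):
//
//	db, err := GetPersistent("/tmp/cache.db", "/tmp/cache-chunks", &Features{DbWaitTime: time.Second})
//	if err != nil {
//		// handle the error
//	}
//	defer db.Close()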
func GetPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
	// write lock to create one
	boltMapMx.Lock()
	defer boltMapMx.Unlock()
	if b, ok := boltMap[dbPath]; ok {
		if !b.open {
			err := b.connect()
			if err != nil {
				return nil, err
			}
		}
		return b, nil
	}

	bb, err := newPersistent(dbPath, chunkPath, f)
	if err != nil {
		return nil, err
	}
	boltMap[dbPath] = bb
	return boltMap[dbPath], nil
}
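
// chunkInfo describes a single cached chunk: the object path it belongs to, its offset and its size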
type chunkInfo struct {
	Path   string
	Offset int64
	Size   int64
}
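
// tempUploadInfo describes a queued temporary upload: its destination path, when it was queued and whether the upload has started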
type tempUploadInfo struct {
	DestPath string
	AddedOn  time.Time
	Started  bool
}

// String representation of a tempUploadInfo
func (t *tempUploadInfo) String() string {
	return fmt.Sprintf("%v - %v (%v)", t.DestPath, t.Started, t.AddedOn)
}

// Persistent is a wrapper of persistent storage for a bolt.DB file
type Persistent struct {
	dbPath       string
	dataPath     string
	open         bool
	db           *bolt.DB
	cleanupMux   sync.Mutex
	tempQueueMux sync.Mutex
	features     *Features
}

// newPersistent builds a new wrapper and connects to the bolt.DB file
func newPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
	b := &Persistent{
		dbPath:   dbPath,
		dataPath: chunkPath,
		features: f,
	}

	err := b.connect()
	if err != nil {
		fs.Errorf(dbPath, "Error opening storage cache. Is there another rclone running on the same remote? %v", err)
		return nil, err
	}

	return b, nil
}

// String will return a human friendly string for this DB (currently the dbPath)
func (b *Persistent) String() string {
	return "<Cache DB> " + b.dbPath
}

// connect creates a connection to the configured file
// if the PurgeDb feature flag is set, the DB is purged right after it is opened
func (b *Persistent) connect() error {
	var err error

	err = os.MkdirAll(b.dataPath, os.ModePerm)
	if err != nil {
		return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
	}
	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
	if err != nil {
		return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
	}
	if b.features.PurgeDb {
		b.Purge()
	}
	_ = b.db.Update(func(tx *bolt.Tx) error {
		_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))

		return nil
	})

	b.open = true
	return nil
}

// getBucket prepares and cleans a specific path of the form: /var/tmp and will iterate through each path component
// to get to the nested bucket of the final part (in this example: tmp)
func (b *Persistent) getBucket(dir string, createIfMissing bool, tx *bolt.Tx) *bolt.Bucket {
	cleanPath(dir)

	entries := strings.FieldsFunc(dir, func(c rune) bool {
		// cover Windows where rclone still uses '/' as path separator
		// this should be safe as '/' is not a valid Windows character
		return (os.PathSeparator == c || c == rune('/'))
	})
	bucket := tx.Bucket([]byte(RootBucket))

	for _, entry := range entries {
		if createIfMissing {
			bucket, _ = bucket.CreateBucketIfNotExists([]byte(entry))
		} else {
			bucket = bucket.Bucket([]byte(entry))
		}

		if bucket == nil {
			return nil
		}
	}

	return bucket
}

// GetDir will retrieve data of a cached directory
func (b *Persistent) GetDir(remote string) (*Directory, error) {
	cd := &Directory{}

	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(remote, false, tx)
		if bucket == nil {
			return fmt.Errorf("couldn't open bucket (%v)", remote)
		}

		data := bucket.Get([]byte("."))
		if data != nil {
			return json.Unmarshal(data, cd)
		}

		return fmt.Errorf("%v not found", remote)
	})

	return cd, err
}

// AddDir will update a CachedDirectory metadata and all its entries
func (b *Persistent) AddDir(cachedDir *Directory) error {
	return b.AddBatchDir([]*Directory{cachedDir})
}

// AddBatchDir will update a list of CachedDirectory metadata and all their entries
func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
	if len(cachedDirs) == 0 {
		return nil
	}

	return b.db.Update(func(tx *bolt.Tx) error {
		var bucket *bolt.Bucket
		if cachedDirs[0].Dir == "" {
			bucket = tx.Bucket([]byte(RootBucket))
		} else {
			bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
		}
		if bucket == nil {
			return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
		}

		for _, cachedDir := range cachedDirs {
			var b *bolt.Bucket
			var err error
			if cachedDir.Name == "" {
				b = bucket
			} else {
				b, err = bucket.CreateBucketIfNotExists([]byte(cachedDir.Name))
			}
			if err != nil {
				return err
			}

			encoded, err := json.Marshal(cachedDir)
			if err != nil {
				return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
			}
			err = b.Put([]byte("."), encoded)
			if err != nil {
				return err
			}
		}
		return nil
	})
}

// GetDirEntries will return a CachedDirectory, its list of dir entries and/or an error if it encountered issues
func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error) {
	var dirEntries fs.DirEntries

	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedDir.abs(), false, tx)
		if bucket == nil {
			return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
		}

		val := bucket.Get([]byte("."))
		if val != nil {
			err := json.Unmarshal(val, cachedDir)
			if err != nil {
				return fmt.Errorf("error during unmarshalling obj: %w", err)
			}
		} else {
			return fmt.Errorf("missing cached dir: %v", cachedDir)
		}

		c := bucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			// ignore metadata key: .
			if bytes.Equal(k, []byte(".")) {
				continue
			}
			entryPath := path.Join(cachedDir.Remote(), string(k))

			if v == nil { // directory
				// we try to find a cached meta for the dir
				currentBucket := c.Bucket().Bucket(k)
				if currentBucket == nil {
					return fmt.Errorf("couldn't open bucket (%v)", string(k))
				}

				metaKey := currentBucket.Get([]byte("."))
				d := NewDirectory(cachedDir.CacheFs, entryPath)
				if metaKey != nil { //if we don't find it, we create an empty dir
					err := json.Unmarshal(metaKey, d)
					if err != nil { // if even this fails, we fallback to an empty dir
						fs.Debugf(string(k), "error during unmarshalling obj: %v", err)
					}
				}

				dirEntries = append(dirEntries, d)
			} else { // object
				o := NewObject(cachedDir.CacheFs, entryPath)
				err := json.Unmarshal(v, o)
				if err != nil {
					fs.Debugf(string(k), "error during unmarshalling obj: %v", err)
					continue
				}

				dirEntries = append(dirEntries, o)
			}
		}

		return nil
	})

	return dirEntries, err
}

// RemoveDir will delete a CachedDirectory, all its objects and all the chunks stored for it
func (b *Persistent) RemoveDir(fp string) error {
	var err error
	parentDir, dirName := path.Split(fp)
	if fp == "" {
		err = b.db.Update(func(tx *bolt.Tx) error {
			err := tx.DeleteBucket([]byte(RootBucket))
			if err != nil {
				fs.Debugf(fp, "couldn't delete from cache: %v", err)
				return err
			}
			_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
			return nil
		})
	} else {
		err = b.db.Update(func(tx *bolt.Tx) error {
			bucket := b.getBucket(cleanPath(parentDir), false, tx)
			if bucket == nil {
				return fmt.Errorf("couldn't open bucket (%v)", fp)
			}
			// delete the cached dir
			err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
			if err != nil {
				fs.Debugf(fp, "couldn't delete from cache: %v", err)
			}
			return nil
		})
	}

	// delete chunks on disk
	// safe to ignore as the files might not have been open
	if err == nil {
		_ = os.RemoveAll(path.Join(b.dataPath, fp))
		_ = os.MkdirAll(b.dataPath, os.ModePerm)
	}

	return err
}

// ExpireDir will flush a CachedDirectory and all its objects from the object cache
// chunks will remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error {
	t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
	cd.CacheTs = &t

	// expire all parents
	return b.db.Update(func(tx *bolt.Tx) error {
		// expire all the parents
		currentDir := cd.abs()
		for { // until we get to the root
			bucket := b.getBucket(currentDir, false, tx)
			if bucket != nil {
				val := bucket.Get([]byte("."))
				if val != nil {
					cd2 := &Directory{CacheFs: cd.CacheFs}
					err := json.Unmarshal(val, cd2)
					if err == nil {
						fs.Debugf(cd, "cache: expired %v", currentDir)
						cd2.CacheTs = &t
						enc2, _ := json.Marshal(cd2)
						_ = bucket.Put([]byte("."), enc2)
					}
				}
			}
			if currentDir == "" {
				break
			}
			currentDir = cleanPath(path.Dir(currentDir))
		}
		return nil
	})
}

// GetObject will return a CachedObject from its parent directory or an error if it doesn't find it
func (b *Persistent) GetObject(cachedObject *Object) (err error) {
	return b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedObject.Dir, false, tx)
		if bucket == nil {
			return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
		}
		val := bucket.Get([]byte(cachedObject.Name))
		if val != nil {
			return json.Unmarshal(val, cachedObject)
		}
		return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
	})
}

// AddObject will create a cached object in its parent directory
func (b *Persistent) AddObject(cachedObject *Object) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cachedObject.Dir, true, tx)
		if bucket == nil {
			return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
		}
		// cache Object Info
		encoded, err := json.Marshal(cachedObject)
		if err != nil {
			return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
		}
		err = bucket.Put([]byte(cachedObject.Name), encoded)
		if err != nil {
			return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
		}
		return nil
	})
}

// RemoveObject will delete a single cached object and all the chunks which belong to it
func (b *Persistent) RemoveObject(fp string) error {
	parentDir, objName := path.Split(fp)
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket := b.getBucket(cleanPath(parentDir), false, tx)
		if bucket == nil {
			return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
		}
		err := bucket.Delete([]byte(cleanPath(objName)))
		if err != nil {
			fs.Debugf(fp, "couldn't delete obj from storage: %v", err)
		}
		// delete chunks on disk
		// safe to ignore as the file might not have been open
		_ = os.RemoveAll(path.Join(b.dataPath, fp))
		return nil
	})
}

// ExpireObject will flush an Object and all its data if desired
func (b *Persistent) ExpireObject(co *Object, withData bool) error {
	co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
	err := b.AddObject(co)
	if withData {
		_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
	}
	return err
}

// HasEntry confirms the existence of a single entry (dir or object)
func (b *Persistent) HasEntry(remote string) bool {
	dir, name := path.Split(remote)
	dir = cleanPath(dir)
	name = cleanPath(name)

	err := b.db.View(func(tx *bolt.Tx) error {
		bucket := b.getBucket(dir, false, tx)
		if bucket == nil {
			return fmt.Errorf("couldn't open parent bucket for %v", remote)
		}
		if f := bucket.Bucket([]byte(name)); f != nil {
			return nil
		}
		if f := bucket.Get([]byte(name)); f != nil {
			return nil
		}

		return fmt.Errorf("couldn't find object (%v)", remote)
	})
	return err == nil
}

// HasChunk confirms the existence of a single chunk of an object
func (b *Persistent) HasChunk(cachedObject *Object, offset int64) bool {
	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
	if _, err := os.Stat(fp); !os.IsNotExist(err) {
		return true
	}
	return false
}

// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
	var data []byte

	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
	data, err := os.ReadFile(fp)
	if err != nil {
		return nil, err
	}

	return data, err
}

// AddChunk adds a new chunk of a cached object
func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
	err := os.WriteFile(filePath, data, os.ModePerm)
	if err != nil {
		return err
	}

	return b.db.Update(func(tx *bolt.Tx) error {
		tsBucket := tx.Bucket([]byte(DataTsBucket))
		ts := time.Now()
		found := false

		// delete (older) timestamps for the same object
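		// if an entry for this (path, offset) that is newer than ts already exists it is kept
		// and the new timestamp is not written below; other entries for the same chunk are deleted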
		c := tsBucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err = json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}
			if ci.Path == fp && ci.Offset == offset {
				if tsInCache := time.Unix(0, btoi(k)); tsInCache.After(ts) && !found {
					found = true
					continue
				}
				err := c.Delete()
				if err != nil {
					fs.Debugf(fp, "failed to clean chunk: %v", err)
				}
			}
		}
		// don't overwrite if a newer one is already there
		if found {
			return nil
		}
		enc, err := json.Marshal(chunkInfo{Path: fp, Offset: offset, Size: int64(len(data))})
		if err != nil {
			fs.Debugf(fp, "failed to timestamp chunk: %v", err)
		}
		err = tsBucket.Put(itob(ts.UnixNano()), enc)
		if err != nil {
			fs.Debugf(fp, "failed to timestamp chunk: %v", err)
		}
		return nil
	})
}

// CleanChunksByAge is a noop for this implementation (cleanup is done by size, see CleanChunksBySize)
func (b *Persistent) CleanChunksByAge(chunkAge time.Duration) {
	// NOOP
}

// CleanChunksByNeed is a noop for this implementation
func (b *Persistent) CleanChunksByNeed(offset int64) {
	// noop: we want to clean a Bolt DB by time only
}

// CleanChunksBySize will cleanup chunks after the total size passes a certain point
func (b *Persistent) CleanChunksBySize(maxSize int64) {
	b.cleanupMux.Lock()
	defer b.cleanupMux.Unlock()
	var cntChunks int
	var roughlyCleaned fs.SizeSuffix

	err := b.db.Update(func(tx *bolt.Tx) error {
		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
		if dataTsBucket == nil {
			return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
		}
		// iterate through ts
		c := dataTsBucket.Cursor()
		totalSize := int64(0)
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err := json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}

			totalSize += ci.Size
		}

		if totalSize > maxSize {
			needToClean := totalSize - maxSize
			roughlyCleaned = fs.SizeSuffix(needToClean)
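			// timestamp keys are big endian nanosecond values, so iterating from the
			// first key removes the oldest chunks first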
			for k, v := c.First(); k != nil; k, v = c.Next() {
				var ci chunkInfo
				err := json.Unmarshal(v, &ci)
				if err != nil {
					continue
				}
				// delete this ts entry
				err = c.Delete()
				if err != nil {
					fs.Errorf(ci.Path, "failed deleting chunk ts during cleanup (%v): %v", ci.Offset, err)
					continue
				}
				err = os.Remove(path.Join(b.dataPath, ci.Path, strconv.FormatInt(ci.Offset, 10)))
				if err == nil {
					cntChunks++
					needToClean -= ci.Size
					if needToClean <= 0 {
						break
					}
				}
			}
		}
		if cntChunks > 0 {
			fs.Infof("cache-cleanup", "chunks %v, est. size: %v", cntChunks, roughlyCleaned.String())
		}
		return nil
	})

	if err != nil {
		if err == bolt.ErrDatabaseNotOpen {
			// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
			return
		}
		fs.Errorf("cache", "cleanup failed: %v", err)
	}
}

// Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
	r := make(map[string]map[string]interface{})
	r["data"] = make(map[string]interface{})
	r["data"]["oldest-ts"] = time.Now()
	r["data"]["oldest-file"] = ""
	r["data"]["newest-ts"] = time.Now()
	r["data"]["newest-file"] = ""
	r["data"]["total-chunks"] = 0
	r["data"]["total-size"] = int64(0)
	r["files"] = make(map[string]interface{})
	r["files"]["oldest-ts"] = time.Now()
	r["files"]["oldest-name"] = ""
	r["files"]["newest-ts"] = time.Now()
	r["files"]["newest-name"] = ""
	r["files"]["total-files"] = 0

	_ = b.db.View(func(tx *bolt.Tx) error {
		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
		rootTsBucket := tx.Bucket([]byte(RootTsBucket))

		var totalDirs int
		var totalFiles int
		_ = b.iterateBuckets(tx.Bucket([]byte(RootBucket)), func(name string) {
			totalDirs++
		}, func(key string, val []byte) {
			totalFiles++
		})
		r["files"]["total-dir"] = totalDirs
		r["files"]["total-files"] = totalFiles

		c := dataTsBucket.Cursor()

		totalChunks := 0
		totalSize := int64(0)
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err := json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}
			totalChunks++
			totalSize += ci.Size
		}
		r["data"]["total-chunks"] = totalChunks
		r["data"]["total-size"] = totalSize

		if k, v := c.First(); k != nil {
			var ci chunkInfo
			_ = json.Unmarshal(v, &ci)
			r["data"]["oldest-ts"] = time.Unix(0, btoi(k))
			r["data"]["oldest-file"] = ci.Path
		}
		if k, v := c.Last(); k != nil {
			var ci chunkInfo
			_ = json.Unmarshal(v, &ci)
			r["data"]["newest-ts"] = time.Unix(0, btoi(k))
			r["data"]["newest-file"] = ci.Path
		}

		c = rootTsBucket.Cursor()
		if k, v := c.First(); k != nil {
			// split to get (abs path - offset)
			r["files"]["oldest-ts"] = time.Unix(0, btoi(k))
			r["files"]["oldest-name"] = string(v)
		}
		if k, v := c.Last(); k != nil {
			r["files"]["newest-ts"] = time.Unix(0, btoi(k))
			r["files"]["newest-name"] = string(v)
		}

		return nil
	})

	return r, nil
}

// Purge will flush the entire cache
func (b *Persistent) Purge() {
	b.cleanupMux.Lock()
	defer b.cleanupMux.Unlock()

	_ = b.db.Update(func(tx *bolt.Tx) error {
		_ = tx.DeleteBucket([]byte(RootBucket))
		_ = tx.DeleteBucket([]byte(RootTsBucket))
		_ = tx.DeleteBucket([]byte(DataTsBucket))

		_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket))

		return nil
	})

	err := os.RemoveAll(b.dataPath)
	if err != nil {
		fs.Errorf(b, "issue removing data folder: %v", err)
	}
	err = os.MkdirAll(b.dataPath, os.ModePerm)
	if err != nil {
		fs.Errorf(b, "issue creating data folder: %v", err)
	}
}

// GetChunkTs retrieves the current timestamp of this chunk
func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
	var t time.Time

	err := b.db.View(func(tx *bolt.Tx) error {
		tsBucket := tx.Bucket([]byte(DataTsBucket))
		c := tsBucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var ci chunkInfo
			err := json.Unmarshal(v, &ci)
			if err != nil {
				continue
			}
			if ci.Path == path && ci.Offset == offset {
				t = time.Unix(0, btoi(k))
				return nil
			}
		}
		return fmt.Errorf("not found %v-%v", path, offset)
	})

	return t, err
}
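
// iterateBuckets walks the given bucket (or the whole DB when buk is nil) recursively,
// calling bucketFn for every nested bucket name and kvFn for every key/value pair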
func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string), kvFn func(key string, val []byte)) error {
	err := b.db.View(func(tx *bolt.Tx) error {
		var c *bolt.Cursor
		if buk == nil {
			c = tx.Cursor()
		} else {
			c = buk.Cursor()
		}
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if v == nil {
				var buk2 *bolt.Bucket
				if buk == nil {
					buk2 = tx.Bucket(k)
				} else {
					buk2 = buk.Bucket(k)
				}

				bucketFn(string(k))
				_ = b.iterateBuckets(buk2, bucketFn, kvFn)
			} else {
				kvFn(string(k), v)
			}
		}
		return nil
	})

	return err
}

// addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
			return fmt.Errorf("couldn't create bucket for %v", tempBucket)
		}
		tempObj := &tempUploadInfo{
			DestPath: destPath,
			AddedOn:  time.Now(),
			Started:  started,
		}

		// cache Object Info
		encoded, err := json.Marshal(tempObj)
		if err != nil {
			return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
		}
		err = bucket.Put([]byte(destPath), encoded)
		if err != nil {
			return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
		}

		return nil
	})
}

// getPendingUpload returns the next file from the pending queue of uploads
func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (destPath string, err error) {
	b.tempQueueMux.Lock()
	defer b.tempQueueMux.Unlock()

	err = b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
			return fmt.Errorf("couldn't create bucket for %v", tempBucket)
		}

		c := bucket.Cursor()
		for k, v := c.Seek([]byte(inRoot)); k != nil && bytes.HasPrefix(k, []byte(inRoot)); k, v = c.Next() {
			//for k, v := c.First(); k != nil; k, v = c.Next() {
			var tempObj = &tempUploadInfo{}
			err = json.Unmarshal(v, tempObj)
			if err != nil {
				fs.Errorf(b, "failed to read pending upload: %v", err)
				continue
			}
			// skip over started uploads
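			// ...and over entries that have not yet waited at least waitTime since being queued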
			if tempObj.Started || time.Now().Before(tempObj.AddedOn.Add(waitTime)) {
				continue
			}

			tempObj.Started = true
			v2, err := json.Marshal(tempObj)
			if err != nil {
				fs.Errorf(b, "failed to update pending upload: %v", err)
				continue
			}
			err = bucket.Put(k, v2)
			if err != nil {
				fs.Errorf(b, "failed to update pending upload: %v", err)
				continue
			}

			destPath = tempObj.DestPath
			return nil
		}

		return fmt.Errorf("no pending upload found")
	})

	return destPath, err
}

// SearchPendingUpload returns the file info from the pending queue of uploads
func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error) {
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(tempBucket))
		if bucket == nil {
			return fmt.Errorf("couldn't open bucket for %v", tempBucket)
		}

		var tempObj = &tempUploadInfo{}
		v := bucket.Get([]byte(remote))
		err = json.Unmarshal(v, tempObj)
		if err != nil {
			return fmt.Errorf("pending upload (%v) not found: %v", remote, err)
		}

		started = tempObj.Started
		return nil
	})

	return started, err
}

// searchPendingUploadFromDir returns the files currently pending upload from a single dir
func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, err error) {
	err = b.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(tempBucket))
		if bucket == nil {
			return fmt.Errorf("couldn't open bucket for %v", tempBucket)
		}

		c := bucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			var tempObj = &tempUploadInfo{}
			err = json.Unmarshal(v, tempObj)
			if err != nil {
				fs.Errorf(b, "failed to read pending upload: %v", err)
				continue
			}
			parentDir := cleanPath(path.Dir(tempObj.DestPath))
			if dir == parentDir {
				remotes = append(remotes, tempObj.DestPath)
			}
		}

		return nil
	})

	return remotes, err
}
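
// rollbackPendingUpload marks a pending upload as not started so that it can be picked up again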
func (b *Persistent) rollbackPendingUpload(remote string) error {
	b.tempQueueMux.Lock()
	defer b.tempQueueMux.Unlock()

	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
			return fmt.Errorf("couldn't create bucket for %v", tempBucket)
		}
		var tempObj = &tempUploadInfo{}
		v := bucket.Get([]byte(remote))
		err = json.Unmarshal(v, tempObj)
		if err != nil {
			return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
		}
		tempObj.Started = false
		v2, err := json.Marshal(tempObj)
		if err != nil {
			return fmt.Errorf("pending upload not updated: %w", err)
		}
		err = bucket.Put([]byte(tempObj.DestPath), v2)
		if err != nil {
			return fmt.Errorf("pending upload not updated: %w", err)
		}
		return nil
	})
}
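
// removePendingUpload deletes a single entry from the pending upload queue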
func (b *Persistent) removePendingUpload(remote string) error {
	b.tempQueueMux.Lock()
	defer b.tempQueueMux.Unlock()

	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
			return fmt.Errorf("couldn't create bucket for %v", tempBucket)
		}
		return bucket.Delete([]byte(remote))
	})
}

// updatePendingUpload updates an existing item in the queue while checking, in the same
// transaction, that it hasn't started. If it has started, the update is not allowed
func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUploadInfo) error) error {
	b.tempQueueMux.Lock()
	defer b.tempQueueMux.Unlock()

	return b.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
			return fmt.Errorf("couldn't create bucket for %v", tempBucket)
		}

		var tempObj = &tempUploadInfo{}
		v := bucket.Get([]byte(remote))
		err = json.Unmarshal(v, tempObj)
		if err != nil {
			return fmt.Errorf("pending upload (%v) not found: %v", remote, err)
		}
		if tempObj.Started {
			return fmt.Errorf("pending upload already started %v", remote)
		}
		err = fn(tempObj)
		if err != nil {
			return err
		}
		if remote != tempObj.DestPath {
			err := bucket.Delete([]byte(remote))
			if err != nil {
				return err
			}
			// if this is removed then the entry can be removed too
			if tempObj.DestPath == "" {
				return nil
			}
		}
		v2, err := json.Marshal(tempObj)
		if err != nil {
			return fmt.Errorf("pending upload not updated: %w", err)
		}
		err = bucket.Put([]byte(tempObj.DestPath), v2)
		if err != nil {
			return fmt.Errorf("pending upload not updated: %w", err)
		}

		return nil
	})
}

// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		_ = tx.DeleteBucket([]byte(tempBucket))
		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
		if err != nil {
			return err
		}

		var queuedEntries []fs.Object
		err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
			for _, o := range entries {
				if oo, ok := o.(fs.Object); ok {
					queuedEntries = append(queuedEntries, oo)
				}
			}
			return nil
		})
		if err != nil {
			return err
		}

		fs.Debugf(cacheFs, "reconciling temporary uploads")
		for _, queuedEntry := range queuedEntries {
			destPath := path.Join(cacheFs.Root(), queuedEntry.Remote())
			tempObj := &tempUploadInfo{
				DestPath: destPath,
				AddedOn:  time.Now(),
				Started:  false,
			}

			// cache Object Info
			encoded, err := json.Marshal(tempObj)
			if err != nil {
				return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
			}
			err = bucket.Put([]byte(destPath), encoded)
			if err != nil {
				return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
			}
			fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
		}

		return nil
	})
}

// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
	b.cleanupMux.Lock()
	defer b.cleanupMux.Unlock()

	err := b.db.Close()
	if err != nil {
		fs.Errorf(b, "closing handle: %v", err)
	}
	b.open = false
}

// itob returns an 8-byte big endian representation of v.
func itob(v int64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}
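
// btoi converts an 8-byte big endian representation back to an int64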
func btoi(d []byte) int64 {
	return int64(binary.BigEndian.Uint64(d))
}