// This deals with caching of files locally

package vfs

import (
	"context"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/djherbis/times"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	fscache "github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
)

// CacheMode controls the functionality of the cache
type CacheMode byte

// CacheMode options
const (
	CacheModeOff     CacheMode = iota // cache nothing - return errors for writes which can't be satisfied
	CacheModeMinimal                  // cache only the minimum, eg read/write opens
	CacheModeWrites                   // cache all files opened with write intent
	CacheModeFull                     // cache all files opened in any mode
)

var cacheModeToString = []string{
	CacheModeOff:     "off",
	CacheModeMinimal: "minimal",
	CacheModeWrites:  "writes",
	CacheModeFull:    "full",
}

// String turns a CacheMode into a string
func (l CacheMode) String() string {
	if l >= CacheMode(len(cacheModeToString)) {
		return fmt.Sprintf("CacheMode(%d)", l)
	}
	return cacheModeToString[l]
}

// Set a CacheMode
func (l *CacheMode) Set(s string) error {
	for n, name := range cacheModeToString {
		if s != "" && name == s {
			*l = CacheMode(n)
			return nil
		}
	}
	return errors.Errorf("Unknown cache mode level %q", s)
}

// Type of the value
func (l *CacheMode) Type() string {
	return "CacheMode"
}
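
// String, Set and Type give CacheMode the shape of the pflag.Value
// interface, so it can be used directly as a command line flag value
// (eg for the --vfs-cache-mode flag). Set("writes") leaves the value
// as CacheModeWrites; an unrecognised string returns an error.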

// cache opened files
type cache struct {
	f      fs.Fs                 // fs for the cache directory
	opt    *Options              // vfs Options
	root   string                // root of the cache directory
	itemMu sync.Mutex            // protects the following variables
	item   map[string]*cacheItem // files/directories in the cache
	used   int64                 // total size of files in the cache
}
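
// Keys in cache.item are clean()-ed remote paths (with "" for the
// root), never OS paths.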

// cacheItem is stored in the item map
type cacheItem struct {
	opens  int       // number of times file is open
	atime  time.Time // last time file was accessed
	isFile bool      // if this is a file or a directory
	size   int64     // size of the cached item
}

// newCacheItem returns an item for the cache
func newCacheItem(isFile bool) *cacheItem {
	return &cacheItem{atime: time.Now(), isFile: isFile}
}

// newCache creates a new cache hierarchy for f
//
// This starts background goroutines which can be cancelled with the
// context passed in.
func newCache(ctx context.Context, f fs.Fs, opt *Options) (*cache, error) {
	fRoot := filepath.FromSlash(f.Root())
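	// On Windows strip the `\\?` long path prefix and remove any ":"
	// (eg from drive letters) so that fRoot can be joined underneath
	// the local cache root as a plain relative path.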
	if runtime.GOOS == "windows" {
		if strings.HasPrefix(fRoot, `\\?`) {
			fRoot = fRoot[3:]
		}
		fRoot = strings.Replace(fRoot, ":", "", -1)
	}
	root := filepath.Join(config.CacheDir, "vfs", f.Name(), fRoot)
	fs.Debugf(nil, "vfs cache root is %q", root)

	f, err := fscache.Get(root)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create cache remote")
	}

	c := &cache{
		f:    f,
		opt:  opt,
		root: root,
		item: make(map[string]*cacheItem),
	}

	go c.cleaner(ctx)

	return c, nil
}
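
// A minimal usage sketch (assuming a configured fs.Fs f and vfs
// Options opt):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	c, err := newCache(ctx, f, &opt)
//	if err != nil {
//		return err
//	}
//	defer cancel() // cancelling the context stops the cleaner goroutine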

// findParent returns the parent directory of name, or "" for the root
func findParent(name string) string {
	parent := path.Dir(name)
	if parent == "." || parent == "/" {
		parent = ""
	}
	return parent
}

// clean returns the cleaned version of name for use in the index map
func clean(name string) string {
	name = strings.Trim(name, "/")
	name = path.Clean(name)
	if name == "." || name == "/" {
		name = ""
	}
	return name
}

// toOSPath turns a remote relative name into an OS path in the cache
func (c *cache) toOSPath(name string) string {
	return filepath.Join(c.root, filepath.FromSlash(name))
}

// mkdir makes the directory for name in the cache and returns an os
// path for the file
func (c *cache) mkdir(name string) (string, error) {
	parent := findParent(name)
	leaf := path.Base(name)
	parentPath := c.toOSPath(parent)
	err := os.MkdirAll(parentPath, 0700)
	if err != nil {
		return "", errors.Wrap(err, "make cache directory failed")
	}
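	// Record the parent directory chain in the item map so the
	// cleaner knows these directories belong to the cache.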
	c.cacheDir(parent)
	return filepath.Join(parentPath, leaf), nil
}

// _get gets name from the cache or creates a new one
//
// It returns the item and found as to whether this item was found in
// the cache (or just created).
//
// name should be a remote path not an osPath
//
// must be called with itemMu held
func (c *cache) _get(isFile bool, name string) (item *cacheItem, found bool) {
	item = c.item[name]
	found = item != nil
	if !found {
		item = newCacheItem(isFile)
		c.item[name] = item
	}
	return item, found
}

// opens returns the number of opens that are on the file
//
// name should be a remote path not an osPath
func (c *cache) opens(name string) int {
	name = clean(name)
	c.itemMu.Lock()
	defer c.itemMu.Unlock()
	item := c.item[name]
	if item == nil {
		return 0
	}
	return item.opens
}

// get gets name from the cache or creates a new one
//
// name should be a remote path not an osPath
func (c *cache) get(name string) *cacheItem {
	name = clean(name)
	c.itemMu.Lock()
	item, _ := c._get(true, name)
	c.itemMu.Unlock()
	return item
}

// updateStat sets the atime of the name to that passed in if it is
// newer than the existing or there isn't an existing time.
//
// it also sets the size
//
// name should be a remote path not an osPath
func (c *cache) updateStat(name string, when time.Time, size int64) {
	name = clean(name)
	c.itemMu.Lock()
	item, found := c._get(true, name)
	if !found || when.Sub(item.atime) > 0 {
		fs.Debugf(name, "updateStat: setting atime to %v", when)
		item.atime = when
	}
	item.size = size
	c.itemMu.Unlock()
}

// _open marks name as open, must be called with the lock held
//
// name should be a remote path not an osPath
func (c *cache) _open(isFile bool, name string) {
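	// Walk up the directory tree from name to the root, bumping the
	// open count and atime of each element, so open files and the
	// directories containing them are never purged.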
	for {
		item, _ := c._get(isFile, name)
		item.opens++
		item.atime = time.Now()
		if name == "" {
			break
		}
		isFile = false
		name = findParent(name)
	}
}

// open marks name as open
//
// name should be a remote path not an osPath
func (c *cache) open(name string) {
	name = clean(name)
	c.itemMu.Lock()
	c._open(true, name)
	c.itemMu.Unlock()
}

// cacheDir marks a directory and its parents as being in the cache
//
// name should be a remote path not an osPath
func (c *cache) cacheDir(name string) {
	name = clean(name)
	c.itemMu.Lock()
	defer c.itemMu.Unlock()
	for {
		item := c.item[name]
		if item != nil {
			break
		}
		c.item[name] = newCacheItem(false)
		if name == "" {
			break
		}
		name = findParent(name)
	}
}

// _close marks name as closed - must be called with the lock held
func (c *cache) _close(isFile bool, name string) {
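	// Walk up the directory tree from name to the root, undoing the
	// open counts added by _open and refreshing the atime and the
	// cached size from the file on disk.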
	for {
		item, _ := c._get(isFile, name)
		item.opens--
		item.atime = time.Now()
		if item.opens < 0 {
			fs.Errorf(name, "cache: double close")
		}
		osPath := c.toOSPath(name)
		fi, err := os.Stat(osPath)
		// Update the size on close
		if err == nil && !fi.IsDir() {
			item.size = fi.Size()
		}
		if name == "" {
			break
		}
		isFile = false
		name = findParent(name)
	}
}

// close marks name as closed
//
// name should be a remote path not an osPath
func (c *cache) close(name string) {
	name = clean(name)
	c.itemMu.Lock()
	c._close(true, name)
	c.itemMu.Unlock()
}

// remove should be called if name is deleted
func (c *cache) remove(name string) {
	osPath := c.toOSPath(name)
	err := os.Remove(osPath)
	if err != nil && !os.IsNotExist(err) {
		fs.Errorf(name, "Failed to remove from cache: %v", err)
	} else {
		fs.Infof(name, "Removed from cache")
	}
}

// removeDir should be called if dir is deleted and returns true if
// the directory is gone.
func (c *cache) removeDir(dir string) bool {
	osPath := c.toOSPath(dir)
	err := os.Remove(osPath)
	if err == nil || os.IsNotExist(err) {
		if err == nil {
			fs.Debugf(dir, "Removed empty directory")
		}
		return true
	}
	if !os.IsExist(err) {
		fs.Errorf(dir, "Failed to remove cached dir: %v", err)
	}
	return false
}

// cleanUp empties the cache of everything
func (c *cache) cleanUp() error {
	return os.RemoveAll(c.root)
}

// walk walks the cache calling the function
func (c *cache) walk(fn func(osPath string, fi os.FileInfo, name string) error) error {
	return filepath.Walk(c.root, func(osPath string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Find path relative to the cache root
		name, err := filepath.Rel(c.root, osPath)
		if err != nil {
			return errors.Wrap(err, "filepath.Rel failed in walk")
		}
		if name == "." {
			name = ""
		}
		// And convert into slashes
		name = filepath.ToSlash(name)

		return fn(osPath, fi, name)
	})
}

// updateStats walks the cache updating any atimes and sizes it finds
//
// it also updates used
func (c *cache) updateStats() error {
	var newUsed int64
	err := c.walk(func(osPath string, fi os.FileInfo, name string) error {
		if !fi.IsDir() {
			// Update the atime with that of the file
			atime := times.Get(fi).AccessTime()
			c.updateStat(name, atime, fi.Size())
			newUsed += fi.Size()
		} else {
			c.cacheDir(name)
		}
		return nil
	})
	c.itemMu.Lock()
	c.used = newUsed
	c.itemMu.Unlock()
	return err
}

// purgeOld gets rid of any files that are over age
func (c *cache) purgeOld(maxAge time.Duration) {
	c._purgeOld(maxAge, c.remove)
}
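
// _purgeOld takes the remove function as a parameter so it can be
// substituted (for example in tests); purgeOld passes c.remove.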
func (c *cache) _purgeOld(maxAge time.Duration, remove func(name string)) {
	c.itemMu.Lock()
	defer c.itemMu.Unlock()
	cutoff := time.Now().Add(-maxAge)
	for name, item := range c.item {
		if item.isFile && item.opens == 0 {
			// If not locked and access time too long ago - delete the file
			dt := item.atime.Sub(cutoff)
			// fs.Debugf(name, "atime=%v cutoff=%v, dt=%v", item.atime, cutoff, dt)
			if dt < 0 {
				remove(name)
				// Remove the entry
				delete(c.item, name)
			}
		}
	}
}

// Purge any empty directories
func (c *cache) purgeEmptyDirs() {
	c._purgeEmptyDirs(c.removeDir)
}

func (c *cache) _purgeEmptyDirs(removeDir func(name string) bool) {
	c.itemMu.Lock()
	defer c.itemMu.Unlock()
	var dirs []string
	for name, item := range c.item {
		if !item.isFile && item.opens == 0 {
			dirs = append(dirs, name)
		}
	}
	// remove empty directories in reverse alphabetical order
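	// (a child path always sorts after its parent, so children are
	// removed before the directories which contain them)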
	sort.Strings(dirs)
	for i := len(dirs) - 1; i >= 0; i-- {
		dir := dirs[i]
		// Remove the entry
		if removeDir(dir) {
			delete(c.item, dir)
		}
	}
}

// This is a cacheItem with a name for sorting
type cacheNamedItem struct {
	name string
	item *cacheItem
}
type cacheNamedItems []cacheNamedItem

func (v cacheNamedItems) Len() int           { return len(v) }
func (v cacheNamedItems) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
func (v cacheNamedItems) Less(i, j int) bool { return v[i].item.atime.Before(v[j].item.atime) }
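
// Sorting a cacheNamedItems therefore puts the least recently
// accessed files first, which is the order _purgeOverQuota removes
// them in.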

// Remove any files that are over quota starting from the
// oldest first
func (c *cache) purgeOverQuota(quota int64) {
	c._purgeOverQuota(quota, c.remove)
}

func (c *cache) _purgeOverQuota(quota int64, remove func(name string)) {
	c.itemMu.Lock()
	defer c.itemMu.Unlock()

	if quota <= 0 || c.used < quota {
		return
	}

	var items cacheNamedItems

	// Make a slice of unused files
	for name, item := range c.item {
		if item.isFile && item.opens == 0 {
			items = append(items, cacheNamedItem{
				name: name,
				item: item,
			})
		}
	}
	sort.Sort(items)

	// Remove items until the quota is OK
	for _, item := range items {
		if c.used < quota {
			break
		}
		remove(item.name)
		// Remove the entry
		delete(c.item, item.name)
		c.used -= item.item.size
	}
}

// clean empties the cache of stuff if it can
func (c *cache) clean() {
	// Cache may be empty so end
	_, err := os.Stat(c.root)
	if os.IsNotExist(err) {
		return
	}

	c.itemMu.Lock()
	oldItems, oldUsed := len(c.item), fs.SizeSuffix(c.used)
	c.itemMu.Unlock()

	// first walk the FS to update the atimes and sizes
	err = c.updateStats()
	if err != nil {
		fs.Errorf(nil, "Error traversing cache %q: %v", c.root, err)
	}

	// Remove any files that are over age
	c.purgeOld(c.opt.CacheMaxAge)

	// Now remove any files that are over quota starting from the
	// oldest first
	c.purgeOverQuota(int64(c.opt.CacheMaxSize))

	// Remove any empty directories
	c.purgeEmptyDirs()

	// Stats
	c.itemMu.Lock()
	newItems, newUsed := len(c.item), fs.SizeSuffix(c.used)
	c.itemMu.Unlock()

	fs.Infof(nil, "Cleaned the cache: objects %d (was %d), total size %v (was %v)", newItems, oldItems, newUsed, oldUsed)
}

// cleaner calls clean at regular intervals
//
// doesn't return until context is cancelled
func (c *cache) cleaner(ctx context.Context) {
	if c.opt.CachePollInterval <= 0 {
		fs.Debugf(nil, "Cache cleaning thread disabled because poll interval <= 0")
		return
	}
	// Start cleaning the cache immediately
	c.clean()
	// Then every interval specified
	timer := time.NewTicker(c.opt.CachePollInterval)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			c.clean()
		case <-ctx.Done():
			fs.Debugf(nil, "cache cleaner exiting")
			return
		}
	}
}