//go:build !plan9 && !js

// Package cache implements a virtual provider to cache existing remotes.
package cache

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"os/signal"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/rclone/rclone/backend/crypt"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/atexit"
	"golang.org/x/time/rate"
)

const (
	// DefCacheChunkSize is the default value for chunk size
	DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
	// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
	DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024)
	// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
	DefCacheChunkCleanInterval = fs.Duration(time.Minute)
	// DefCacheInfoAge is the default value for object info age
	DefCacheInfoAge = fs.Duration(6 * time.Hour)
	// DefCacheReadRetries is the default value for read retries
	DefCacheReadRetries = 10
	// DefCacheTotalWorkers is how many workers run in parallel to download chunks
	DefCacheTotalWorkers = 4
	// DefCacheChunkNoMemory will enable or disable in-memory storage for chunks
	DefCacheChunkNoMemory = false
	// DefCacheRps limits the number of requests per second to the source FS
	DefCacheRps = -1
	// DefCacheWrites will cache file data on writes through the cache
	DefCacheWrites = false
	// DefCacheTmpWaitTime says how long files should be stored in the local cache before being uploaded
	DefCacheTmpWaitTime = fs.Duration(15 * time.Second)
	// DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available
	DefCacheDbWaitTime = fs.Duration(1 * time.Second)
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "cache",
		Description: "Cache a remote",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name:     "remote",
			Help:     "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
			Required: true,
		}, {
			Name: "plex_url",
			Help: "The URL of the Plex server.",
		}, {
			Name:      "plex_username",
			Help:      "The username of the Plex user.",
			Sensitive: true,
		}, {
			Name:       "plex_password",
			Help:       "The password of the Plex user.",
			IsPassword: true,
		}, {
			Name:      "plex_token",
			Help:      "The plex token for authentication - auto set normally.",
			Hide:      fs.OptionHideBoth,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:     "plex_insecure",
			Help:     "Skip all certificate verification when connecting to the Plex server.",
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `The size of a chunk (partial file data).

Use lower numbers for slower connections. If the chunk size is
changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
			Default: DefCacheChunkSize,
			Examples: []fs.OptionExample{{
				Value: "1M",
				Help:  "1 MiB",
			}, {
				Value: "5M",
				Help:  "5 MiB",
			}, {
				Value: "10M",
				Help:  "10 MiB",
			}},
		}, {
			Name: "info_age",
			Help: `How long to cache file structure information (directory listings, file size, times, etc.).

If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
			Default: DefCacheInfoAge,
			Examples: []fs.OptionExample{{
				Value: "1h",
				Help:  "1 hour",
			}, {
				Value: "24h",
				Help:  "24 hours",
			}, {
				Value: "48h",
				Help:  "48 hours",
			}},
		}, {
			Name: "chunk_total_size",
			Help: `The total size that the chunks can take up on the local disk.

If the cache exceeds this value then it will start to delete the
oldest chunks until it goes under this value.`,
			Default: DefCacheTotalChunkSize,
			Examples: []fs.OptionExample{{
				Value: "500M",
				Help:  "500 MiB",
			}, {
				Value: "1G",
				Help:  "1 GiB",
			}, {
				Value: "10G",
				Help:  "10 GiB",
			}},
		}, {
			Name:     "db_path",
			Default:  filepath.Join(config.GetCacheDir(), "cache-backend"),
			Help:     "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
			Advanced: true,
		}, {
			Name:    "chunk_path",
			Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
			Help: `Directory to cache chunk files.

Path to where partial file data (chunks) are stored locally. The remote
name is appended to the final path.

This config follows the "--cache-db-path". If you specify a custom
location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
			Advanced: true,
		}, {
			Name:     "db_purge",
			Default:  false,
			Help:     "Clear all the cached data for this remote on start.",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:    "chunk_clean_interval",
			Default: DefCacheChunkCleanInterval,
			Help: `How often should the cache perform cleanups of the chunk storage.

The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
			Advanced: true,
		}, {
			Name:    "read_retries",
			Default: DefCacheReadRetries,
			Help: `How many times to retry a read from a cache storage.

Since reading from a cache stream is independent from downloading file
data, readers can get to a point where there's no more data in the
cache. Most of the time this can indicate a connectivity issue if
cache isn't able to provide file data anymore.

For really slow connections, increase this to a point where the stream is
able to provide data but the experience will be very stuttery.`,
			Advanced: true,
		}, {
			Name:    "workers",
			Default: DefCacheTotalWorkers,
			Help: `How many workers should run in parallel to download chunks.

Higher values will mean more parallel processing (better CPU needed)
and more concurrent requests on the cloud provider. This impacts
several aspects like the cloud provider API limits, more stress on the
hardware that rclone runs on but it also means that streams will be
more fluid and data will be available much faster to readers.

**Note**: If the optional Plex integration is enabled then this
setting will adapt to the type of reading performed and the value
specified here will be used as a maximum number of workers to use.`,
			Advanced: true,
		}, {
			Name:    "chunk_no_memory",
			Default: DefCacheChunkNoMemory,
			Help: `Disable the in-memory cache for storing chunks during streaming.

By default, cache will keep file data during streaming in RAM as well
to provide it to readers as fast as possible.

This transient data is evicted as soon as it is read and the number of
chunks stored doesn't exceed the number of workers. However, depending
on other settings like "cache-chunk-size" and "cache-workers" this footprint
can increase if there are parallel streams too (multiple files being read
at the same time).

If the hardware permits it, use this feature to provide an overall better
performance during streaming but it can also be disabled if RAM is not
available on the local machine.`,
			Advanced: true,
		}, {
			Name:    "rps",
			Default: int(DefCacheRps),
			Help: `Limits the number of requests per second to the source FS (-1 to disable).

This setting places a hard limit on the number of requests per second
that cache will be doing to the cloud provider remote and tries to
respect that value by setting waits between reads.

If you find that you're getting banned or limited on the cloud
provider through cache and know that a smaller number of requests per
second will allow you to work with it then you can use this setting
for that.

A good balance of all the other settings should make this setting
useless but it is available to set for more special cases.

**NOTE**: This will limit the number of requests during streams but
other API calls to the cloud provider like directory listings will
still pass.`,
			Advanced: true,
		}, {
			Name:    "writes",
			Default: DefCacheWrites,
			Help: `Cache file data on writes through the FS.

If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
cache store at the same time during upload.`,
			Advanced: true,
		}, {
			Name:    "tmp_upload_path",
			Default: "",
			Help: `Directory to keep temporary files until they are uploaded.

This is the path that cache will use as temporary storage for new
files that need to be uploaded to the cloud provider.

Specifying a value will enable this feature. Without it, it is
completely disabled and files will be uploaded directly to the cloud
provider.`,
			Advanced: true,
		}, {
			Name:    "tmp_wait_time",
			Default: DefCacheTmpWaitTime,
			Help: `How long should files be stored in local cache before being uploaded.

This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.

Note that only one file is uploaded at a time and it can take longer
to start the upload if a queue has formed for this purpose.`,
			Advanced: true,
		}, {
			Name:    "db_wait_time",
			Default: DefCacheDbWaitTime,
			Help: `How long to wait for the DB to be available - 0 is unlimited.

Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
error.

If you set it to 0 then it will wait forever.`,
			Advanced: true,
		}},
	})
}
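
// An illustrative rclone.conf section using the options registered above.
// The remote name "mycache" and the wrapped remote "mydrive:media" are
// examples, not defaults:
//
//	[mycache]
//	type = cache
//	remote = mydrive:media
//	chunk_size = 10M
//	info_age = 48h
//	chunk_total_size = 10G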

// Options defines the configuration for this backend
type Options struct {
	Remote             string        `config:"remote"`
	PlexURL            string        `config:"plex_url"`
	PlexUsername       string        `config:"plex_username"`
	PlexPassword       string        `config:"plex_password"`
	PlexToken          string        `config:"plex_token"`
	PlexInsecure       bool          `config:"plex_insecure"`
	ChunkSize          fs.SizeSuffix `config:"chunk_size"`
	InfoAge            fs.Duration   `config:"info_age"`
	ChunkTotalSize     fs.SizeSuffix `config:"chunk_total_size"`
	DbPath             string        `config:"db_path"`
	ChunkPath          string        `config:"chunk_path"`
	DbPurge            bool          `config:"db_purge"`
	ChunkCleanInterval fs.Duration   `config:"chunk_clean_interval"`
	ReadRetries        int           `config:"read_retries"`
	TotalWorkers       int           `config:"workers"`
	ChunkNoMemory      bool          `config:"chunk_no_memory"`
	Rps                int           `config:"rps"`
	StoreWrites        bool          `config:"writes"`
	TempWritePath      string        `config:"tmp_upload_path"`
	TempWaitTime       fs.Duration   `config:"tmp_wait_time"`
	DbWaitTime         fs.Duration   `config:"db_wait_time"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
	fs.Fs
	wrapper fs.Fs

	name     string
	root     string
	opt      Options      // parsed options
	features *fs.Features // optional features
	cache    *Persistent
	tempFs   fs.Fs

	lastChunkCleanup time.Time
	cleanupMu        sync.Mutex
	rateLimiter      *rate.Limiter
	plexConnector    *plexConnector
	backgroundRunner *backgroundWriter
	cleanupChan      chan bool
	parentsForgetFn  []func(string, fs.EntryType)
	notifiedRemotes  map[string]bool
	notifiedMu       sync.Mutex
	parentsForgetMu  sync.Mutex
}

// parseRootPath returns a cleaned root path and a nil error or "" and an error when the path is invalid
func parseRootPath(path string) (string, error) {
	return strings.Trim(path, "/"), nil
}

var warnDeprecated sync.Once

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
	warnDeprecated.Do(func() {
		fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
	})

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
		return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
	}
	if strings.HasPrefix(opt.Remote, name+":") {
		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
	}
	rpath, err := parseRootPath(rootPath)
	if err != nil {
		return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
	}

	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
	wrappedFs, wrapErr := cache.Get(ctx, remotePath)
	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
	}
	var fsErr error
	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
	if wrapErr == fs.ErrorIsFile {
		fsErr = fs.ErrorIsFile
		rpath = cleanPath(path.Dir(rpath))
	}
	// configure cache backend
	if opt.DbPurge {
		fs.Debugf(name, "Purging the DB")
	}
	f := &Fs{
		Fs:               wrappedFs,
		name:             name,
		root:             rpath,
		opt:              *opt,
		lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
		cleanupChan:      make(chan bool, 1),
		notifiedRemotes:  make(map[string]bool),
	}
	cache.PinUntilFinalized(f.Fs, f)
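
	// A non-positive rps means unlimited (rate.Inf); the burst is sized to
	// the worker count so all download workers can issue a request at the
	// same instant without being throttled.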
	rps := rate.Inf
	if opt.Rps > 0 {
		rps = rate.Limit(float64(opt.Rps))
	}
	f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)

	f.plexConnector = &plexConnector{}
	if opt.PlexURL != "" {
		if opt.PlexToken != "" {
			f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
			if err != nil {
				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
			}
		} else if opt.PlexPassword != "" && opt.PlexUsername != "" {
			decPass, err := obscure.Reveal(opt.PlexPassword)
			if err != nil {
				decPass = opt.PlexPassword
			}
			f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
				m.Set("plex_token", token)
			})
			if err != nil {
				return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
			}
		}
	}

	dbPath := f.opt.DbPath
	chunkPath := f.opt.ChunkPath
	// if dbPath is non-default but chunkPath is still the default, chunkPath follows dbPath
	if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
		chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
		chunkPath = dbPath
	}
	if filepath.Ext(dbPath) != "" {
		dbPath = filepath.Dir(dbPath)
	}
	if filepath.Ext(chunkPath) != "" {
		chunkPath = filepath.Dir(chunkPath)
	}
	err = os.MkdirAll(dbPath, os.ModePerm)
	if err != nil {
		return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
	}
	err = os.MkdirAll(chunkPath, os.ModePerm)
	if err != nil {
		return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
	}

	dbPath = filepath.Join(dbPath, name+".db")
	chunkPath = filepath.Join(chunkPath, name)
	fs.Infof(name, "Cache DB path: %v", dbPath)
	fs.Infof(name, "Cache chunk path: %v", chunkPath)
	f.cache, err = GetPersistent(dbPath, chunkPath, &Features{
		PurgeDb:    opt.DbPurge,
		DbWaitTime: time.Duration(opt.DbWaitTime),
	})
	if err != nil {
		return nil, fmt.Errorf("failed to start cache db: %w", err)
	}

	// Trap SIGHUP to flush the directory cache; register an atexit handler
	// to close the DB handle gracefully on shutdown
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	atexit.Register(func() {
		if opt.PlexURL != "" {
			f.plexConnector.closeWebsocket()
		}
		f.StopBackgroundRunners()
	})
	go func() {
		for {
			s := <-c
			if s == syscall.SIGHUP {
				fs.Infof(f, "Clearing cache from signal")
				f.DirCacheFlush()
			}
		}
	}()

	fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory)
	fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize)
	fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize)
	fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
	fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
	fs.Infof(name, "File Age: %v", f.opt.InfoAge)
	if f.opt.StoreWrites {
		fs.Infof(name, "Cache Writes: enabled")
	}

	if f.opt.TempWritePath != "" {
		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
		if err != nil {
			return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
		}
		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
		f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
		if err != nil {
			return nil, fmt.Errorf("failed to create temp fs: %w", err)
		}
		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
		f.backgroundRunner, _ = initBackgroundUploader(f)
		go f.backgroundRunner.run()
	}

	go func() {
		for {
			time.Sleep(time.Duration(f.opt.ChunkCleanInterval))
			select {
			case <-f.cleanupChan:
				fs.Infof(f, "stopping cleanup")
				return
			default:
				fs.Debugf(f, "starting cleanup")
				f.CleanUpCache(false)
			}
		}
	}()

	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
		pollInterval := make(chan time.Duration, 1)
		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
		doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
	}

	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
		DuplicateFiles:          false, // storage doesn't permit this
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
	// override only those features that use a temp fs and it doesn't support them
	//f.features.ChangeNotify = f.ChangeNotify
	if f.opt.TempWritePath != "" {
		if f.tempFs.Features().Move == nil {
			f.features.Move = nil
		}
		if f.tempFs.Features().DirMove == nil {
			f.features.DirMove = nil
		}
		if f.tempFs.Features().MergeDirs == nil {
			f.features.MergeDirs = nil
		}
	}
	// even if the wrapped fs doesn't support it, we still want it
	f.features.DirCacheFlush = f.DirCacheFlush
2018-03-14 20:49:11 +01:00
rc . Add ( rc . Call {
Path : "cache/expire" ,
Fn : f . httpExpireRemote ,
Title : "Purge a remote from cache" ,
Help : `
Purge a remote from the cache backend . Supports either a directory or a file .
Params :
- remote = path to remote ( required )
- withData = true / false to delete cached data ( chunks ) as well ( optional )
2018-04-23 21:44:44 +02:00
Eg
rclone rc cache / expire remote = path / to / sub / folder /
rclone rc cache / expire remote = / withData = true
2018-03-14 20:49:11 +01:00
` ,
} )
2018-03-22 00:11:20 +01:00
rc . Add ( rc . Call {
Path : "cache/stats" ,
Fn : f . httpStats ,
Title : "Get cache stats" ,
Help : `
Show statistics for the cache remote .
2018-08-30 11:09:16 +02:00
` ,
} )
rc . Add ( rc . Call {
Path : "cache/fetch" ,
Fn : f . rcFetch ,
Title : "Fetch file chunks" ,
Help : `
Ensure the specified file chunks are cached on disk .
The chunks = parameter specifies the file chunks to check .
It takes a comma separated list of array slice indices .
The slice indices are similar to Python slices : start [ : end ]
start is the 0 based chunk number from the beginning of the file
to fetch inclusive . end is 0 based chunk number from the beginning
2019-02-07 18:41:17 +01:00
of the file to fetch exclusive .
2018-08-30 11:09:16 +02:00
Both values can be negative , in which case they count from the back
of the file . The value "-5:" represents the last 5 chunks of a file .
Some valid examples are :
":5,-5:" - > the first and last five chunks
"0,-2" - > the first and the second last chunk
"0:10" - > the first ten chunks
Any parameter with a key that starts with "file" can be used to
2020-10-13 23:49:58 +02:00
specify files to fetch , e . g .
2018-08-30 11:09:16 +02:00
rclone rc cache / fetch chunks = 0 file = hello file2 = home / goodbye
File names will automatically be encrypted when the a crypt remote
is used on top of the cache .
2018-03-22 00:11:20 +01:00
` ,
} )
2018-01-29 23:05:04 +01:00
return f , fsErr
2017-11-12 18:54:25 +01:00
}
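
// httpStats serves the rc "cache/stats" call, returning statistics from
// the persistent cache store.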
func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
	out = make(rc.Params)
	m, err := f.Stats()
	if err != nil {
		return out, fmt.Errorf("error while getting cache stats")
	}
	out["status"] = "ok"
	out["stats"] = m
	return out, nil
}
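
// unwrapRemote normalizes a remote path, encrypting it when a crypt
// backend is wrapped on top of the cache and the path is in decrypted form.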
func (f *Fs) unwrapRemote(remote string) string {
	remote = cleanPath(remote)
	if remote != "" {
		// if it's wrapped by crypt we need to check what format we got
		if cryptFs, yes := f.isWrappedByCrypt(); yes {
			_, err := cryptFs.DecryptFileName(remote)
			// if it failed to decrypt then it is a decrypted format and we need to encrypt it
			if err != nil {
				return cryptFs.EncryptFileName(remote)
			}
			// else it's an encrypted format and we can use it as it is
		}
	}
	return remote
}
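
// httpExpireRemote serves the rc "cache/expire" call, expiring a cached
// file or directory and optionally removing its cached chunks.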
func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
	out = make(rc.Params)
	remoteInt, ok := in["remote"]
	if !ok {
		return out, fmt.Errorf("remote is needed")
	}
	remote := remoteInt.(string)
	withData := false
	_, ok = in["withData"]
	if ok {
		withData = true
	}

	remote = f.unwrapRemote(remote)
	if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
		return out, fmt.Errorf("%s doesn't exist in cache", remote)
	}

	co := NewObject(f, remote)
	err = f.cache.GetObject(co)
	if err != nil { // it could be a dir
		cd := NewDirectory(f, remote)
		err := f.cache.ExpireDir(cd)
		if err != nil {
			return out, fmt.Errorf("error expiring directory: %w", err)
		}
		// notify vfs too
		f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
		out["status"] = "ok"
		out["message"] = fmt.Sprintf("cached directory cleared: %v", remote)
		return out, nil
	}
	// expire the entry
	err = f.cache.ExpireObject(co, withData)
	if err != nil {
		return out, fmt.Errorf("error expiring file: %w", err)
	}
	// notify vfs too
	f.notifyChangeUpstream(co.Remote(), fs.EntryObject)

	out["status"] = "ok"
	out["message"] = fmt.Sprintf("cached file cleared: %v", remote)
	return out, nil
}
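
// rcFetch serves the rc "cache/fetch" call, downloading the requested
// chunk ranges of the given files into the chunk store.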
func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
	type chunkRange struct {
		start, end int64
	}
	parseChunks := func(ranges string) (crs []chunkRange, err error) {
		for _, part := range strings.Split(ranges, ",") {
			var start, end int64 = 0, math.MaxInt64
			switch ints := strings.Split(part, ":"); len(ints) {
			case 1:
				start, err = strconv.ParseInt(ints[0], 10, 64)
				if err != nil {
					return nil, fmt.Errorf("invalid range: %q", part)
				}
				end = start + 1
			case 2:
				if ints[0] != "" {
					start, err = strconv.ParseInt(ints[0], 10, 64)
					if err != nil {
						return nil, fmt.Errorf("invalid range: %q", part)
					}
				}
				if ints[1] != "" {
					end, err = strconv.ParseInt(ints[1], 10, 64)
					if err != nil {
						return nil, fmt.Errorf("invalid range: %q", part)
					}
				}
			default:
				return nil, fmt.Errorf("invalid range: %q", part)
			}
			crs = append(crs, chunkRange{start: start, end: end})
		}
		return
	}
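	// walkChunkRange resolves a single chunkRange against the file size and
	// calls cb for each chunk index it covers; negative indices count from
	// the end of the file (e.g. with 10 chunks, "-5:" covers chunks 5..9).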
	walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) {
		if size <= 0 {
			return
		}
		chunks := (size-1)/f.ChunkSize() + 1

		start, end := cr.start, cr.end
		if start < 0 {
			start += chunks
		}
		if end <= 0 {
			end += chunks
		}
		if end <= start {
			return
		}
		switch {
		case start < 0:
			start = 0
		case start >= chunks:
			return
		}
		switch {
		case end <= start:
			end = start + 1
		case end >= chunks:
			end = chunks
		}
		for i := start; i < end; i++ {
			cb(i)
		}
	}
	walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) {
		for _, cr := range crs {
			walkChunkRange(cr, size, cb)
		}
	}

	v, ok := in["chunks"]
	if !ok {
		return nil, errors.New("missing chunks parameter")
	}
	s, ok := v.(string)
	if !ok {
		return nil, errors.New("invalid chunks parameter")
	}
	delete(in, "chunks")
	crs, err := parseChunks(s)
	if err != nil {
		return nil, fmt.Errorf("invalid chunks parameter: %w", err)
	}
	var files [][2]string
	for k, v := range in {
		if !strings.HasPrefix(k, "file") {
			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
		}
		switch v := v.(type) {
		case string:
			files = append(files, [2]string{v, f.unwrapRemote(v)})
		default:
			return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
		}
	}
	type fileStatus struct {
		Error         string
		FetchedChunks int
	}
	fetchedChunks := make(map[string]fileStatus, len(files))
	for _, pair := range files {
		file, remote := pair[0], pair[1]
		var status fileStatus
		o, err := f.NewObject(ctx, remote)
		if err != nil {
			fetchedChunks[file] = fileStatus{Error: err.Error()}
			continue
		}
		co := o.(*Object)
		err = co.refreshFromSource(ctx, true)
		if err != nil {
			fetchedChunks[file] = fileStatus{Error: err.Error()}
			continue
		}
		handle := NewObjectHandle(ctx, co, f)
		handle.UseMemory = false
		handle.scaleWorkers(1)
		walkChunkRanges(crs, co.Size(), func(chunk int64) {
			_, err := handle.getChunk(chunk * f.ChunkSize())
			if err != nil {
				if status.Error == "" {
					status.Error = err.Error()
				}
			} else {
				status.FetchedChunks++
			}
		})
		fetchedChunks[file] = status
	}

	return rc.Params{"status": fetchedChunks}, nil
}

// receiveChangeNotify handles notifications sent from the wrapped FS about changed files
func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
	if crypt, yes := f.isWrappedByCrypt(); yes {
		decryptedPath, err := crypt.DecryptFileName(forgetPath)
		if err == nil {
			fs.Infof(decryptedPath, "received cache expiry notification")
		} else {
			fs.Infof(forgetPath, "received cache expiry notification")
		}
	} else {
		fs.Infof(forgetPath, "received cache expiry notification")
	}
	// notify upstreams too (vfs)
	f.notifyChangeUpstream(forgetPath, entryType)

	var cd *Directory
	if entryType == fs.EntryObject {
		co := NewObject(f, forgetPath)
		err := f.cache.GetObject(co)
		if err != nil {
			fs.Debugf(f, "got change notification for non cached entry %v", co)
		}
		err = f.cache.ExpireObject(co, true)
		if err != nil {
			fs.Debugf(forgetPath, "notify: error expiring '%v': %v", co, err)
		}
		cd = NewDirectory(f, cleanPath(path.Dir(co.Remote())))
	} else {
		cd = NewDirectory(f, forgetPath)
	}

	// we expire the dir
	err := f.cache.ExpireDir(cd)
	if err != nil {
		fs.Debugf(forgetPath, "notify: error expiring '%v': %v", cd, err)
	} else {
		fs.Debugf(forgetPath, "notify: expired '%v'", cd)
	}

	f.notifiedMu.Lock()
	defer f.notifiedMu.Unlock()
	f.notifiedRemotes[forgetPath] = true
	f.notifiedRemotes[cd.Remote()] = true
}

// notifyChangeUpstreamIfNeeded notifies upstreams when the wrapped remote
// doesn't notify on changes itself or when a temp fs is used
func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) {
	if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" {
		f.notifyChangeUpstream(remote, entryType)
	}
}

// notifyChangeUpstream will loop through all the upstreams and notify
// of the provided remote (should be only a dir)
func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
	f.parentsForgetMu.Lock()
	defer f.parentsForgetMu.Unlock()
	if len(f.parentsForgetFn) > 0 {
		for _, fn := range f.parentsForgetFn {
			fn(remote, entryType)
		}
	}
}

// ChangeNotify can subscribe multiple callers.
// This is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e. VFS) to clear out whenever something changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
	f.parentsForgetMu.Lock()
	defer f.parentsForgetMu.Unlock()
	fs.Debugf(f, "subscribing to ChangeNotify")
	f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
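	// drain the poll interval channel so the sender never blocks on it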
	go func() {
		for range pollInterval {
		}
	}()
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("Cache remote %s:%s", f.name, f.root)
}

// ChunkSize returns the configured chunk size
func (f *Fs) ChunkSize() int64 {
	return int64(f.opt.ChunkSize)
}

// InfoAge returns the configured file age
func (f *Fs) InfoAge() time.Duration {
	return time.Duration(f.opt.InfoAge)
}

// TempUploadWaitTime returns the configured temp file upload wait time
func (f *Fs) TempUploadWaitTime() time.Duration {
	return time.Duration(f.opt.TempWaitTime)
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	var err error

	fs.Debugf(f, "new object '%s'", remote)
	co := NewObject(f, remote)
	// search for entry in cache and validate it
	err = f.cache.GetObject(co)
	if err != nil {
		fs.Debugf(remote, "find: error: %v", err)
	} else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
		fs.Debugf(co, "find: cold object: %+v", co)
	} else {
		fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge)))
		return co, nil
	}

	// search for entry in source or temp fs
	var obj fs.Object
	if f.opt.TempWritePath != "" {
		obj, err = f.tempFs.NewObject(ctx, remote)
		// not found in temp fs
		if err != nil {
			fs.Debugf(remote, "find: not found in local cache fs")
			obj, err = f.Fs.NewObject(ctx, remote)
		} else {
			fs.Debugf(obj, "find: found in local cache fs")
		}
	} else {
		obj, err = f.Fs.NewObject(ctx, remote)
	}

	// not found in either fs
	if err != nil {
		fs.Debugf(obj, "find failed: not found in either local or remote fs")
		return nil, err
	}

	// cache the new entry
	co = ObjectFromOriginal(ctx, f, obj).persist()
	fs.Debugf(co, "find: cached object")
	return co, nil
}

// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "list '%s'", dir)
	cd := ShallowDirectory(f, dir)

	// search for cached dir entries and validate them
	entries, err = f.cache.GetDirEntries(cd)
	if err != nil {
		fs.Debugf(dir, "list: error: %v", err)
	} else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
		fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs)
	} else if len(entries) == 0 {
		// TODO: read empty dirs from source?
		fs.Debugf(dir, "list: empty listing")
	} else {
		fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge)))
		fs.Debugf(dir, "list: cached entries: %v", entries)
		return entries, nil
	}

	// we first search any temporary files stored locally
	var cachedEntries fs.DirEntries
	if f.opt.TempWritePath != "" {
		queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
		if err != nil {
			fs.Errorf(dir, "list: error getting pending uploads: %v", err)
		} else {
			fs.Debugf(dir, "list: read %v from temp fs", len(queuedEntries))
			fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
			for _, queuedRemote := range queuedEntries {
				queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
				if err != nil {
					fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
					continue
				}
				co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
				fs.Debugf(co, "list: cached temp object")
				cachedEntries = append(cachedEntries, co)
			}
		}
	}

	// search from the source
	sourceEntries, err := f.Fs.List(ctx, dir)
	if err != nil {
		return nil, err
	}
	fs.Debugf(dir, "list: read %v from source", len(sourceEntries))
	fs.Debugf(dir, "list: source entries: %v", sourceEntries)
	sort.Sort(sourceEntries)
	for _, entry := range entries {
		entryRemote := entry.Remote()
		i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
		if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
			continue
		}
		fp := path.Join(f.Root(), entryRemote)
		switch entry.(type) {
		case fs.Object:
			_ = f.cache.RemoveObject(fp)
		case fs.Directory:
			_ = f.cache.RemoveDir(fp)
		}
		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
	}
	entries = nil //nolint:ineffassign

	// and then iterate over the ones from source (temp Objects will override source ones)
	var batchDirectories []*Directory
	sort.Sort(cachedEntries)
	tmpCnt := len(cachedEntries)
	for _, entry := range sourceEntries {
		switch o := entry.(type) {
		case fs.Object:
			// skip over temporary objects (might be uploading)
			oRemote := o.Remote()
			i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote })
			if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
				continue
			}
			co := ObjectFromOriginal(ctx, f, o).persist()
			cachedEntries = append(cachedEntries, co)
			fs.Debugf(dir, "list: cached object: %v", co)
		case fs.Directory:
			cdd := DirectoryFromOriginal(ctx, f, o)
			// check if the dir isn't expired and add it in cache if it isn't
			if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
				batchDirectories = append(batchDirectories, cdd)
			}
			cachedEntries = append(cachedEntries, cdd)
		default:
			fs.Debugf(entry, "list: Unknown object type %T", entry)
		}
	}
	err = f.cache.AddBatchDir(batchDirectories)
	if err != nil {
		fs.Errorf(dir, "list: error caching directories from listing %v", dir)
	} else {
		fs.Debugf(dir, "list: cached directories: %v", len(batchDirectories))
	}

	// cache dir meta
	t := time.Now()
	cd.CacheTs = &t
	err = f.cache.AddDir(cd)
	if err != nil {
		fs.Errorf(cd, "list: save error: '%v'", err)
	} else {
		fs.Debugf(dir, "list: cached dir: '%v', cache ts: %v", cd.abs(), cd.CacheTs)
	}

	return cachedEntries, nil
}
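
// recurse lists dir and all of its subdirectories depth-first, adding
// every entry to the ListR helper.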
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}

	for i := 0; i < len(entries); i++ {
		innerDir, ok := entries[i].(fs.Directory)
		if ok {
			err := f.recurse(ctx, innerDir.Remote(), list)
			if err != nil {
				return err
			}
		}

		err := list.Add(entries[i])
		if err != nil {
			return err
		}
	}

	return nil
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "list recursively from '%s'", dir)

	// we check if the source FS supports ListR
	// if it does, we'll use that to get all the entries, cache them and return
	do := f.Fs.Features().ListR
	if do != nil {
		return do(ctx, dir, func(entries fs.DirEntries) error {
			// we got called back with a set of entries so let's cache them and call the original callback
			for _, entry := range entries {
				switch o := entry.(type) {
				case fs.Object:
					_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
				case fs.Directory:
					_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
				default:
					return fmt.Errorf("unknown object type %T", entry)
				}
			}

			// call the original callback
			return callback(entries)
		})
	}

	// if we're here, we're gonna do a standard recursive traversal and cache everything
	list := walk.NewListRHelper(callback)
	err = f.recurse(ctx, dir, list)
	if err != nil {
		return err
	}

	return list.Flush()
}

// Mkdir makes the directory (container, bucket)
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	fs.Debugf(f, "mkdir '%s'", dir)
	err := f.Fs.Mkdir(ctx, dir)
	if err != nil {
		return err
	}
	fs.Debugf(dir, "mkdir: created dir in source fs")

	cd := NewDirectory(f, cleanPath(dir))
	err = f.cache.AddDir(cd)
	if err != nil {
		fs.Errorf(dir, "mkdir: add error: %v", err)
	} else {
		fs.Debugf(cd, "mkdir: added to cache")
	}
	// expire parent of new dir
	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(parentCd, "mkdir: cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "mkdir: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}

// Rmdir removes the directory (container, bucket) if empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	fs.Debugf(f, "rmdir '%s'", dir)

	if f.opt.TempWritePath != "" {
		// pause background uploads
		f.backgroundRunner.pause()
		defer f.backgroundRunner.play()

		// we check if the source exists on the remote and make the same move on it too if it does
		// otherwise, we skip this step
		_, err := f.UnWrap().List(ctx, dir)
		if err == nil {
			err := f.Fs.Rmdir(ctx, dir)
			if err != nil {
				return err
			}
			fs.Debugf(dir, "rmdir: removed dir in source fs")
		}

		var queuedEntries []*Object
		err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
			for _, o := range entries {
				if oo, ok := o.(fs.Object); ok {
					co := ObjectFromOriginal(ctx, f, oo)
					queuedEntries = append(queuedEntries, co)
				}
			}
			return nil
		})
		if err != nil {
			fs.Errorf(dir, "rmdir: error getting pending uploads: %v", err)
		} else {
			fs.Debugf(dir, "rmdir: read %v from temp fs", len(queuedEntries))
			fs.Debugf(dir, "rmdir: temp fs entries: %v", queuedEntries)
			if len(queuedEntries) > 0 {
				fs.Errorf(dir, "rmdir: temporary dir not empty: %v", queuedEntries)
				return fs.ErrorDirectoryNotEmpty
			}
		}
	} else {
		err := f.Fs.Rmdir(ctx, dir)
		if err != nil {
			return err
		}
		fs.Debugf(dir, "rmdir: removed dir in source fs")
	}

	// remove dir data
	d := NewDirectory(f, dir)
	err := f.cache.RemoveDir(d.abs())
	if err != nil {
		fs.Errorf(dir, "rmdir: remove error: %v", err)
	} else {
		fs.Debugf(d, "rmdir: removed from cache")
	}
	// expire parent
	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(dir, "rmdir: cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "rmdir: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
2020-10-13 23:43:40 +02:00
// using server-side move operations.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) DirMove ( ctx context . Context , src fs . Fs , srcRemote , dstRemote string ) error {
2018-01-29 23:05:04 +01:00
fs . Debugf ( f , "move dir '%s'/'%s' -> '%s'/'%s'" , src . Root ( ) , srcRemote , f . Root ( ) , dstRemote )
2017-11-12 18:54:25 +01:00
do := f . Fs . Features ( ) . DirMove
if do == nil {
return fs . ErrorCantDirMove
}
srcFs , ok := src . ( * Fs )
if ! ok {
fs . Errorf ( srcFs , "can't move directory - not same remote type" )
return fs . ErrorCantDirMove
}
if srcFs . Fs . Name ( ) != f . Fs . Name ( ) {
fs . Errorf ( srcFs , "can't move directory - not wrapping same remotes" )
return fs . ErrorCantDirMove
}
2018-05-14 19:06:57 +02:00
if f . opt . TempWritePath != "" {
2018-01-29 23:05:04 +01:00
// pause background uploads
f . backgroundRunner . pause ( )
defer f . backgroundRunner . play ( )
		_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
		_, errInTemp := f.tempFs.List(ctx, srcRemote)
		// not found in either fs
		if errInWrap != nil && errInTemp != nil {
			return fs.ErrorDirNotFound
		}

		// if the source exists on the remote, make the same move there too;
		// otherwise we skip this step
		if errInWrap == nil {
			err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
			if err != nil {
				return err
			}
			fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
		}
		// we need to check if the directory exists in the temp fs
		// and skip the move if it doesn't
		if errInTemp != nil {
			goto cleanup
		}

		var queuedEntries []*Object
		err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
			for _, o := range entries {
				if oo, ok := o.(fs.Object); ok {
					co := ObjectFromOriginal(ctx, f, oo)
					queuedEntries = append(queuedEntries, co)
					if co.tempFileStartedUpload() {
						fs.Errorf(co, "can't move - upload has already started. need to finish that")
						return fs.ErrorCantDirMove
					}
				}
			}
			return nil
		})
		if err != nil {
			return err
		}
		fs.Debugf(srcRemote, "dirmove: read %v from temp fs", len(queuedEntries))
		fs.Debugf(srcRemote, "dirmove: temp fs entries: %v", queuedEntries)

		do := f.tempFs.Features().DirMove
		if do == nil {
			fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
			return fs.ErrorCantDirMove
		}
		err = do(ctx, f.tempFs, srcRemote, dstRemote)
		if err != nil {
			return err
		}
		err = f.cache.ReconcileTempUploads(ctx, f)
		if err != nil {
			return err
		}
	} else {
		err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
		if err != nil {
			return err
		}
		fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
	}
cleanup:

	// delete src dir from cache along with all chunks
	srcDir := NewDirectory(srcFs, srcRemote)
	err := f.cache.RemoveDir(srcDir.abs())
	if err != nil {
		fs.Errorf(srcDir, "dirmove: remove error: %v", err)
	} else {
		fs.Debugf(srcDir, "dirmove: removed cached dir")
	}
	// expire src parent
	srcParent := NewDirectory(f, cleanPath(path.Dir(srcRemote)))
	err = f.cache.ExpireDir(srcParent)
	if err != nil {
		fs.Errorf(srcParent, "dirmove: cache expire error: %v", err)
	} else {
		fs.Debugf(srcParent, "dirmove: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)

	// expire parent dir at the destination path
	dstParent := NewDirectory(f, cleanPath(path.Dir(dstRemote)))
	err = f.cache.ExpireDir(dstParent)
	if err != nil {
		fs.Errorf(dstParent, "dirmove: cache expire error: %v", err)
	} else {
		fs.Debugf(dstParent, "dirmove: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(dstParent.Remote(), fs.EntryDirectory)

	// TODO: precache dst dir and save the chunks
	return nil
}
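
// Note the invalidation pattern in the cleanup above, repeated by put, Copy
// and Move below: after a mutation, the affected directory listings are
// expired in the cache (ExpireDir) and, when the wrapped remote has no
// ChangeNotify of its own, the change is advertised upstream
// (notifyChangeUpstreamIfNeeded) so that wrapping layers (a crypt remote or
// a mount, for example) can refresh their view.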
// cacheReader splits the stream of a reader so it can be cached at the same
// time it is read by the original consumer
func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn io.Reader)) {
	// create the pipe and tee reader
	pr, pw := io.Pipe()
	tr := io.TeeReader(u, pw)

	// create channel to synchronize
	done := make(chan bool)
	defer close(done)

	go func() {
		// notify the cache reader that we're complete after the source FS finishes
		defer func() {
			_ = pw.Close()
		}()
		// process original reading
		originalRead(tr)
		// signal complete
		done <- true
	}()

	go func() {
		var offset int64
		for {
			chunk := make([]byte, f.opt.ChunkSize)
			readSize, err := io.ReadFull(pr, chunk)
			// we ignore 3 failures which are ok:
			// 1. EOF - original reading finished and we got a full buffer too
			// 2. ErrUnexpectedEOF - original reading finished and we got a partial buffer
			// 3. ErrClosedPipe - the source remote reader was closed (usually means it reached the end) and we need to stop too
			// any other error means we error out the original reading too and stop
			if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF && err != io.ErrClosedPipe {
				fs.Errorf(src, "error saving new data in cache. offset: %v, err: %v", offset, err)
				_ = pr.CloseWithError(err)
				break
			}
			// if we have some bytes we cache them
			if readSize > 0 {
				chunk = chunk[:readSize]
				err2 := f.cache.AddChunk(cleanPath(path.Join(f.root, src.Remote())), chunk, offset)
				if err2 != nil {
					fs.Errorf(src, "error saving new data in cache '%v'", err2)
					_ = pr.CloseWithError(err2)
					break
				}
				offset += int64(readSize)
			}
			// everything should be closed already but let's be sure
			if err == io.EOF || err == io.ErrUnexpectedEOF || err == io.ErrClosedPipe {
				_ = pr.Close()
				break
			}
		}
		// signal complete
		done <- true
	}()

	// wait until both goroutines are done
	for c := 0; c < 2; c++ {
		<-done
	}
}
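
// The tee mechanism above in miniature (an illustrative sketch, not used by
// the backend; consume and persist are hypothetical):
//
//	pr, pw := io.Pipe()
//	tr := io.TeeReader(src, pw)
//	go func() {
//		defer pw.Close() // unblocks the persisting side with EOF
//		consume(tr)      // the "original read" - every byte it reads is copied to pw
//	}()
//	persist(pr) // sees an exact copy of what consume read, chunk by chunk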
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)

// put uploads to the remote path with the given putFn and updates the cache
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
	var err error
	var obj fs.Object

	// queue for upload and store in temp fs if configured
	if f.opt.TempWritePath != "" {
		// we need to clear the caches before a put through the temp fs
		parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
		_ = f.cache.ExpireDir(parentCd)
		f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

		obj, err = f.tempFs.Put(ctx, in, src, options...)
		if err != nil {
			fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
			return nil, err
		}
		fs.Infof(obj, "put: uploaded in temp fs")
		err = f.cache.addPendingUpload(path.Join(f.Root(), src.Remote()), false)
		if err != nil {
			fs.Errorf(obj, "put: failed to queue for upload: %v", err)
			return nil, err
		}
		fs.Infof(obj, "put: queued for upload")
		// if cache writes are enabled, write it through the cache first
	} else if f.opt.StoreWrites {
		f.cacheReader(in, src, func(inn io.Reader) {
			obj, err = put(ctx, inn, src, options...)
		})
		if err == nil {
			fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
		}
		// last option: save it directly to the remote fs
	} else {
		obj, err = put(ctx, in, src, options...)
		if err == nil {
			fs.Debugf(obj, "put: uploaded to remote fs")
		}
	}
	// validate and stop if errors are found
	if err != nil {
		fs.Errorf(src, "put: error uploading: %v", err)
		return nil, err
	}

	// cache the new file
	cachedObj := ObjectFromOriginal(ctx, f, obj)
	// delete cached chunks and info so they can be replaced with new ones
	_ = f.cache.RemoveObject(cachedObj.abs())
	cachedObj.persist()
	fs.Debugf(cachedObj, "put: added to cache")

	// expire parent
	parentCd := NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(cachedObj, "put: cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "put: cache expired")
	}
	// advertise to ChangeNotify
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return cachedObj, nil
}
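
// Put, PutUnchecked and PutStream below are thin wrappers: the upload and
// cache bookkeeping all live in put above, and the only thing that varies
// is the final uploader passed in as the putFn parameter (f.Fs.Put or the
// wrapped remote's PutUnchecked/PutStream feature).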
// Put the object into the remote path with the given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	fs.Debugf(f, "put data at '%s'", src.Remote())
	return f.put(ctx, in, src, options, f.Fs.Put)
}

// PutUnchecked uploads the object
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	do := f.Fs.Features().PutUnchecked
	if do == nil {
		return nil, errors.New("can't PutUnchecked")
	}
	fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
	return f.put(ctx, in, src, options, do)
}

// PutStream uploads the object using a streaming upload
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	do := f.Fs.Features().PutStream
	if do == nil {
		return nil, errors.New("can't PutStream")
	}
	fs.Debugf(f, "put data streaming in '%s'", src.Remote())
	return f.put(ctx, in, src, options, do)
}
// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)

	do := f.Fs.Features().Copy
	if do == nil {
		fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
		return nil, fs.ErrorCantCopy
	}
	if f.opt.TempWritePath != "" && src.Fs() == f.tempFs {
		return nil, fs.ErrorCantCopy
	}
	// the source must be a cached object or we abort
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Errorf(srcObj, "can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	// both the source cache fs and this cache fs need to wrap the same remote
	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
		fs.Errorf(srcObj, "can't copy - not wrapping same remotes")
		return nil, fs.ErrorCantCopy
	}
	// refresh from source or abort
	if err := srcObj.refreshFromSource(ctx, false); err != nil {
		fs.Errorf(f, "can't copy %v - %v", src, err)
		return nil, fs.ErrorCantCopy
	}

	if srcObj.isTempFile() {
		// we check if the feature is still active
		if f.opt.TempWritePath == "" {
			fs.Errorf(srcObj, "can't copy - this is a locally cached file but the feature is turned off this run")
			return nil, fs.ErrorCantCopy
		}
		do = srcObj.ParentFs.Features().Copy
		if do == nil {
			fs.Errorf(src, "parent remote (%v) doesn't support Copy", srcObj.ParentFs)
			return nil, fs.ErrorCantCopy
		}
	}

	obj, err := do(ctx, srcObj.Object, remote)
	if err != nil {
		fs.Errorf(srcObj, "error copying in cache: %v", err)
		return nil, err
	}
	fs.Debugf(obj, "copy: file copied")

	// persist new
	co := ObjectFromOriginal(ctx, f, obj).persist()
	fs.Debugf(co, "copy: added to cache")
	// expire the destination path
	parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(parentCd, "copy: cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "copy: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
	// expire src parent
	srcParent := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
	err = f.cache.ExpireDir(srcParent)
	if err != nil {
		fs.Errorf(srcParent, "copy: cache expire error: %v", err)
	} else {
		fs.Infof(srcParent, "copy: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)

	return co, nil
}
// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	fs.Debugf(f, "moving obj '%s' -> %s", src, remote)

	// if the source fs doesn't support move, abort
	do := f.Fs.Features().Move
	if do == nil {
		fs.Errorf(src, "source remote (%v) doesn't support Move", src.Fs())
		return nil, fs.ErrorCantMove
	}
	// the source must be a cached object or we abort
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Errorf(srcObj, "can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	// both the source cache fs and this cache fs need to wrap the same remote
	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
		fs.Errorf(srcObj, "can't move - not wrapping same remotes")
		return nil, fs.ErrorCantMove
	}
	// refresh from source or abort
	if err := srcObj.refreshFromSource(ctx, false); err != nil {
		fs.Errorf(f, "can't move %v - %v", src, err)
		return nil, fs.ErrorCantMove
	}

	// if this is a temp object then we perform the changes locally
	if srcObj.isTempFile() {
		// we check if the feature is still active
		if f.opt.TempWritePath == "" {
			fs.Errorf(srcObj, "can't move - this is a locally cached file but the feature is turned off this run")
			return nil, fs.ErrorCantMove
		}
		// pause background uploads
		f.backgroundRunner.pause()
		defer f.backgroundRunner.play()

		// started uploads can't be moved until they complete
		if srcObj.tempFileStartedUpload() {
			fs.Errorf(srcObj, "can't move - upload has already started. need to finish that")
			return nil, fs.ErrorCantMove
		}
		do = f.tempFs.Features().Move

		// we must also update the pending queue
		err := f.cache.updatePendingUpload(srcObj.abs(), func(item *tempUploadInfo) error {
			item.DestPath = path.Join(f.Root(), remote)
			item.AddedOn = time.Now()
			return nil
		})
		if err != nil {
			fs.Errorf(srcObj, "failed to rename queued file for upload: %v", err)
			return nil, fs.ErrorCantMove
		}
		fs.Debugf(srcObj, "move: queued file moved to %v", remote)
	}

	obj, err := do(ctx, srcObj.Object, remote)
	if err != nil {
		fs.Errorf(srcObj, "error moving: %v", err)
		return nil, err
	}
	fs.Debugf(obj, "move: file moved")

	// remove old
	err = f.cache.RemoveObject(srcObj.abs())
	if err != nil {
		fs.Errorf(srcObj, "move: remove error: %v", err)
	} else {
		fs.Debugf(srcObj, "move: removed from cache")
	}
	// expire old parent
	parentCd := NewDirectory(f, cleanPath(path.Dir(srcObj.Remote())))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(parentCd, "move: parent cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "move: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
	// persist new
	cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
	fs.Debugf(cachedObj, "move: added to cache")
	// expire new parent
	parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(parentCd, "move: expire error: %v", err)
	} else {
		fs.Infof(parentCd, "move: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return cachedObj, nil
}
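
// For queued (temp file) moves, note the extra bookkeeping above: the
// background uploader is paused for the duration of the move and the
// pending-upload queue entry is rewritten (updatePendingUpload) so that the
// eventual upload lands at the new destination path.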
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return f.Fs.Hashes()
}
// Purge all files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
	if dir == "" {
		// FIXME this isn't quite right as it should purge the dir prefix
		fs.Infof(f, "purging cache")
		f.cache.Purge()
	}

	do := f.Fs.Features().Purge
	if do == nil {
		return fs.ErrorCantPurge
	}
	return do(ctx, dir)
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
	f.CleanUpCache(false)

	do := f.Fs.Features().CleanUp
	if do == nil {
		return nil
	}
	return do(ctx)
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	do := f.Fs.Features().About
	if do == nil {
		return nil, errors.New("not supported by underlying remote")
	}
	return do(ctx)
}
// Stats returns stats about the cache storage
func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
	return f.cache.Stats()
}
// openRateLimited will execute a closure under a rate limiter watch
func (f *Fs) openRateLimited(fn func() (io.ReadCloser, error)) (io.ReadCloser, error) {
	var err error
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	start := time.Now()

	if err = f.rateLimiter.Wait(ctx); err != nil {
		return nil, err
	}

	elapsed := time.Since(start)
	if elapsed > time.Second*2 {
		fs.Debugf(f, "rate limited: %s", elapsed)
	}
	return fn()
}
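
// A minimal sketch of the gate used above (illustrative only; the limiter
// value here is hypothetical - the real one is built from the rps option):
//
//	limiter := rate.NewLimiter(rate.Limit(10), 1) // ~10 calls/s, burst of 1
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	if err := limiter.Wait(ctx); err != nil {
//		return nil, err // gave up waiting for a token
//	}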
// CleanUpCache will cleanup only the cache data that is expired
func (f *Fs) CleanUpCache(ignoreLastTs bool) {
	f.cleanupMu.Lock()
	defer f.cleanupMu.Unlock()

	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) {
		f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize))
		f.lastChunkCleanup = time.Now()
	}
}
// StopBackgroundRunners will signal all the runners to stop their work.
// It can be triggered from a terminate signal or from testing between runs.
func (f *Fs) StopBackgroundRunners() {
	f.cleanupChan <- false
	if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
		f.backgroundRunner.close()
	}
	f.cache.Close()
	fs.Debugf(f, "Services stopped")
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.Fs
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}
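
// WrapFs/SetWrapper exist because cache is itself usually wrapped by another
// remote (crypt, most commonly - see isWrappedByCrypt below), and this
// backend sometimes needs to find the remote wrapping it.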
// isWrappedByCrypt checks if this fs is wrapped by a crypt remote
func (f *Fs) isWrappedByCrypt() (*crypt.Fs, bool) {
	if f.wrapper == nil {
		return nil, false
	}
	c, ok := f.wrapper.(*crypt.Fs)
	return c, ok
}
// cleanRootFromPath trims the root of the current fs from a path
func (f *Fs) cleanRootFromPath(p string) string {
	if f.Root() != "" {
		p = p[len(f.Root()):] // trim out root
		if len(p) > 0 {       // remove first separator
			p = p[1:]
		}
	}
	return p
}

// isRootInPath reports whether p lies under the root of the current fs
func (f *Fs) isRootInPath(p string) bool {
	if f.Root() == "" {
		return true
	}
	return strings.HasPrefix(p, f.Root()+"/")
}
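
// Worked examples for the two helpers above, assuming f.Root() == "a/b":
//
//	cleanRootFromPath("a/b/c/file.txt") // -> "c/file.txt"
//	isRootInPath("a/b/c/file.txt")      // -> true
//	isRootInPath("a/bc/file.txt")       // -> false (the prefix must end at a "/")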
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	do := f.Fs.Features().MergeDirs
	if do == nil {
		return errors.New("MergeDirs not supported")
	}
	for _, dir := range dirs {
		_ = f.cache.RemoveDir(dir.Remote())
	}
	return do(ctx, dirs)
}
// DirCacheFlush flushes the dir cache
func (f *Fs) DirCacheFlush() {
	_ = f.cache.RemoveDir("")
}
// GetBackgroundUploadChannel returns a channel that can be listened to for
// remote activities that happen in the background
func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
	if f.opt.TempWritePath != "" {
		return f.backgroundRunner.notifyCh
	}
	return nil
}
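
// A consumer sketch (illustrative only; nil is returned when no temp write
// path is configured, so always check for it):
//
//	if ch := f.GetBackgroundUploadChannel(); ch != nil {
//		go func() {
//			for state := range ch {
//				fs.Debugf(nil, "background upload: %v", state)
//			}
//		}()
//	}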
// isNotifiedRemote reports whether a notification was already recorded for
// this remote and clears the flag
func (f *Fs) isNotifiedRemote(remote string) bool {
	f.notifiedMu.Lock()
	defer f.notifiedMu.Unlock()

	n, ok := f.notifiedRemotes[remote]
	if !ok || !n {
		return false
	}

	delete(f.notifiedRemotes, remote)
	return n
}
// cleanPath returns a normalized path, mapping "." and "/" to ""
func cleanPath(p string) string {
	p = path.Clean(p)
	if p == "." || p == "/" {
		p = ""
	}
	return p
}
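
// For example:
//
//	cleanPath("a/b/../c") // -> "a/c"
//	cleanPath("a/")       // -> "a"
//	cleanPath(".")        // -> ""
//	cleanPath("/")        // -> ""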
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
	do := f.Fs.Features().UserInfo
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	return do(ctx)
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
	do := f.Fs.Features().Disconnect
	if do == nil {
		return fs.ErrorNotImplemented
	}
	return do(ctx)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
	do := f.Fs.Features().Shutdown
	if do == nil {
		return nil
	}
	return do(ctx)
}
var commandHelp = []fs.CommandHelp{
	{
		Name:  "stats",
		Short: "Print stats on the cache backend in JSON format.",
	},
}
// Command runs the named backend command
//
// name is the command to run, arg holds any positional arguments and opt
// holds any optional named arguments.
//
// The result should be capable of being JSON encoded.
// If it is a string or a []string it will be shown to the user,
// otherwise it will be JSON encoded and shown to the user like that.
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
	switch name {
	case "stats":
		return f.Stats()
	default:
		return nil, fs.ErrorCommandNotFound
	}
}
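
// From the command line this is reached via "rclone backend", e.g. for a
// remote named "cache:":
//
//	rclone backend stats cache: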
// Check the interfaces are satisfied
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Purger         = (*Fs)(nil)
	_ fs.Copier         = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
	_ fs.DirMover       = (*Fs)(nil)
	_ fs.PutUncheckeder = (*Fs)(nil)
	_ fs.PutStreamer    = (*Fs)(nil)
	_ fs.CleanUpper     = (*Fs)(nil)
	_ fs.UnWrapper      = (*Fs)(nil)
	_ fs.Wrapper        = (*Fs)(nil)
	_ fs.ListRer        = (*Fs)(nil)
	_ fs.ChangeNotifier = (*Fs)(nil)
	_ fs.Abouter        = (*Fs)(nil)
	_ fs.UserInfoer     = (*Fs)(nil)
	_ fs.Disconnecter   = (*Fs)(nil)
	_ fs.Commander      = (*Fs)(nil)
	_ fs.MergeDirser    = (*Fs)(nil)
	_ fs.Shutdowner     = (*Fs)(nil)
)