// rclone/backend/cache/cache_internal_test.go

//go:build !plan9 && !js && !race
// +build !plan9,!js,!race
package cache_test
import (
"bytes"
"context"
"encoding/base64"
"errors"
goflag "flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
"github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/require"
)
const (
// these 2 passwords are random test values
cryptPassword1 = "3XcvMMdsV3d-HGAReTMdNH-5FcX5q32_lUeA" // oGJdUbQc7s8
cryptPassword2 = "NlgTBEIe-qibA7v-FoMfuX6Cw8KlLai_aMvV" // mv4mZW572HM
cryptedTextBase64 = "UkNMT05FAAC320i2xIee0BiNyknSPBn+Qcw3q9FhIFp3tvq6qlqvbsno3PnxmEFeJG3jDBnR/wku2gHWeQ==" // one content
cryptedText2Base64 = "UkNMT05FAAATcQkVsgjBh8KafCKcr0wdTa1fMmV0U8hsCLGFoqcvxKVmvv7wx3Hf5EXxFcki2FFV4sdpmSrb9Q==" // updated content
cryptedText3Base64 = "UkNMT05FAAB/f7YtYKbPfmk9+OX/ffN3qG3OEdWT+z74kxCX9V/YZwJ4X2DN3HOnUC3gKQ4Gcoud5UtNvQ==" // test content
letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
)
var (
remoteName string
uploadDir string
runInstance *run
errNotSupported = errors.New("not supported")
decryptedToEncryptedRemotes = map[string]string{
"one": "lm4u7jjt3c85bf56vjqgeenuno",
"second": "qvt1ochrkcfbptp5mu9ugb2l14",
"test": "jn4tegjtpqro30t3o11thb4b5s",
"test2": "qakvqnh8ttei89e0gc76crpql4",
"data.bin": "0q2847tfko6mhj3dag3r809qbc",
"ticw/data.bin": "5mv97b0ule6pht33srae5pice8/0q2847tfko6mhj3dag3r809qbc",
"tiuufo/test/one": "vi6u1olqhirqv14cd8qlej1mgo/jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno",
"tiuufo/test/second": "vi6u1olqhirqv14cd8qlej1mgo/jn4tegjtpqro30t3o11thb4b5s/qvt1ochrkcfbptp5mu9ugb2l14",
"tiutfo/test/one": "legd371aa8ol36tjfklt347qnc/jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno",
"tiutfo/second/one": "legd371aa8ol36tjfklt347qnc/qvt1ochrkcfbptp5mu9ugb2l14/lm4u7jjt3c85bf56vjqgeenuno",
"second/one": "qvt1ochrkcfbptp5mu9ugb2l14/lm4u7jjt3c85bf56vjqgeenuno",
"test/one": "jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno",
"test/second": "jn4tegjtpqro30t3o11thb4b5s/qvt1ochrkcfbptp5mu9ugb2l14",
"one/test": "lm4u7jjt3c85bf56vjqgeenuno/jn4tegjtpqro30t3o11thb4b5s",
"one/test/data.bin": "lm4u7jjt3c85bf56vjqgeenuno/jn4tegjtpqro30t3o11thb4b5s/0q2847tfko6mhj3dag3r809qbc",
"second/test/data.bin": "qvt1ochrkcfbptp5mu9ugb2l14/jn4tegjtpqro30t3o11thb4b5s/0q2847tfko6mhj3dag3r809qbc",
"test/third": "jn4tegjtpqro30t3o11thb4b5s/2nd7fjiop5h3ihfj1vl953aa5g",
"test/0.bin": "jn4tegjtpqro30t3o11thb4b5s/e6frddt058b6kvbpmlstlndmtk",
"test/1.bin": "jn4tegjtpqro30t3o11thb4b5s/kck472nt1k7qbmob0mt1p1crgc",
"test/2.bin": "jn4tegjtpqro30t3o11thb4b5s/744oe9ven2rmak4u27if51qk24",
"test/3.bin": "jn4tegjtpqro30t3o11thb4b5s/2bjd8kef0u5lmsu6qhqll34bcs",
"test/4.bin": "jn4tegjtpqro30t3o11thb4b5s/cvjs73iv0a82v0c7r67avllh7s",
"test/5.bin": "jn4tegjtpqro30t3o11thb4b5s/0plkdo790b6bnmt33qsdqmhv9c",
"test/6.bin": "jn4tegjtpqro30t3o11thb4b5s/s5r633srnjtbh83893jovjt5d0",
"test/7.bin": "jn4tegjtpqro30t3o11thb4b5s/6rq45tr9bjsammku622flmqsu4",
"test/8.bin": "jn4tegjtpqro30t3o11thb4b5s/37bc6tcl3e31qb8cadvjb749vk",
"test/9.bin": "jn4tegjtpqro30t3o11thb4b5s/t4pr35hnls32789o8fk0chk1ec",
}
)
func init() {
goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
goflag.StringVar(&uploadDir, "upload-dir-internal", "", "Directory to use for temporary uploads, defaults to a new temp dir")
}
// TestMain drives the tests
func TestMain(m *testing.M) {
goflag.Parse()
var rc int
log.Printf("Running with the following params: \n remote: %v", remoteName)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
}
func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
// Instantiate inner fs
innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
listRootInner, err := runInstance.list(t, rootFs, innerFolder)
require.NoError(t, err)
listInner, err := rootFs2.List(context.Background(), "")
require.NoError(t, err)
require.Len(t, listRoot, 1)
require.Len(t, listRootInner, 1)
require.Len(t, listInner, 1)
}
/* TODO: is this testing something?
func TestInternalVfsCache(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 30
testSize := int64(524288000)
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
runInstance.writeObjectString(t, rootFs, "test/second", "content")
_, err = rootFs.List(context.Background(), "test")
require.NoError(t, err)
testReader := runInstance.randomReader(t, testSize)
writeCh := make(chan interface{})
//write2Ch := make(chan interface{})
readCh := make(chan interface{})
cacheCh := make(chan interface{})
// write the main file
go func() {
defer func() {
writeCh <- true
}()
log.Printf("========== started writing file 'test/one'")
runInstance.writeRemoteReader(t, rootFs, "test/one", testReader)
log.Printf("========== done writing file 'test/one'")
}()
// routine to check which cache has what, autostarts
go func() {
for {
select {
case <-cacheCh:
log.Printf("========== finished checking caches")
return
default:
}
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
log.Printf("========== '%v' IN CACHE", r)
}
_, err = os.Stat(path.Join(runInstance.vfsCachePath, id, r))
if err != nil {
log.Printf("========== '%v' not in vfs", r)
} else {
log.Printf("========== '%v' IN VFS", r)
}
}
time.Sleep(time.Second * 10)
}
}()
// routine to list, autostarts
go func() {
for {
select {
case <-readCh:
log.Printf("========== finished checking listings and readings")
return
default:
}
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
log.Printf("========== error listing 'test' folder: %v", err)
} else {
log.Printf("========== list 'test' folder count: %v", len(li))
}
time.Sleep(time.Second * 10)
}
}()
// wait for main file to be written
<-writeCh
log.Printf("========== waiting for VFS to expire")
time.Sleep(time.Second * 120)
// try a final read
li2 := [2]string{"test/one", "test/second"}
for _, r := range li2 {
_, err := runInstance.readDataFromRemote(t, rootFs, r, int64(0), int64(2), false)
if err != nil {
log.Printf("========== error reading '%v': %v", r, err)
} else {
log.Printf("========== read '%v'", r)
}
}
// close the cache and list checkers
cacheCh <- true
readCh <- true
}
*/
func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
wrappedFs := cfs.UnWrap()
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
runInstance.writeObjectBytes(t, wrappedFs, runInstance.encryptRemoteIfNeeded(t, "test"), testData)
listRoot, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, listRoot, 1)
cachedData, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
require.NoError(t, err)
require.Equal(t, "test content", string(cachedData))
2017-11-12 18:54:25 +01:00
err = runInstance.rm(t, rootFs, "test")
2017-11-12 18:54:25 +01:00
require.NoError(t, err)
listRoot, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, listRoot, 0)
}
func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
obj, err := rootFs.NewObject(context.Background(), "404")
2017-11-12 18:54:25 +01:00
require.Error(t, err)
require.Nil(t, obj)
}
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
// write the object
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// check sample of data from in-file
sampleStart := chunkSize / 2
sampleEnd := chunkSize
testSample := testData[sampleStart:sampleEnd]
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", sampleStart, sampleEnd, false)
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), sampleEnd-sampleStart)
require.Equal(t, checkSample, testSample)
}
func TestInternalDoubleWrittenContentMatches(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
// write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content")
err := runInstance.updateData(t, rootFs, "one", "one content", " updated")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "one", "one content updated", " double")
require.NoError(t, err)
// check sample of data from in-file
data, err := runInstance.readDataFromRemote(t, rootFs, "one", int64(0), int64(len("one content updated double")), true)
require.NoError(t, err)
require.Equal(t, "one content updated double", string(data))
}
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
var err error
// create some rand test data
var testData1 []byte
var testData2 []byte
if runInstance.rootIsCrypt {
testData1, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err)
} else {
testData1 = []byte(random.String(100))
testData2 = []byte(random.String(200))
}
// write the object
o := runInstance.updateObjectRemote(t, rootFs, "data.bin", testData1, testData2)
require.Equal(t, o.Size(), int64(len(testData2)))
// check data from in-file
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(len(testData2)), false)
require.NoError(t, err)
require.Equal(t, checkSample, testData2)
}
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testSize := chunkSize*4 + chunkSize/2
testData := randStringBytes(int(testSize))
// write the object
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
require.Equal(t, o.Size(), testSize)
time.Sleep(time.Second * 3)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
for i := 0; i < len(checkSample); i++ {
require.Equal(t, testData[i], checkSample[i])
}
}
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testSize := chunkSize*10 + chunkSize/2
testData := randStringBytes(int(testSize))
// write the object
runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
time.Sleep(time.Second * 3)
readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
require.NoError(t, err)
for i := 0; i < len(readData); i++ {
require.Equalf(t, testData[i], readData[i], "at byte %v", i)
}
}
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
originalSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
expectedSize := int64(len([]byte("test content")))
var data2 []byte
if runInstance.rootIsCrypt {
data2, err = base64.StdEncoding.DecodeString(cryptedText3Base64)
require.NoError(t, err)
expectedSize = expectedSize + 1 // FIXME newline gets in, likely test data issue
} else {
data2 = []byte("test content")
}
objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
log.Printf("updated size: %v", len(data2))
// get a new instance from the cache
if runInstance.wrappedIsExternal {
err = runInstance.retryBlock(func() error {
coSize, err := runInstance.size(t, rootFs, "data.bin")
if err != nil {
return err
}
if coSize != expectedSize {
return fmt.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
require.NoError(t, err)
} else {
coSize, err := runInstance.size(t, rootFs, "data.bin")
require.NoError(t, err)
require.NotEqual(t, coSize, expectedSize)
}
}
func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
srcName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "data.bin")
dstName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "second") + "/" + runInstance.encryptRemoteIfNeeded(t, "data.bin")
// create some rand test data
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
// list in mount
_, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
_, err = runInstance.list(t, rootFs, "test/one")
require.NoError(t, err)
// move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
require.NoError(t, err)
err = runInstance.retryBlock(func() error {
li, err := runInstance.list(t, rootFs, "test")
if err != nil {
log.Printf("err: %v", err)
return err
}
if len(li) != 2 {
log.Printf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
if err != nil {
log.Printf("err: %v", err)
return err
}
if len(li) != 0 {
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
if err != nil {
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
log.Printf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing: %v", li)
return nil
}, 12, time.Second*10)
require.NoError(t, err)
}
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
srcName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test")
dstName := runInstance.encryptRemoteIfNeeded(t, "test") + "/" + runInstance.encryptRemoteIfNeeded(t, "one") + "/" + runInstance.encryptRemoteIfNeeded(t, "test2")
// create some rand test data
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
err = rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "test/one")
require.NoError(t, err)
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
// list in mount
_, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
_, err = runInstance.list(t, rootFs, "test/one")
require.NoError(t, err)
found := boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, found)
boltDb.Purge()
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
require.False(t, found)
// move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
require.NoError(t, err)
err = runInstance.retryBlock(func() error {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
log.Printf("err: %v", err)
return err
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
}, 12, time.Second*10)
require.NoError(t, err)
}
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
cfs.DirCacheFlush() // flush the cache
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
}
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
earliestTime := time.Now()
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
expectedTs := time.Now()
ts, err := boltDb.GetChunkTs(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "data.bin")), 0)
require.NoError(t, err)
require.WithinDuration(t, expectedTs, ts, expectedTs.Sub(earliestTime))
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
totalChunks := 20
// create some rand test data
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
co, ok := o.(*cache.Object)
require.True(t, ok)
for i := 0; i < 4; i++ { // read first 4
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
// the last 2 **must** be in the cache
require.True(t, boltDb.HasChunk(co, chunkSize*2))
require.True(t, boltDb.HasChunk(co, chunkSize*3))
for i := 4; i < 6; i++ { // read next 2
_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
// the last 2 **must** be in the cache
require.True(t, boltDb.HasChunk(co, chunkSize*4))
require.True(t, boltDb.HasChunk(co, chunkSize*5))
}
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined info_age
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
// create some rand test data
runInstance.writeRemoteString(t, rootFs, "one", "one content")
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/second", "second content")
l, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, l, 1)
err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
require.NoError(t, err)
l, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, l, 1)
err = runInstance.retryBlock(func() error {
l, err = runInstance.list(t, rootFs, "test")
if err != nil {
return err
}
if len(l) != 2 {
return errors.New("list is not 2")
}
return nil
}, 10, time.Second)
require.NoError(t, err)
}
func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
if runInstance.rootIsCrypt {
t.Skipf("skipping crypt")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
err = cfs.UnWrap().Mkdir(context.Background(), "test")
require.NoError(t, err)
for i := 1; i <= 4; i++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
require.NoError(t, err)
for j := 1; j <= 4; j++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
require.NoError(t, err)
runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
}
}
di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
time.Sleep(time.Second * 30)
di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
require.NoError(t, err)
log.Printf("len: %v", len(di))
require.Len(t, di, 1)
di, err = runInstance.list(t, rootFs, "test/dir1")
require.NoError(t, err)
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
di, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
log.Printf("len: %v", len(di))
require.Len(t, di, 4)
}
// run holds the remotes for a test run
type run struct {
okDiff time.Duration
runDefaultCfgMap configmap.Simple
tmpUploadDir string
rootIsCrypt bool
wrappedIsExternal bool
tempFiles []*os.File
dbPath string
chunkPath string
vfsCachePath string
}
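// newRun prepares the shared test state: it loads the cache backend's option
// defaults and sets up the temporary upload directory.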
func newRun() *run {
var err error
r := &run{
okDiff: time.Second * 9, // really big diff here, but the build machines seem to be slow; we need a different way to handle this
}
// Read in all the defaults for all the options
fsInfo, err := fs.Find("cache")
if err != nil {
panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
}
r.runDefaultCfgMap = configmap.Simple{}
for _, option := range fsInfo.Options {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
}
if uploadDir == "" {
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
} else {
r.tmpUploadDir = uploadDir
}
log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
return r
}
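// encryptRemoteIfNeeded maps a plaintext remote name to its encrypted form
// when the test root is wrapped by a crypt remote.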
func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
if !runInstance.rootIsCrypt || len(decryptedToEncryptedRemotes) == 0 {
return remote
}
enc, ok := decryptedToEncryptedRemotes[remote]
if !ok {
t.Fatalf("Failed to find decrypted -> encrypted mapping for '%v'", remote)
return remote
}
return enc
}
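// newCacheFs creates (or reuses) a cache remote for the test, optionally
// purging previous data, and returns it together with its persistent bolt DB.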
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.FileSections() {
if s == remote {
remoteExists = true
}
}
if !remoteExists && needRemote {
t.Skipf("Need remote (%v) to exist", remote)
return nil, nil
}
// Config to pass to NewFs
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}
// if the remote doesn't exist, create a new one with a local one for it
// identify which is the cache remote (it can be wrapped by a crypt too)
rootIsCrypt := false
cacheRemote := remote
if !remoteExists {
localRemote := remote + "-local"
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.FileGet(remote, "type")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
}
if remoteType != "cache" {
if remoteType == "crypt" {
rootIsCrypt = true
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.FileGet(remoteWrapping, "type")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
}
cacheRemote = remoteWrapping
}
}
runInstance.rootIsCrypt = rootIsCrypt
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
ci := fs.GetConfig(context.Background())
ci.LowLevelRetries = 1
// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := cache.NewFs(context.Background(), remote, id, m)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
_, isCache := cfs.Features().UnWrap().(*cache.Fs)
_, isCrypt := cfs.Features().UnWrap().(*crypt.Fs)
_, isLocal := cfs.Features().UnWrap().(*local.Fs)
if isCache || isCrypt || isLocal {
r.wrappedIsExternal = false
} else {
r.wrappedIsExternal = true
}
if purge {
_ = f.Features().Purge(context.Background(), "")
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb
}
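// cleanupFs purges the remote, stops the cache background runners and removes
// the temporary upload directory and any temp files created during the test.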
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
cfs.StopBackgroundRunners()
err = os.RemoveAll(r.tmpUploadDir)
2017-11-12 18:54:25 +01:00
require.NoError(t, err)
for _, f := range r.tempFiles {
_ = f.Close()
_ = os.Remove(f.Name())
}
r.tempFiles = nil
debug.FreeOSMemory()
}
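// randomReader returns a reader over size bytes of random data, backed by a
// temp file that is cleaned up in cleanupFs.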
func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
for i := 0; i < int(cnt); i++ {
data := randStringBytes(int(chunk))
_, _ = f.Write(data)
}
data := randStringBytes(int(left))
_, _ = f.Write(data)
_, _ = f.Seek(int64(0), io.SeekStart)
r.tempFiles = append(r.tempFiles, f)
return f
}
func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
r.writeRemoteBytes(t, f, remote, []byte(content))
}
func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object {
return r.writeObjectBytes(t, f, remote, []byte(content))
}
func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
r.writeObjectBytes(t, f, remote, data)
}
func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
r.writeObjectReader(t, f, remote, in)
}
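// writeObjectBytes uploads data to remote on f and returns the new object,
// checking that its size matches the input.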
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
in := bytes.NewReader(data)
_ = r.writeObjectReader(t, f, remote, in)
o, err := f.NewObject(context.Background(), remote)
require.NoError(t, err)
require.Equal(t, int64(len(data)), o.Size())
return o
}
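// writeObjectReader uploads the reader's content to remote on f using an
// unknown (-1) size.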
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
modTime := time.Now()
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo)
require.NoError(t, err)
return obj
}
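// updateObjectRemote puts data1 at remote and then updates the object in place
// with data2, returning the updated object.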
func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
var err error
var obj fs.Object
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
_, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
require.NoError(t, err)
return obj
}
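// readDataFromRemote reads the byte range [offset, end) of remote from f;
// unless noLengthCheck is set, a short read is reported as an error.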
func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, end int64, noLengthCheck bool) ([]byte, error) {
size := end - offset
checkSample := make([]byte, size)
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
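// readDataFromObj reads the byte range [offset, end) from an object; with
// noLengthCheck a short read is returned instead of failing the test.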
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
size := end - offset
checkSample := make([]byte, size)
reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
require.NoError(t, err)
totalRead, err := io.ReadFull(reader, checkSample)
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
err = nil
checkSample = checkSample[:totalRead]
}
require.NoError(t, err, "with string -%v-", string(checkSample))
_ = reader.Close()
return checkSample
}
func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
err := f.Mkdir(context.Background(), remote)
require.NoError(t, err)
}
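// rm removes remote from f, falling back to Rmdir when it is not an object.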
func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
var err error
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
} else {
err = obj.Remove(context.Background())
}
return err
}
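// list returns the directory entries of remote on f as a generic slice.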
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
}
return l, err
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
if err != nil {
return err
}
} else {
t.Logf("DirMove not supported by %v", rootFs)
return errNotSupported
}
return err
}
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
_, err = rootFs.Features().Move(context.Background(), obj1, dst)
if err != nil {
return err
}
} else {
t.Logf("Move not supported by %v", rootFs)
return errNotSupported
}
return err
}
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
_, err = rootFs.Features().Copy(context.Background(), obj, dst)
if err != nil {
return err
}
} else {
t.Logf("Copy not supported by %v", rootFs)
return errNotSupported
}
return err
}
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
var err error
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return time.Time{}, err
}
return obj1.ModTime(context.Background()), nil
}
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
var err error
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return int64(0), err
}
return obj1.Size(), nil
}
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
var err error
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
data1 := []byte(data + append)
reader := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), reader, objInfo1)
return err
}
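// cleanSize converts an encrypted object size back to its plaintext size when
// the root is a crypt remote (32 byte header plus 16 bytes of overhead per
// 64 KiB block).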
func (r *run) cleanSize(t *testing.T, size int64) int64 {
if r.rootIsCrypt {
denominator := int64(65536 + 16)
size = size - 32
quotient := size / denominator
remainder := size % denominator
return (quotient*65536 + remainder - 16)
}
return size
}
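// listenForBackgroundUpload starts a goroutine that waits for the background
// upload of remote to finish and reports the result on the returned channel.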
func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) chan error {
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
buCh := cfs.GetBackgroundUploadChannel()
require.NotNil(t, buCh)
maxDuration := time.Minute * 3
if r.wrappedIsExternal {
maxDuration = time.Minute * 10
}
waitCh := make(chan error)
go func() {
var err error
var state cache.BackgroundUploadState
for i := 0; i < 2; i++ {
select {
case state = <-buCh:
// continue
case <-time.After(maxDuration):
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
if r.rootIsCrypt {
cryptFs := f.(*crypt.Fs)
checkRemote, err = cryptFs.DecryptFileName(checkRemote)
if err != nil {
waitCh <- err
return
}
}
if checkRemote == remote && cache.BackgroundUploadStarted != state.Status {
waitCh <- state.Error
return
}
}
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}
func (r *run) completeBackgroundUpload(t *testing.T, remote string, waitCh chan error) {
var err error
maxDuration := time.Minute * 3
if r.wrappedIsExternal {
maxDuration = time.Minute * 10
}
select {
case err = <-waitCh:
// continue
case <-time.After(maxDuration):
t.Fatalf("Timed out waiting to complete the background upload %v", remote)
return
}
require.NoError(t, err)
}
func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote string) {
var state cache.BackgroundUploadState
var err error
maxDuration := time.Minute * 5
if r.wrappedIsExternal {
maxDuration = time.Minute * 15
}
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
buCh := cfs.GetBackgroundUploadChannel()
require.NotNil(t, buCh)
for {
select {
case state = <-buCh:
checkRemote := state.Remote
if r.rootIsCrypt {
cryptFs := f.(*crypt.Fs)
checkRemote, err = cryptFs.DecryptFileName(checkRemote)
require.NoError(t, err)
}
if checkRemote == lastRemote && cache.BackgroundUploadCompleted == state.Status {
require.NoError(t, state.Error)
return
}
case <-time.After(maxDuration):
t.Fatalf("Timed out waiting to complete the background upload %v", lastRemote)
return
}
}
}
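// retryBlock runs block up to maxRetries times, sleeping rate between
// attempts, and returns the last error if it never succeeds.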
func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
var err error
for i := 0; i < maxRetries; i++ {
err = block()
if err == nil {
return nil
}
time.Sleep(rate)
}
return err
}
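// getCacheFs returns f as a *cache.Fs, unwrapping one layer (e.g. crypt) if
// needed.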
func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
cfs, ok := f.(*cache.Fs)
if ok {
return cfs, nil
}
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
}
}
return nil, errors.New("didn't find a cache fs")
}
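// randStringBytes returns n random letters to use as test data.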
func randStringBytes(n int) []byte {
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Intn(len(letterBytes))]
}
return b
}
var (
_ fs.Fs = (*cache.Fs)(nil)
_ fs.Fs = (*local.Fs)(nil)
)