// Package fstests provides generic integration tests for the Fs and
// Object interfaces.
//
// These tests are concerned with the basic functionality of a
// backend. The tests in fs/sync and fs/operations cover more corner
// cases that these tests don't.
package fstests

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math/bits"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/testserver"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
// InternalTester is an optional interface for Fs which allows the execution of internal tests
//
// This interface should be implemented in 'backend'_internal_test.go and not in 'backend'.go
type InternalTester interface {
	InternalTest(*testing.T)
}

// ChunkedUploadConfig contains the values used by TestFsPutChunked
// to determine the limits of chunked uploading
type ChunkedUploadConfig struct {
	// Minimum allowed chunk size
	MinChunkSize fs.SizeSuffix
	// Maximum allowed chunk size, 0 is no limit
	MaxChunkSize fs.SizeSuffix
	// Rounds the given chunk size up to the next valid value,
	// e.g. the next power of 2 (nil disables rounding)
	CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
	// More than one chunk is required on upload
	NeedMultipleChunks bool
	// Skip this particular remote
	Skip bool
}
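// Note that the chunked upload tests can only vary the chunk size if
// the Fs under test also implements SetUploadChunkSizer (below);
// remotes which don't will normally have those tests skipped.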
// SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
type SetUploadChunkSizer interface {
	// Change the configured UploadChunkSize.
	// Will only be called while no transfer is in progress.
	SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
}

// SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime
type SetUploadCutoffer interface {
	// Change the configured UploadCutoff.
	// Will only be called while no transfer is in progress.
	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}

// SetCopyCutoffer is a test only interface to change the copy cutoff size at runtime
type SetCopyCutoffer interface {
	// Change the configured CopyCutoff.
	// Will only be called while no transfer is in progress.
	SetCopyCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}

// NextPowerOfTwo returns the current or next bigger power of two.
// All values less than or equal to 0 will return 0.
func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
	return 1 << uint(64-bits.LeadingZeros64(uint64(i)-1))
}

// NextMultipleOf returns a function that can be used as a CeilChunkSize function.
// This function will return the next multiple of m that is equal to or bigger than i.
// All values less than or equal to 0 will return 0.
func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
	if m <= 0 {
		panic(fmt.Sprintf("invalid multiplier %s", m))
	}
	return func(i fs.SizeSuffix) fs.SizeSuffix {
		if i <= 0 {
			return 0
		}
		return (((i - 1) / m) + 1) * m
	}
}
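// For example, NextPowerOfTwo(5) returns 8 and NextMultipleOf(3)(7)
// returns 9. A backend plugs one of these into
// ChunkedUploadConfig.CeilChunkSize so that only chunk sizes it can
// actually accept are tested.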
// dirsToNames returns a sorted list of names
func dirsToNames(dirs []fs.Directory) []string {
	names := []string{}
	for _, dir := range dirs {
		names = append(names, fstest.Normalize(dir.Remote()))
	}
	sort.Strings(names)
	return names
}

// objsToNames returns a sorted list of object names
func objsToNames(objs []fs.Object) []string {
	names := []string{}
	for _, obj := range objs {
		names = append(names, fstest.Normalize(obj.Remote()))
	}
	sort.Strings(names)
	return names
}

// retry f() until it succeeds, returns a non-retriable error, or
// maxTries attempts have been made
func retry(t *testing.T, what string, f func() error) {
	const maxTries = 10
	var err error
	for tries := 1; tries <= maxTries; tries++ {
		err = f()
		// exit if no error, or error is not retriable
		if err == nil || !fserrors.IsRetryError(err) {
			break
		}
		t.Logf("%s error: %v - low level retry %d/%d", what, err, tries, maxTries)
		time.Sleep(2 * time.Second)
	}
	require.NoError(t, err, what)
}
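// retry is used by the Put helpers below to paper over transient
// failures which fserrors.IsRetryError reports as retriable.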
// PutTestContentsMetadata puts a file with the given contents to the remote
// and checks it, but unlike TestPutLarge doesn't remove it afterwards.
//
// It uploads the object with the mimeType and metadata passed in if set.
//
// It returns the object which will have been checked if check is set.
func PutTestContentsMetadata(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool, mimeType string, metadata fs.Metadata) fs.Object {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	retry(t, "Put", func() error {
		buf := bytes.NewBufferString(contents)
		uploadHash = hash.NewMultiHasher()
		in := io.TeeReader(buf, uploadHash)

		file.Size = int64(buf.Len())
		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
		if mimeType != "" || metadata != nil {
			// force the --metadata flag on temporarily
			if metadata != nil {
				ci := fs.GetConfig(ctx)
				previousMetadata := ci.Metadata
				ci.Metadata = true
				defer func() {
					ci.Metadata = previousMetadata
				}()
			}
			obji.WithMetadata(metadata).WithMimeType(mimeType)
		}
		obj, err = f.Put(ctx, in, obji)
		return err
	})
	file.Hashes = uploadHash.Sums()
	if check {
		// Overwrite time with that in metadata if it is already specified
		mtime, ok := metadata["mtime"]
		if ok {
			modTime, err := time.Parse(time.RFC3339Nano, mtime)
			require.NoError(t, err)
			file.ModTime = modTime
		}
		file.Check(t, obj, f.Precision())
		// Re-read the object and check again
		obj = fstest.NewObject(ctx, t, f, file.Path)
		file.Check(t, obj, f.Precision())
	}
	return obj
}
// PutTestContents puts a file with the given contents to the remote and
// checks it, but unlike TestPutLarge doesn't remove it afterwards.
func PutTestContents(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool) fs.Object {
	return PutTestContentsMetadata(ctx, t, f, file, contents, check, "", nil)
}

// testPut puts file with random contents to the remote
func testPut(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
	contents := random.String(100)
	return contents, PutTestContents(ctx, t, f, file, contents, true)
}

// testPutMimeType puts file with random contents to the remote and the mime type given
func testPutMimeType(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, mimeType string, metadata fs.Metadata) (string, fs.Object) {
	contents := random.String(100)
	return contents, PutTestContentsMetadata(ctx, t, f, file, contents, true, mimeType, metadata)
}
// testPutLarge puts file to the remote, checks it and removes it on success.
//
// If stream is set, then it uploads the file with size -1
func testPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, stream bool) {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	retry(t, "PutLarge", func() error {
		r := readers.NewPatternReader(file.Size)
		uploadHash = hash.NewMultiHasher()
		in := io.TeeReader(r, uploadHash)

		size := file.Size
		if stream {
			size = -1
		}
		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, size, true, nil, nil)
		obj, err = f.Put(ctx, in, obji)
		if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles {
			t.Skip("Can't upload zero length files")
		}
		return err
	})
	file.Hashes = uploadHash.Sums()
	file.Check(t, obj, f.Precision())

	// Re-read the object and check again
	obj = fstest.NewObject(ctx, t, f, file.Path)
	file.Check(t, obj, f.Precision())

	// Download the object and check it is OK
	downloadHash := hash.NewMultiHasher()
	download, err := obj.Open(ctx)
	require.NoError(t, err)
	n, err := io.Copy(downloadHash, download)
	require.NoError(t, err)
	assert.Equal(t, file.Size, n)
	require.NoError(t, download.Close())
	assert.Equal(t, file.Hashes, downloadHash.Sums())

	// Remove the object
	require.NoError(t, obj.Remove(ctx))
}

// TestPutLarge puts file to the remote, checks it and removes it on success.
func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
	testPutLarge(ctx, t, f, file, false)
}

// TestPutLargeStreamed puts file of unknown size to the remote, checks it and removes it on success.
func TestPutLargeStreamed(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
	testPutLarge(ctx, t, f, file, true)
}
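// Illustrative call from a backend's own tests (the Item values here
// are examples only):
//
//	fstests.TestPutLarge(ctx, t, f, &fstest.Item{
//		ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
//		Path:    "large_file.bin",
//		Size:    10 << 20,
//	})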
// ReadObject reads the contents of an object as a string
func ReadObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string {
	what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options)
	in, err := obj.Open(ctx, options...)
	require.NoError(t, err, what)
	var r io.Reader = in
	if limit >= 0 {
		r = &io.LimitedReader{R: r, N: limit}
	}
	contents, err := io.ReadAll(r)
	require.NoError(t, err, what)
	err = in.Close()
	require.NoError(t, err, what)
	return string(contents)
}
// ExtraConfigItem describes a config item for the tests
type ExtraConfigItem struct{ Name, Key, Value string }

// Opt is options for Run
type Opt struct {
	RemoteName                      string
	NilObject                       fs.Object
	ExtraConfig                     []ExtraConfigItem
	SkipBadWindowsCharacters        bool     // skips unusable characters for windows if set
	SkipFsMatch                     bool     // if set skip exact matching of Fs value
	TiersToTest                     []string // List of tiers which can be tested in setTier test
	ChunkedUpload                   ChunkedUploadConfig
	UnimplementableFsMethods        []string // List of Fs methods which can't be implemented in this wrapping Fs
	UnimplementableObjectMethods    []string // List of Object methods which can't be implemented in this wrapping Fs
	UnimplementableDirectoryMethods []string // List of Directory methods which can't be implemented in this wrapping Fs
	SkipFsCheckWrap                 bool     // if set skip FsCheckWrap
	SkipObjectCheckWrap             bool     // if set skip ObjectCheckWrap
	SkipDirectoryCheckWrap          bool     // if set skip DirectoryCheckWrap
	SkipInvalidUTF8                 bool     // if set skip invalid UTF-8 checks
	QuickTestOK                     bool     // if set, run this test with make quicktest
}
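// A backend's integration test typically just calls Run, for example
// (the remote name and NilObject type are illustrative):
//
//	func TestIntegration(t *testing.T) {
//		fstests.Run(t, &fstests.Opt{
//			RemoteName: "TestMyRemote:",
//			NilObject:  (*myremote.Object)(nil),
//		})
//	}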
// returns true if x is found in ss
func stringsContains(x string, ss []string) bool {
	for _, s := range ss {
		if x == s {
			return true
		}
	}
	return false
}

// toUpperASCII returns a copy of the string s with all lower case
// ASCII letters mapped to their upper case.
func toUpperASCII(s string) string {
	return strings.Map(func(r rune) rune {
		if 'a' <= r && r <= 'z' {
			r -= 'a' - 'A'
		}
		return r
	}, s)
}

// removeConfigID removes any {xyz} parts of the name put in for
// config disambiguation
func removeConfigID(s string) string {
	bra := strings.IndexRune(s, '{')
	ket := strings.IndexRune(s, '}')
	if bra >= 0 && ket > bra {
		s = s[:bra] + s[ket+1:]
	}
	return s
}
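// For example, removeConfigID("TestDrive{a8e93}:dir") returns "TestDrive:dir".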
// InternalTestFiles is the state of the remote at the moment the internal tests are called
var InternalTestFiles []fstest.Item
// Run runs the basic integration tests for a remote using the options passed in.
//
// They are structured in a hierarchical way so that dependencies for the tests can be created.
//
// For example some tests require the directory to be created - these
// are inside the "FsMkdir" test. Some tests require some test files
// - these are inside the "FsPutFiles" test.
func Run ( t * testing . T , opt * Opt ) {
var (
2020-11-26 22:28:39 +01:00
f fs . Fs
2018-04-07 19:48:11 +02:00
remoteName = opt . RemoteName
subRemoteName string
subRemoteLeaf string
file1 = fstest . Item {
ModTime : fstest . Time ( "2001-02-03T04:05:06.499999999Z" ) ,
Path : "file name.txt" ,
2016-01-20 21:06:05 +01:00
}
2018-04-07 19:48:11 +02:00
file1Contents string
2020-11-26 22:28:39 +01:00
file1MimeType = "text/csv"
2022-05-24 12:00:00 +02:00
file1Metadata = fs . Metadata { "rclone-test" : "potato" }
2018-04-07 19:48:11 +02:00
file2 = fstest . Item {
ModTime : fstest . Time ( "2001-02-03T04:05:10.123123123Z" ) ,
Path : ` hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt ` ,
2017-02-22 11:14:40 +01:00
}
2020-04-28 13:58:34 +02:00
isLocalRemote bool
purged bool // whether the dir has been purged or not
ctx = context . Background ( )
2020-11-05 12:33:32 +01:00
ci = fs . GetConfig ( ctx )
2020-04-28 13:58:34 +02:00
unwrappableFsMethods = [ ] string { "Command" } // these Fs methods don't need to be wrapped ever
2018-04-07 19:48:11 +02:00
)
2022-06-29 12:51:46 +02:00
if strings . HasSuffix ( os . Getenv ( "RCLONE_CONFIG" ) , "/notfound" ) && * fstest . RemoteName == "" && ! opt . QuickTestOK {
2019-10-04 17:51:07 +02:00
t . Skip ( "quicktest only" )
}
2018-04-07 19:48:11 +02:00
// Skip the test if the remote isn't configured
skipIfNotOk := func ( t * testing . T ) {
2020-11-26 22:28:39 +01:00
if f == nil {
2018-04-07 19:48:11 +02:00
t . Skipf ( "WARN: %q not configured" , remoteName )
2016-08-24 23:21:34 +02:00
}
2015-02-14 19:48:08 +01:00
}
2018-04-07 19:48:11 +02:00
// Skip if remote is not ListR capable, otherwise set the useListR
// flag, returning a function to restore its value
skipIfNotListR := func ( t * testing . T ) func ( ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
if f . Features ( ) . ListR == nil {
2018-04-07 19:48:11 +02:00
t . Skip ( "FS has no ListR interface" )
}
2020-11-05 12:33:32 +01:00
previous := ci . UseListR
ci . UseListR = true
2018-04-07 19:48:11 +02:00
return func ( ) {
2020-11-05 12:33:32 +01:00
ci . UseListR = previous
2018-04-07 19:48:11 +02:00
}
2016-07-25 20:18:56 +02:00
}
2015-02-14 19:48:08 +01:00
2018-09-18 14:25:20 +02:00
// Skip if remote is not SetTier and GetTier capable
2018-09-11 03:57:43 +02:00
skipIfNotSetTier := func ( t * testing . T ) {
skipIfNotOk ( t )
2022-06-08 22:25:17 +02:00
if ! f . Features ( ) . SetTier || ! f . Features ( ) . GetTier {
2018-09-18 14:25:20 +02:00
t . Skip ( "FS has no SetTier & GetTier interfaces" )
2018-09-11 03:57:43 +02:00
}
}
2018-11-22 18:43:18 +01:00
// Return true if f (or any of the things it wraps) is bucket
// based but not at the root.
isBucketBasedButNotRoot := func ( f fs . Fs ) bool {
2020-09-01 16:43:41 +02:00
f = fs . UnWrapFs ( f )
return f . Features ( ) . BucketBased && strings . Contains ( strings . Trim ( f . Root ( ) , "/" ) , "/" )
2018-11-22 18:43:18 +01:00
}
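// For example a remote pointing at s3:bucket/path is bucket based
// but not at the root, whereas s3:bucket is at the root.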
2015-02-14 19:48:08 +01:00
2018-11-22 18:43:18 +01:00
// Initialise the remote
fstest . Initialise ( )
2015-08-31 22:05:51 +02:00
2018-11-22 18:43:18 +01:00
// Set extra config if supplied
for _ , item := range opt . ExtraConfig {
config . FileSet ( item . Name , item . Key , item . Value )
}
if * fstest . RemoteName != "" {
remoteName = * fstest . RemoteName
}
2019-10-04 17:51:07 +02:00
oldFstestRemoteName := fstest . RemoteName
2019-08-08 20:58:02 +02:00
fstest . RemoteName = & remoteName
2019-10-04 17:51:07 +02:00
defer func ( ) {
fstest . RemoteName = oldFstestRemoteName
} ( )
2018-11-22 18:43:18 +01:00
t . Logf ( "Using remote %q" , remoteName )
var err error
if remoteName == "" {
remoteName , err = fstest . LocalRemote ( )
require . NoError ( t , err )
isLocalRemote = true
}
2015-08-31 22:05:51 +02:00
2019-10-04 17:51:07 +02:00
// Start any test servers if required
finish , err := testserver . Start ( remoteName )
require . NoError ( t , err )
defer finish ( )
2018-11-22 18:43:18 +01:00
// Make the Fs we are testing with, initialising the local variables
// subRemoteName - name of the remote after the TestRemote:
// subRemoteLeaf - a subdirectory to use under that
// remote - the result of fs.NewFs(TestRemote:subRemoteName)
subRemoteName , subRemoteLeaf , err = fstest . RandomRemoteName ( remoteName )
require . NoError ( t , err )
2020-11-26 22:28:39 +01:00
f , err = fs . NewFs ( context . Background ( ) , subRemoteName )
2018-11-22 18:43:18 +01:00
if err == fs . ErrorNotFoundInConfigFile {
t . Logf ( "Didn't find %q in config file - skipping tests" , remoteName )
return
}
require . NoError ( t , err , fmt . Sprintf ( "unexpected error: %v" , err ) )
2015-08-31 22:05:51 +02:00
2021-07-27 11:03:51 +02:00
// Get fsInfo which contains type, etc. of the fs
fsInfo , _ , _ , _ , err := fs . ConfigFs ( subRemoteName )
require . NoError ( t , err , fmt . Sprintf ( "unexpected error: %v" , err ) )
2018-11-22 18:43:18 +01:00
// Skip the rest if it failed
skipIfNotOk ( t )
2019-01-11 13:28:41 +01:00
// Check to see if Fs that wrap other Fs implement all the optional methods
t . Run ( "FsCheckWrap" , func ( t * testing . T ) {
skipIfNotOk ( t )
if opt . SkipFsCheckWrap {
t . Skip ( "Skipping FsCheckWrap on this Fs" )
}
2020-11-26 22:28:39 +01:00
ft := new ( fs . Features ) . Fill ( ctx , f )
2023-05-12 12:42:22 +02:00
if ft . UnWrap == nil && ! f . Features ( ) . Overlay {
2019-01-11 13:28:41 +01:00
t . Skip ( "Not a wrapping Fs" )
}
v := reflect . ValueOf ( ft ) . Elem ( )
vType := v . Type ( )
for i := 0 ; i < v . NumField ( ) ; i ++ {
vName := vType . Field ( i ) . Name
if stringsContains ( vName , opt . UnimplementableFsMethods ) {
continue
}
2020-04-28 13:58:34 +02:00
if stringsContains ( vName , unwrappableFsMethods ) {
continue
}
2019-01-11 13:28:41 +01:00
field := v . Field ( i )
// skip the bools
if field . Type ( ) . Kind ( ) == reflect . Bool {
continue
}
if field . IsNil ( ) {
t . Errorf ( "Missing Fs wrapper for %s" , vName )
}
}
} )
2020-04-28 13:58:34 +02:00
// Check to see if Fs advertises commands and they work and have docs
t . Run ( "FsCommand" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
doCommand := f . Features ( ) . Command
2020-04-28 13:58:34 +02:00
if doCommand == nil {
t . Skip ( "No commands in this remote" )
}
// Check the correct error is generated
_ , err := doCommand ( context . Background ( ) , "NOTFOUND" , nil , nil )
assert . Equal ( t , fs . ErrorCommandNotFound , err , "Incorrect error generated on command not found" )
// Check there are some commands in the fsInfo
fsInfo , _ , _ , _ , err := fs . ConfigFs ( remoteName )
require . NoError ( t , err )
assert . True ( t , len ( fsInfo . CommandHelp ) > 0 , "Command is declared, must return some help in CommandHelp" )
} )
2022-08-14 04:56:32 +02:00
// TestFsRmdirNotFound tests deleting a nonexistent directory
2018-11-22 18:43:18 +01:00
t . Run ( "FsRmdirNotFound" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
if isBucketBasedButNotRoot ( f ) {
2021-11-04 12:50:43 +01:00
t . Skip ( "Skipping test as non root bucket-based remote" )
2018-11-22 18:43:18 +01:00
}
2020-11-26 22:28:39 +01:00
err := f . Rmdir ( ctx , "" )
2022-08-14 04:56:32 +02:00
assert . Error ( t , err , "Expecting error on Rmdir nonexistent" )
2018-04-07 19:48:11 +02:00
} )
2018-11-22 18:43:18 +01:00
// Make the directory
2020-11-26 22:28:39 +01:00
err = f . Mkdir ( ctx , "" )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
2020-11-26 22:28:39 +01:00
fstest . CheckListing ( t , f , [ ] fstest . Item { } )
2018-11-22 18:43:18 +01:00
2018-04-07 19:48:11 +02:00
// TestFsString tests the String method
2018-11-22 18:43:18 +01:00
t . Run ( "FsString" , func ( t * testing . T ) {
2018-04-07 19:48:11 +02:00
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
str := f . String ( )
2018-04-07 19:48:11 +02:00
require . NotEqual ( t , "" , str )
} )
// TestFsName tests the Name method
2018-11-22 18:43:18 +01:00
t . Run ( "FsName" , func ( t * testing . T ) {
2018-04-07 19:48:11 +02:00
skipIfNotOk ( t )
2021-03-10 15:10:03 +01:00
got := removeConfigID ( f . Name ( ) )
2023-04-28 13:22:44 +02:00
var want string
2018-04-07 19:48:11 +02:00
if isLocalRemote {
2023-04-28 13:22:44 +02:00
want = "local"
} else {
want = remoteName [ : strings . LastIndex ( remoteName , ":" ) ]
comma := strings . IndexRune ( remoteName , ',' )
if comma >= 0 {
want = want [ : comma ]
}
2018-04-07 19:48:11 +02:00
}
2023-04-28 13:22:44 +02:00
require . Equal ( t , want , got )
2018-04-07 19:48:11 +02:00
} )
// TestFsRoot tests the Root method
2018-11-22 18:43:18 +01:00
t . Run ( "FsRoot" , func ( t * testing . T ) {
2018-04-07 19:48:11 +02:00
skipIfNotOk ( t )
2023-04-28 13:22:44 +02:00
got := f . Root ( )
want := subRemoteName
colon := strings . LastIndex ( want , ":" )
if colon >= 0 {
want = want [ colon + 1 : ]
}
2018-04-07 19:48:11 +02:00
if isLocalRemote {
// only check last path element on local
2023-04-28 13:22:44 +02:00
require . Equal ( t , filepath . Base ( subRemoteName ) , filepath . Base ( got ) )
2018-04-07 19:48:11 +02:00
} else {
2023-04-28 13:22:44 +02:00
require . Equal ( t , want , got )
2018-04-07 19:48:11 +02:00
}
} )
2015-09-22 19:47:16 +02:00
2018-04-07 19:48:11 +02:00
// TestFsRmdirEmpty tests deleting an empty directory
2018-11-22 18:43:18 +01:00
t . Run ( "FsRmdirEmpty" , func ( t * testing . T ) {
2018-04-07 19:48:11 +02:00
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
err := f . Rmdir ( ctx , "" )
2018-04-07 19:48:11 +02:00
require . NoError ( t , err )
} )
2015-08-31 22:05:51 +02:00
2018-04-11 22:39:17 +02:00
// TestFsMkdir tests making a directory
2018-11-22 18:43:18 +01:00
//
// Tests that require the directory to be made are within this
t . Run ( "FsMkdir" , func ( t * testing . T ) {
2018-04-07 19:48:11 +02:00
skipIfNotOk ( t )
2015-08-31 22:05:51 +02:00
2020-11-26 22:28:39 +01:00
err := f . Mkdir ( ctx , "" )
2018-04-07 19:48:11 +02:00
require . NoError ( t , err )
2020-11-26 22:28:39 +01:00
fstest . CheckListing ( t , f , [ ] fstest . Item { } )
2015-08-31 22:05:51 +02:00
2020-11-26 22:28:39 +01:00
err = f . Mkdir ( ctx , "" )
2018-04-07 19:48:11 +02:00
require . NoError ( t , err )
2015-08-31 22:05:51 +02:00
2018-11-22 18:43:18 +01:00
// TestFsMkdirRmdirSubdir tests making and removing a sub directory
t . Run ( "FsMkdirRmdirSubdir" , func ( t * testing . T ) {
skipIfNotOk ( t )
dir := "dir/subdir"
2020-11-26 22:28:39 +01:00
err := operations . Mkdir ( ctx , f , dir )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
2020-11-26 22:28:39 +01:00
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { } , [ ] string { "dir" , "dir/subdir" } , fs . GetModifyWindow ( ctx , f ) )
2014-07-24 23:50:11 +02:00
2020-11-26 22:28:39 +01:00
err = operations . Rmdir ( ctx , f , dir )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
2020-11-26 22:28:39 +01:00
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { } , [ ] string { "dir" } , fs . GetModifyWindow ( ctx , f ) )
2014-07-24 23:50:11 +02:00
2020-11-26 22:28:39 +01:00
err = operations . Rmdir ( ctx , f , "dir" )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
2020-11-26 22:28:39 +01:00
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { } , [ ] string { } , fs . GetModifyWindow ( ctx , f ) )
2018-11-22 18:43:18 +01:00
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListEmpty tests listing an empty directory
t . Run ( "FsListEmpty" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
fstest . CheckListing ( t , f , [ ] fstest . Item { } )
2018-11-22 18:43:18 +01:00
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListDirEmpty tests listing the directories from an empty directory
TestFsListDirEmpty := func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
objs , dirs , err := walk . GetAll ( ctx , f , "" , true , 1 )
if ! f . Features ( ) . CanHaveEmptyDirectories {
2019-08-13 22:25:11 +02:00
if err != fs . ErrorDirNotFound {
require . NoError ( t , err )
}
} else {
require . NoError ( t , err )
}
2018-11-22 18:43:18 +01:00
assert . Equal ( t , [ ] string { } , objsToNames ( objs ) )
assert . Equal ( t , [ ] string { } , dirsToNames ( dirs ) )
}
t . Run ( "FsListDirEmpty" , TestFsListDirEmpty )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListRDirEmpty tests listing the directories from an empty directory using ListR
t . Run ( "FsListRDirEmpty" , func ( t * testing . T ) {
defer skipIfNotListR ( t ) ( )
TestFsListDirEmpty ( t )
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListDirNotFound tests listing the directories from a directory which does not exist
TestFsListDirNotFound := func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
objs , dirs , err := walk . GetAll ( ctx , f , "does not exist" , true , 1 )
if ! f . Features ( ) . CanHaveEmptyDirectories {
2018-11-22 18:43:18 +01:00
if err != fs . ErrorDirNotFound {
assert . NoError ( t , err )
assert . Equal ( t , 0 , len ( objs ) + len ( dirs ) )
}
} else {
assert . Equal ( t , fs . ErrorDirNotFound , err )
2018-04-21 00:06:51 +02:00
}
}
2018-11-22 18:43:18 +01:00
t . Run ( "FsListDirNotFound" , TestFsListDirNotFound )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListRDirNotFound tests listing the directories from a directory which does not exist using ListR
t . Run ( "FsListRDirNotFound" , func ( t * testing . T ) {
defer skipIfNotListR ( t ) ( )
TestFsListDirNotFound ( t )
} )
2018-04-07 19:48:11 +02:00
2019-07-25 16:16:39 +02:00
// FsEncoding tests that file name encodings are
// working by uploading a series of unusual files
// Must be run in an empty directory
t . Run ( "FsEncoding" , func ( t * testing . T ) {
skipIfNotOk ( t )
2021-01-26 15:46:23 +01:00
if testing . Short ( ) {
t . Skip ( "not running with -short" )
}
2019-07-25 16:16:39 +02:00
// check no files or dirs as pre-requisite
2020-11-26 22:28:39 +01:00
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { } , [ ] string { } , fs . GetModifyWindow ( ctx , f ) )
2019-07-25 16:16:39 +02:00
for _ , test := range [ ] struct {
name string
path string
} {
// See lib/encoder/encoder.go for list of things that go here
{ "control chars" , "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7F" } ,
{ "dot" , "." } ,
{ "dot dot" , ".." } ,
{ "punctuation" , "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" } ,
{ "leading space" , " leading space" } ,
{ "leading tilde" , "~leading tilde" } ,
{ "leading CR" , "\rleading CR" } ,
{ "leading LF" , "\nleading LF" } ,
{ "leading HT" , "\tleading HT" } ,
{ "leading VT" , "\vleading VT" } ,
2019-09-30 17:41:48 +02:00
{ "leading dot" , ".leading dot" } ,
2019-07-25 16:16:39 +02:00
{ "trailing space" , "trailing space " } ,
{ "trailing CR" , "trailing CR\r" } ,
{ "trailing LF" , "trailing LF\n" } ,
{ "trailing HT" , "trailing HT\t" } ,
{ "trailing VT" , "trailing VT\v" } ,
{ "trailing dot" , "trailing dot." } ,
{ "invalid UTF-8" , "invalid utf-8\xfe" } ,
2021-11-02 16:58:25 +01:00
{ "URL encoding" , "test%46.txt" } ,
2019-07-25 16:16:39 +02:00
} {
t . Run ( test . name , func ( t * testing . T ) {
2019-10-10 11:32:28 +02:00
if opt . SkipInvalidUTF8 && test . name == "invalid UTF-8" {
t . Skip ( "Skipping " + test . name )
}
2019-07-25 16:16:39 +02:00
// turn raw strings into Standard encoding
fileName := encoder . Standard . Encode ( test . path )
dirName := fileName
t . Logf ( "testing %q" , fileName )
2020-11-26 22:28:39 +01:00
assert . NoError ( t , f . Mkdir ( ctx , dirName ) )
2019-07-25 16:16:39 +02:00
file := fstest . Item {
ModTime : time . Now ( ) ,
Path : dirName + "/" + fileName , // test creating a file and dir with that name
}
2020-11-26 22:28:39 +01:00
_ , o := testPut ( context . Background ( ) , t , f , & file )
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { file } , [ ] string { dirName } , fs . GetModifyWindow ( ctx , f ) )
2019-07-25 16:16:39 +02:00
assert . NoError ( t , o . Remove ( ctx ) )
2020-11-26 22:28:39 +01:00
assert . NoError ( t , f . Rmdir ( ctx , dirName ) )
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { } , [ ] string { } , fs . GetModifyWindow ( ctx , f ) )
2019-07-25 16:16:39 +02:00
} )
}
} )
2020-05-20 12:39:20 +02:00
// TestFsNewObjectNotFound tests not finding an object
2018-11-22 18:43:18 +01:00
t . Run ( "FsNewObjectNotFound" , func ( t * testing . T ) {
skipIfNotOk ( t )
// Object in an existing directory
2020-11-26 22:28:39 +01:00
o , err := f . NewObject ( ctx , "potato" )
2018-11-22 18:43:18 +01:00
assert . Nil ( t , o )
assert . Equal ( t , fs . ErrorObjectNotFound , err )
2022-08-14 04:56:32 +02:00
// Now try an object in a nonexistent directory
2020-11-26 22:28:39 +01:00
o , err = f . NewObject ( ctx , "directory/not/found/potato" )
2018-11-22 18:43:18 +01:00
assert . Nil ( t , o )
assert . Equal ( t , fs . ErrorObjectNotFound , err )
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsPutError tests uploading a file where there is an error
//
// It makes sure that aborting an upload half way through does not create
// a file on the remote.
//
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$'
t . Run ( "FsPutError" , func ( t * testing . T ) {
skipIfNotOk ( t )
2019-09-16 19:52:41 +02:00
var N int64 = 5 * 1024
if * fstest . SizeLimit > 0 && N > * fstest . SizeLimit {
N = * fstest . SizeLimit
t . Logf ( "Reduce file size due to limit %d" , N )
}
2018-11-22 18:43:18 +01:00
// Read N bytes then produce an error
2019-09-16 19:52:41 +02:00
contents := random . String ( int ( N ) )
2018-11-22 18:43:18 +01:00
buf := bytes . NewBufferString ( contents )
2020-03-27 12:12:21 +01:00
er := & readers . ErrorReader { Err : errors . New ( "potato" ) }
2018-11-22 18:43:18 +01:00
in := io . MultiReader ( buf , er )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
obji := object . NewStaticObjectInfo ( file2 . Path , file2 . ModTime , 2 * N , true , nil , nil )
2020-11-26 22:28:39 +01:00
_ , err := f . Put ( ctx , in , obji )
2018-11-22 18:43:18 +01:00
// assert.Nil(t, obj) - FIXME some remotes return the object even on nil
assert . NotNil ( t , err )
2018-04-07 19:48:11 +02:00
2020-11-26 22:28:39 +01:00
obj , err := f . NewObject ( ctx , file2 . Path )
2018-11-22 18:43:18 +01:00
assert . Nil ( t , obj )
assert . Equal ( t , fs . ErrorObjectNotFound , err )
} )
2018-04-07 19:48:11 +02:00
2019-06-30 19:31:58 +02:00
t . Run ( "FsPutZeroLength" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
TestPutLarge ( ctx , t , f , & fstest . Item {
2019-06-30 19:31:58 +02:00
ModTime : fstest . Time ( "2001-02-03T04:05:06.499999999Z" ) ,
2022-06-08 22:25:17 +02:00
Path : "zero-length-file" ,
2019-06-30 19:31:58 +02:00
Size : int64 ( 0 ) ,
} )
} )
2019-04-22 20:22:42 +02:00
t . Run ( "FsOpenWriterAt" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
openWriterAt := f . Features ( ) . OpenWriterAt
2019-04-22 20:22:42 +02:00
if openWriterAt == nil {
t . Skip ( "FS has no OpenWriterAt interface" )
}
path := "writer-at-subdir/writer-at-file"
2019-07-25 16:16:39 +02:00
out , err := openWriterAt ( ctx , path , - 1 )
2019-04-22 20:22:42 +02:00
require . NoError ( t , err )
var n int
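// Write the three ranges out of order (offsets 3, 6, then 0) to check
// that WriteAt supports random access writes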
n , err = out . WriteAt ( [ ] byte ( "def" ) , 3 )
assert . NoError ( t , err )
assert . Equal ( t , 3 , n )
n , err = out . WriteAt ( [ ] byte ( "ghi" ) , 6 )
assert . NoError ( t , err )
assert . Equal ( t , 3 , n )
n , err = out . WriteAt ( [ ] byte ( "abc" ) , 0 )
assert . NoError ( t , err )
assert . Equal ( t , 3 , n )
assert . NoError ( t , out . Close ( ) )
2024-02-26 14:44:55 +01:00
obj := fstest . NewObject ( ctx , t , f , path )
2022-07-26 17:50:32 +02:00
assert . Equal ( t , "abcdefghi" , ReadObject ( ctx , t , obj , - 1 ) , "contents of file differ" )
2019-04-22 20:22:42 +02:00
2019-07-25 16:16:39 +02:00
assert . NoError ( t , obj . Remove ( ctx ) )
2020-11-26 22:28:39 +01:00
assert . NoError ( t , f . Rmdir ( ctx , "writer-at-subdir" ) )
2019-04-22 20:22:42 +02:00
} )
2023-07-13 21:01:10 +02:00
// TestFsOpenChunkWriter tests writing in chunks to fs
// then reads back the contents and checks that they match
// go test -v -run 'TestIntegration/FsMkdir/FsOpenChunkWriter'
t . Run ( "FsOpenChunkWriter" , func ( t * testing . T ) {
skipIfNotOk ( t )
openChunkWriter := f . Features ( ) . OpenChunkWriter
if openChunkWriter == nil {
t . Skip ( "FS has no OpenChunkWriter interface" )
}
size5MBs := 5 * 1024 * 1024
contents1 := random . String ( size5MBs )
contents2 := random . String ( size5MBs )
size1MB := 1 * 1024 * 1024
contents3 := random . String ( size1MB )
path := "writer-at-subdir/writer-at-file"
2023-10-28 15:51:01 +02:00
objSrc := object . NewStaticObjectInfo ( path + "-WRONG-REMOTE" , file1 . ModTime , - 1 , true , nil , nil )
_ , out , err := openChunkWriter ( ctx , path , objSrc , & fs . ChunkOption {
2023-07-13 21:01:10 +02:00
ChunkSize : int64 ( size5MBs ) ,
} )
require . NoError ( t , err )
var n int64
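// Write the chunks deliberately out of order (indexes 1, 2, then 0) to
// check that the chunk writer reassembles them by index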
2023-08-19 18:30:55 +02:00
n , err = out . WriteChunk ( ctx , 1 , strings . NewReader ( contents2 ) )
2023-07-13 21:01:10 +02:00
assert . NoError ( t , err )
assert . Equal ( t , int64 ( size5MBs ) , n )
2023-08-19 18:30:55 +02:00
n , err = out . WriteChunk ( ctx , 2 , strings . NewReader ( contents3 ) )
2023-07-13 21:01:10 +02:00
assert . NoError ( t , err )
assert . Equal ( t , int64 ( size1MB ) , n )
2023-08-19 18:30:55 +02:00
n , err = out . WriteChunk ( ctx , 0 , strings . NewReader ( contents1 ) )
2023-07-13 21:01:10 +02:00
assert . NoError ( t , err )
assert . Equal ( t , int64 ( size5MBs ) , n )
2023-08-19 18:30:55 +02:00
assert . NoError ( t , out . Close ( ctx ) )
2023-07-13 21:01:10 +02:00
2024-02-26 14:44:55 +01:00
obj := fstest . NewObject ( ctx , t , f , path )
2023-07-13 21:01:10 +02:00
originalContents := contents1 + contents2 + contents3
fileContents := ReadObject ( ctx , t , obj , - 1 )
isEqual := originalContents == fileContents
assert . True ( t , isEqual , "contents of file differ" )
assert . NoError ( t , obj . Remove ( ctx ) )
assert . NoError ( t , f . Rmdir ( ctx , "writer-at-subdir" ) )
} )
2018-11-22 18:43:18 +01:00
// TestFsChangeNotify tests that changes are properly
// propagated
//
// go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose
t . Run ( "FsChangeNotify" , func ( t * testing . T ) {
skipIfNotOk ( t )
// Check have ChangeNotify
2020-11-26 22:28:39 +01:00
doChangeNotify := f . Features ( ) . ChangeNotify
2018-11-22 18:43:18 +01:00
if doChangeNotify == nil {
t . Skip ( "FS has no ChangeNotify interface" )
2018-10-09 09:42:45 +02:00
}
2020-11-26 22:28:39 +01:00
err := operations . Mkdir ( ctx , f , "dir" )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
2018-10-13 23:43:15 +02:00
2018-11-22 18:43:18 +01:00
pollInterval := make ( chan time . Duration )
dirChanges := map [ string ] struct { } { }
objChanges := map [ string ] struct { } { }
2019-07-25 16:16:39 +02:00
doChangeNotify ( ctx , func ( x string , e fs . EntryType ) {
2018-11-22 18:43:18 +01:00
fs . Debugf ( nil , "doChangeNotify(%q, %+v)" , x , e )
if strings . HasPrefix ( x , file1 . Path [ : 5 ] ) || strings . HasPrefix ( x , file2 . Path [ : 5 ] ) {
fs . Debugf ( nil , "Ignoring notify for file1 or file2: %q, %v" , x , e )
return
}
if e == fs . EntryDirectory {
dirChanges [ x ] = struct { } { }
} else if e == fs . EntryObject {
objChanges [ x ] = struct { } { }
2018-10-13 23:43:15 +02:00
}
2018-11-22 18:43:18 +01:00
} , pollInterval )
defer func ( ) { close ( pollInterval ) } ( )
pollInterval <- time . Second
var dirs [ ] string
for _ , idx := range [ ] int { 1 , 3 , 2 } {
dir := fmt . Sprintf ( "dir/subdir%d" , idx )
2020-11-26 22:28:39 +01:00
err = operations . Mkdir ( ctx , f , dir )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
dirs = append ( dirs , dir )
}
2018-10-13 23:43:15 +02:00
2018-11-22 18:43:18 +01:00
var objs [ ] fs . Object
for _ , idx := range [ ] int { 2 , 4 , 3 } {
file := fstest . Item {
ModTime : time . Now ( ) ,
Path : fmt . Sprintf ( "dir/file%d" , idx ) ,
2018-10-09 09:42:45 +02:00
}
2020-11-26 22:28:39 +01:00
_ , o := testPut ( ctx , t , f , & file )
2018-11-22 18:43:18 +01:00
objs = append ( objs , o )
}
2018-10-09 09:42:45 +02:00
2018-11-22 18:43:18 +01:00
// Looks for each item in wants in changes -
// if they are all found it returns true
contains := func ( changes map [ string ] struct { } , wants [ ] string ) bool {
for _ , want := range wants {
_ , ok := changes [ want ]
if ! ok {
return false
}
2018-04-07 19:48:11 +02:00
}
2018-11-22 18:43:18 +01:00
return true
}
// Wait a little while for the changes to come in
wantDirChanges := [ ] string { "dir/subdir1" , "dir/subdir3" , "dir/subdir2" }
wantObjChanges := [ ] string { "dir/file2" , "dir/file4" , "dir/file3" }
ok := false
for tries := 1 ; tries < 10 ; tries ++ {
ok = contains ( dirChanges , wantDirChanges ) && contains ( objChanges , wantObjChanges )
if ok {
2018-04-07 19:48:11 +02:00
break
}
2018-11-22 18:43:18 +01:00
t . Logf ( "Try %d/10 waiting for dirChanges and objChanges" , tries )
time . Sleep ( 3 * time . Second )
2018-04-07 19:48:11 +02:00
}
2018-11-22 18:43:18 +01:00
if ! ok {
t . Errorf ( "%+v does not contain %+v or \n%+v does not contain %+v" , dirChanges , wantDirChanges , objChanges , wantObjChanges )
2018-04-07 19:48:11 +02:00
}
2018-11-22 18:43:18 +01:00
// tidy up afterwards
for _ , o := range objs {
2019-07-25 16:16:39 +02:00
assert . NoError ( t , o . Remove ( ctx ) )
2018-04-07 19:48:11 +02:00
}
2018-11-22 18:43:18 +01:00
dirs = append ( dirs , "dir" )
for _ , dir := range dirs {
2020-11-26 22:28:39 +01:00
assert . NoError ( t , f . Rmdir ( ctx , dir ) )
2018-11-22 18:43:18 +01:00
}
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsPutFiles writes file1, file2 and tests an update
//
// Tests that require file1, file2 are within this
t . Run ( "FsPutFiles" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
file1Contents , _ = testPut ( ctx , t , f , & file1 )
/* file2Contents = */ testPut ( ctx , t , f , & file2 )
2022-05-24 12:00:00 +02:00
file1Contents , _ = testPutMimeType ( ctx , t , f , & file1 , file1MimeType , file1Metadata )
2018-11-22 18:43:18 +01:00
// Note that the next test will check there are no duplicated file names
// TestFsListDirFile2 tests the files are correctly uploaded by doing
// Depth 1 directory listings
TestFsListDirFile2 := func ( t * testing . T ) {
skipIfNotOk ( t )
list := func ( dir string , expectedDirNames , expectedObjNames [ ] string ) {
var objNames , dirNames [ ] string
for i := 1 ; i <= * fstest . ListRetries ; i ++ {
2020-11-26 22:28:39 +01:00
objs , dirs , err := walk . GetAll ( ctx , f , dir , true , 1 )
2021-11-04 11:12:57 +01:00
if errors . Is ( err , fs . ErrorDirNotFound ) {
2020-11-26 22:28:39 +01:00
objs , dirs , err = walk . GetAll ( ctx , f , dir , true , 1 )
2018-11-22 18:43:18 +01:00
}
require . NoError ( t , err )
objNames = objsToNames ( objs )
dirNames = dirsToNames ( dirs )
if len ( objNames ) >= len ( expectedObjNames ) && len ( dirNames ) >= len ( expectedDirNames ) {
break
}
t . Logf ( "Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d" , i , * fstest . ListRetries )
time . Sleep ( 1 * time . Second )
}
assert . Equal ( t , expectedDirNames , dirNames )
assert . Equal ( t , expectedObjNames , objNames )
}
dir := file2 . Path
deepest := true
for dir != "" {
expectedObjNames := [ ] string { }
expectedDirNames := [ ] string { }
child := dir
dir = path . Dir ( dir )
if dir == "." {
dir = ""
2019-08-26 21:22:38 +02:00
expectedObjNames = append ( expectedObjNames , file1 . Path )
2018-11-22 18:43:18 +01:00
}
if deepest {
2019-08-26 21:22:38 +02:00
expectedObjNames = append ( expectedObjNames , file2 . Path )
2018-11-22 18:43:18 +01:00
deepest = false
} else {
2019-08-26 21:22:38 +02:00
expectedDirNames = append ( expectedDirNames , child )
2018-11-22 18:43:18 +01:00
}
list ( dir , expectedDirNames , expectedObjNames )
}
}
t . Run ( "FsListDirFile2" , TestFsListDirFile2 )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListRDirFile2 tests the files are correctly uploaded by doing
// Depth 1 directory listings using ListR
t . Run ( "FsListRDirFile2" , func ( t * testing . T ) {
defer skipIfNotListR ( t ) ( )
TestFsListDirFile2 ( t )
} )
2018-04-07 19:48:11 +02:00
2019-01-21 11:02:23 +01:00
// Test the files are all there with walk.ListR recursive listings
t . Run ( "FsListR" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
objs , dirs , err := walk . GetAll ( ctx , f , "" , true , - 1 )
2019-01-21 11:02:23 +01:00
require . NoError ( t , err )
assert . Equal ( t , [ ] string {
2019-08-26 21:22:38 +02:00
"hello? sausage" ,
"hello? sausage/êé" ,
"hello? sausage/êé/Hello, 世界" ,
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
2019-01-21 11:02:23 +01:00
} , dirsToNames ( dirs ) )
assert . Equal ( t , [ ] string {
"file name.txt" ,
2019-08-26 21:22:38 +02:00
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt" ,
2019-01-21 11:02:23 +01:00
} , objsToNames ( objs ) )
} )
// Test the files are all there with
// walk.ListR recursive listings on a sub dir
t . Run ( "FsListRSubdir" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
objs , dirs , err := walk . GetAll ( ctx , f , path . Dir ( path . Dir ( path . Dir ( path . Dir ( file2 . Path ) ) ) ) , true , - 1 )
2019-01-21 11:02:23 +01:00
require . NoError ( t , err )
assert . Equal ( t , [ ] string {
2019-08-26 21:22:38 +02:00
"hello? sausage/êé" ,
"hello? sausage/êé/Hello, 世界" ,
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
2019-01-21 11:02:23 +01:00
} , dirsToNames ( dirs ) )
assert . Equal ( t , [ ] string {
2019-08-26 21:22:38 +02:00
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt" ,
2019-01-21 11:02:23 +01:00
} , objsToNames ( objs ) )
} )
2018-11-22 18:43:18 +01:00
// TestFsListDirRoot tests that listing directories works in the root
TestFsListDirRoot := func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-05 16:18:51 +01:00
rootRemote , err := fs . NewFs ( context . Background ( ) , remoteName )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
2019-07-25 16:16:39 +02:00
_ , dirs , err := walk . GetAll ( ctx , rootRemote , "" , true , 1 )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
assert . Contains ( t , dirsToNames ( dirs ) , subRemoteLeaf , "Remote leaf not found" )
}
t . Run ( "FsListDirRoot" , TestFsListDirRoot )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListRDirRoot tests that listing directories works in the root using ListR
t . Run ( "FsListRDirRoot" , func ( t * testing . T ) {
defer skipIfNotListR ( t ) ( )
TestFsListDirRoot ( t )
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListSubdir tests List works for a subdirectory
TestFsListSubdir := func ( t * testing . T ) {
skipIfNotOk ( t )
fileName := file2 . Path
var err error
var objs [ ] fs . Object
var dirs [ ] fs . Directory
for i := 0 ; i < 2 ; i ++ {
dir , _ := path . Split ( fileName )
dir = dir [ : len ( dir ) - 1 ]
2020-11-26 22:28:39 +01:00
objs , dirs , err = walk . GetAll ( ctx , f , dir , true , - 1 )
2018-11-22 18:43:18 +01:00
}
require . NoError ( t , err )
require . Len ( t , objs , 1 )
assert . Equal ( t , fileName , objs [ 0 ] . Remote ( ) )
require . Len ( t , dirs , 0 )
}
t . Run ( "FsListSubdir" , TestFsListSubdir )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListRSubdir tests List works for a subdirectory using ListR
t . Run ( "FsListRSubdir" , func ( t * testing . T ) {
defer skipIfNotListR ( t ) ( )
TestFsListSubdir ( t )
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListLevel2 tests List works for 2 levels
TestFsListLevel2 := func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
objs , dirs , err := walk . GetAll ( ctx , f , "" , true , 2 )
2018-11-22 18:43:18 +01:00
if err == fs . ErrorLevelNotSupported {
return
}
require . NoError ( t , err )
assert . Equal ( t , [ ] string { file1 . Path } , objsToNames ( objs ) )
2019-08-26 21:22:38 +02:00
assert . Equal ( t , [ ] string { "hello? sausage" , "hello? sausage/êé" } , dirsToNames ( dirs ) )
2018-11-22 18:43:18 +01:00
}
t . Run ( "FsListLevel2" , TestFsListLevel2 )
2017-05-25 23:05:49 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListRLevel2 tests List works for 2 levels using ListR
t . Run ( "FsListRLevel2" , func ( t * testing . T ) {
defer skipIfNotListR ( t ) ( )
TestFsListLevel2 ( t )
} )
2014-07-24 23:50:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsListFile1 tests file present
t . Run ( "FsListFile1" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2 } )
2018-11-22 18:43:18 +01:00
} )
2014-07-24 23:50:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsNewObject tests NewObject
t . Run ( "FsNewObject" , func ( t * testing . T ) {
skipIfNotOk ( t )
2024-02-26 14:44:55 +01:00
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
2020-11-26 22:28:39 +01:00
file1 . Check ( t , obj , f . Precision ( ) )
2018-11-22 18:43:18 +01:00
} )
2014-07-24 23:50:11 +02:00
2020-12-07 11:57:06 +01:00
// FsNewObjectCaseInsensitive tests NewObject on a case insensitive file system
t . Run ( "FsNewObjectCaseInsensitive" , func ( t * testing . T ) {
skipIfNotOk ( t )
if ! f . Features ( ) . CaseInsensitive {
t . Skip ( "Not Case Insensitive" )
}
2024-02-26 14:44:55 +01:00
obj := fstest . NewObject ( ctx , t , f , toUpperASCII ( file1 . Path ) )
2020-12-07 11:57:06 +01:00
file1 . Check ( t , obj , f . Precision ( ) )
2021-01-27 15:22:10 +01:00
t . Run ( "Dir" , func ( t * testing . T ) {
2024-02-26 14:44:55 +01:00
obj := fstest . NewObject ( ctx , t , f , toUpperASCII ( file2 . Path ) )
2021-01-27 15:22:10 +01:00
file2 . Check ( t , obj , f . Precision ( ) )
} )
2020-12-07 11:57:06 +01:00
} )
2018-11-22 18:43:18 +01:00
// TestFsListFile1and2 tests two files present
t . Run ( "FsListFile1and2" , func ( t * testing . T ) {
skipIfNotOk ( t )
2020-11-26 22:28:39 +01:00
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2 } )
2018-11-22 18:43:18 +01:00
} )
2014-07-24 23:50:11 +02:00
2021-09-06 14:54:08 +02:00
// TestFsNewObjectDir tests NewObject on a directory which should produce fs.ErrorIsDir if possible or fs.ErrorObjectNotFound if not
2018-11-22 18:43:18 +01:00
t . Run ( "FsNewObjectDir" , func ( t * testing . T ) {
skipIfNotOk ( t )
dir := path . Dir ( file2 . Path )
2020-11-26 22:28:39 +01:00
obj , err := f . NewObject ( ctx , dir )
2018-11-22 18:43:18 +01:00
assert . Nil ( t , obj )
2021-09-06 14:54:08 +02:00
assert . True ( t , err == fs . ErrorIsDir || err == fs . ErrorObjectNotFound , fmt . Sprintf ( "Wrong error: expecting fs.ErrorIsDir or fs.ErrorObjectNotFound but got: %#v" , err ) )
2018-11-22 18:43:18 +01:00
} )
2016-09-21 23:13:24 +02:00
2020-06-04 23:25:14 +02:00
// TestFsPurge tests Purge
t . Run ( "FsPurge" , func ( t * testing . T ) {
skipIfNotOk ( t )
// Check have Purge
2020-11-26 22:28:39 +01:00
doPurge := f . Features ( ) . Purge
2020-06-04 23:25:14 +02:00
if doPurge == nil {
t . Skip ( "FS has no Purge interface" )
}
// put up a file to purge
fileToPurge := fstest . Item {
ModTime : fstest . Time ( "2001-02-03T04:05:06.499999999Z" ) ,
Path : "dirToPurge/fileToPurge.txt" ,
}
2020-11-26 22:28:39 +01:00
_ , _ = testPut ( ctx , t , f , & fileToPurge )
2020-06-04 23:25:14 +02:00
2020-11-26 22:28:39 +01:00
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { file1 , file2 , fileToPurge } , [ ] string {
2020-06-04 23:25:14 +02:00
"dirToPurge" ,
"hello? sausage" ,
"hello? sausage/êé" ,
"hello? sausage/êé/Hello, 世界" ,
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
2020-11-26 22:28:39 +01:00
} , fs . GetModifyWindow ( ctx , f ) )
2020-06-04 23:25:14 +02:00
// Now purge it
2020-11-26 22:28:39 +01:00
err = operations . Purge ( ctx , f , "dirToPurge" )
2020-06-04 23:25:14 +02:00
require . NoError ( t , err )
2020-11-26 22:28:39 +01:00
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { file1 , file2 } , [ ] string {
2020-06-04 23:25:14 +02:00
"hello? sausage" ,
"hello? sausage/êé" ,
"hello? sausage/êé/Hello, 世界" ,
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
2020-11-26 22:28:39 +01:00
} , fs . GetModifyWindow ( ctx , f ) )
2020-06-04 23:25:14 +02:00
} )
2023-08-25 18:38:41 +02:00
// TestFsPurgeRoot tests Purge on the root of the Fs
t . Run ( "FsPurgeRoot" , func ( t * testing . T ) {
skipIfNotOk ( t )
// Check have Purge
doPurge := f . Features ( ) . Purge
if doPurge == nil {
t . Skip ( "FS has no Purge interface" )
}
// put up a file to purge
fileToPurge := fstest . Item {
ModTime : fstest . Time ( "2001-02-03T04:05:06.499999999Z" ) ,
Path : "dirToPurgeFromRoot/fileToPurgeFromRoot.txt" ,
}
_ , _ = testPut ( ctx , t , f , & fileToPurge )
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { file1 , file2 , fileToPurge } , [ ] string {
"dirToPurgeFromRoot" ,
"hello? sausage" ,
"hello? sausage/êé" ,
"hello? sausage/êé/Hello, 世界" ,
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
} , fs . GetModifyWindow ( ctx , f ) )
// Create a new Fs pointing at the directory
remoteName := subRemoteName + "/" + "dirToPurgeFromRoot"
fPurge , err := fs . NewFs ( context . Background ( ) , remoteName )
require . NoError ( t , err )
// Now purge it from the root
err = operations . Purge ( ctx , fPurge , "" )
require . NoError ( t , err )
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { file1 , file2 } , [ ] string {
"hello? sausage" ,
"hello? sausage/êé" ,
"hello? sausage/êé/Hello, 世界" ,
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
} , fs . GetModifyWindow ( ctx , f ) )
} )
2018-11-22 18:43:18 +01:00
// TestFsCopy tests Copy
t . Run ( "FsCopy" , func ( t * testing . T ) {
skipIfNotOk ( t )
2014-07-24 23:50:11 +02:00
2018-11-22 18:43:18 +01:00
// Check have Copy
2020-11-26 22:28:39 +01:00
doCopy := f . Features ( ) . Copy
2018-11-22 18:43:18 +01:00
if doCopy == nil {
t . Skip ( "FS has no Copier interface" )
}
2014-07-24 23:50:11 +02:00
2018-11-22 18:43:18 +01:00
// Test with file2 so we have + and ' ' in the file name
var file2Copy = file2
file2Copy . Path += "-copy"
2016-01-17 12:27:31 +01:00
2018-11-22 18:43:18 +01:00
// do the copy
2024-02-26 14:44:55 +01:00
src := fstest . NewObject ( ctx , t , f , file2 . Path )
2019-07-25 16:16:39 +02:00
dst , err := doCopy ( ctx , src , file2Copy . Path )
2018-11-22 18:43:18 +01:00
if err == fs . ErrorCantCopy {
t . Skip ( "FS can't copy" )
}
require . NoError ( t , err , fmt . Sprintf ( "Error: %#v" , err ) )
2017-05-19 13:26:07 +02:00
2018-11-22 18:43:18 +01:00
// check file exists in new listing
2020-11-26 22:28:39 +01:00
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2 , file2Copy } )
2018-01-21 17:56:11 +01:00
2018-11-22 18:43:18 +01:00
// Check dst lightly - list above has checked ModTime/Hashes
assert . Equal ( t , file2Copy . Path , dst . Remote ( ) )
2018-04-14 18:15:00 +02:00
2018-11-22 18:43:18 +01:00
// Delete copy
2019-07-25 16:16:39 +02:00
err = dst . Remove ( ctx )
2018-11-22 18:43:18 +01:00
require . NoError ( t , err )
2018-04-07 19:48:11 +02:00
2024-03-06 11:28:53 +01:00
// Test that server side copying files does the correct thing with metadata
t . Run ( "Metadata" , func ( t * testing . T ) {
if ! f . Features ( ) . WriteMetadata {
t . Skip ( "Skipping test as can't write metadata" )
}
ctx , ci := fs . AddConfig ( ctx )
ci . Metadata = true
// Create file with metadata
const srcName = "test metadata copy.txt"
const dstName = "test metadata copied.txt"
t1 := fstest . Time ( "2003-02-03T04:05:06.499999999Z" )
t2 := fstest . Time ( "2004-03-03T04:05:06.499999999Z" )
fileSrc := fstest . NewItem ( srcName , srcName , t1 )
contents := random . String ( 100 )
var testMetadata = fs . Metadata {
// System metadata supported by all backends
"mtime" : t1 . Format ( time . RFC3339Nano ) ,
// User metadata
"potato" : "jersey" ,
}
oSrc := PutTestContentsMetadata ( ctx , t , f , & fileSrc , contents , true , "text/plain" , testMetadata )
fstest . CheckEntryMetadata ( ctx , t , f , oSrc , testMetadata )
// Copy it with --metadata-set
ci . MetadataSet = fs . Metadata {
// System metadata supported by all backends
"mtime" : t2 . Format ( time . RFC3339Nano ) ,
// User metadata
"potato" : "royal" ,
}
oDst , err := doCopy ( ctx , oSrc , dstName )
require . NoError ( t , err )
fileDst := fileSrc
fileDst . Path = dstName
fileDst . ModTime = t2
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2 , fileSrc , fileDst } )
// Check metadata is correct
fstest . CheckEntryMetadata ( ctx , t , f , oDst , ci . MetadataSet )
oDst = fstest . NewObject ( ctx , t , f , dstName )
fstest . CheckEntryMetadata ( ctx , t , f , oDst , ci . MetadataSet )
// Remove test files
require . NoError ( t , oSrc . Remove ( ctx ) )
require . NoError ( t , oDst . Remove ( ctx ) )
} )
2018-11-22 18:43:18 +01:00
} )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// TestFsMove tests Move
t . Run ( "FsMove" , func ( t * testing . T ) {
skipIfNotOk ( t )
2018-04-07 19:48:11 +02:00
2018-11-22 18:43:18 +01:00
// Check have Move
2020-11-26 22:28:39 +01:00
doMove := f . Features ( ) . Move
2018-11-22 18:43:18 +01:00
if doMove == nil {
t . Skip ( "FS has no Mover interface" )
}
2014-07-24 23:50:11 +02:00
2018-11-22 18:43:18 +01:00
// state of files now:
// 1: file name.txt
// 2: hello sausage?/../z.txt
var file1Move = file1
var file2Move = file2
// check happy path, i.e. no naming conflicts when rename and move are two
// separate operations
file2Move . Path = "other.txt"
src := fstest . NewObject ( ctx , t , f , file2 . Path )
dst , err := doMove ( ctx , src , file2Move . Path )
if err == fs . ErrorCantMove {
t . Skip ( "FS can't move" )
}
require . NoError ( t , err )
// check file exists in new listing
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2Move } )
// Check dst lightly - list above has checked ModTime/Hashes
assert . Equal ( t , file2Move . Path , dst . Remote ( ) )
// 1: file name.txt
// 2: other.txt
// Check conflict on "rename, then move"
file1Move . Path = "moveTest/other.txt"
src = fstest . NewObject ( ctx , t , f , file1 . Path )
_ , err = doMove ( ctx , src , file1Move . Path )
require . NoError ( t , err )
fstest . CheckListing ( t , f , [ ] fstest . Item { file1Move , file2Move } )
// 1: moveTest/other.txt
// 2: other.txt

// Check conflict on "move, then rename"
src = fstest . NewObject ( ctx , t , f , file1Move . Path )
_ , err = doMove ( ctx , src , file1 . Path )
require . NoError ( t , err )
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2Move } )
// 1: file name.txt
// 2: other.txt

src = fstest . NewObject ( ctx , t , f , file2Move . Path )
_ , err = doMove ( ctx , src , file2 . Path )
require . NoError ( t , err )
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2 } )
// 1: file name.txt
// 2: hello sausage?/../z.txt

// Tidy up moveTest directory
require . NoError ( t , f . Rmdir ( ctx , "moveTest" ) )
// Test that server side moving files does the correct thing with metadata
t . Run ( "Metadata" , func ( t * testing . T ) {
if ! f . Features ( ) . WriteMetadata {
t . Skip ( "Skipping test as can't write metadata" )
}
ctx , ci := fs . AddConfig ( ctx )
ci . Metadata = true
// Create file with metadata
const name = "test metadata move.txt"
const newName = "test metadata moved.txt"
t1 := fstest . Time ( "2003-02-03T04:05:06.499999999Z" )
t2 := fstest . Time ( "2004-03-03T04:05:06.499999999Z" )
file := fstest . NewItem ( name , name , t1 )
contents := random . String ( 100 )
var testMetadata = fs . Metadata {
// System metadata supported by all backends
"mtime" : t1 . Format ( time . RFC3339Nano ) ,
// User metadata
"potato" : "jersey" ,
}
o := PutTestContentsMetadata ( ctx , t , f , & file , contents , true , "text/plain" , testMetadata )
fstest . CheckEntryMetadata ( ctx , t , f , o , testMetadata )
// Move it with --metadata-set
ci . MetadataSet = fs . Metadata {
// System metadata supported by all backends
"mtime" : t2 . Format ( time . RFC3339Nano ) ,
// User metadata
"potato" : "royal" ,
}
newO , err := doMove ( ctx , o , newName )
require . NoError ( t , err )
file . Path = newName
file . ModTime = t2
fstest . CheckListing ( t , f , [ ] fstest . Item { file1 , file2 , file } )
// Check metadata is correct
fstest . CheckEntryMetadata ( ctx , t , f , newO , ci . MetadataSet )
newO = fstest . NewObject ( ctx , t , f , newName )
fstest . CheckEntryMetadata ( ctx , t , f , newO , ci . MetadataSet )
// Remove test file
require . NoError ( t , newO . Remove ( ctx ) )
} )
} )

// Move src to this remote using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
// TestFsDirMove tests DirMove
//
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$
t . Run ( "FsDirMove" , func ( t * testing . T ) {
skipIfNotOk ( t )
// Check have DirMove
doDirMove := f . Features ( ) . DirMove
if doDirMove == nil {
t . Skip ( "FS has no DirMover interface" )
}

// Check it can't move onto itself
err := doDirMove ( ctx , f , "" , "" )
require . Equal ( t , fs . ErrorDirExists , err )

// new remote
newRemote , _ , removeNewRemote , err := fstest . RandomRemote ( )
require . NoError ( t , err )
defer removeNewRemote ( )
const newName = "new_name/sub_new_name"
// try the move
err = newRemote . Features ( ) . DirMove ( ctx , f , "" , newName )
require . NoError ( t , err )

// check remotes
// remote should not exist here
_ , err = f . List ( ctx , "" )
assert . True ( t , errors . Is ( err , fs . ErrorDirNotFound ) )
//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
file1Copy := file1
file1Copy . Path = path . Join ( newName , file1 . Path )
file2Copy := file2
file2Copy . Path = path . Join ( newName , file2 . Path )
fstest . CheckListingWithPrecision ( t , newRemote , [ ] fstest . Item { file2Copy , file1Copy } , [ ] string {
"new_name" ,
"new_name/sub_new_name" ,
"new_name/sub_new_name/hello? sausage" ,
"new_name/sub_new_name/hello? sausage/êé" ,
"new_name/sub_new_name/hello? sausage/êé/Hello, 世界" ,
"new_name/sub_new_name/hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
} , newRemote . Precision ( ) )
// move it back
err = doDirMove ( ctx , newRemote , newName , "" )
require . NoError ( t , err )

// check remotes
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { file2 , file1 } , [ ] string {
"hello? sausage" ,
"hello? sausage/êé" ,
"hello? sausage/êé/Hello, 世界" ,
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠" ,
} , f . Precision ( ) )
fstest . CheckListingWithPrecision ( t , newRemote , [ ] fstest . Item { } , [ ] string {
"new_name" ,
} , newRemote . Precision ( ) )
} )

// TestFsRmdirFull tests removing a non-empty directory
t . Run ( "FsRmdirFull" , func ( t * testing . T ) {
skipIfNotOk ( t )
if isBucketBasedButNotRoot ( f ) {
t . Skip ( "Skipping test as non-root bucket-based remote" )
}
err := f . Rmdir ( ctx , "" )
require . Error ( t , err , "Expecting error on Rmdir of a non-empty remote" )
} )

// TestFsPrecision tests the Precision of the Fs
t . Run ( "FsPrecision" , func ( t * testing . T ) {
skipIfNotOk ( t )
precision := f . Precision ( )
if precision == fs . ModTimeNotSupported {
return
}
if precision > time . Second || precision < 0 {
t . Fatalf ( "Precision out of range %v" , precision )
}
// FIXME check expected precision
} )
// TestObjectString tests the Object String method
t . Run ( "ObjectString" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
assert . Equal ( t , file1 . Path , obj . String ( ) )
if opt . NilObject != nil {
assert . Equal ( t , "<nil>" , opt . NilObject . String ( ) )
}
} )
// TestObjectFs tests the object can be found
t . Run ( "ObjectFs" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
// If this is set we don't do the direct comparison of
// the Fs from the object as it may be different
if opt . SkipFsMatch {
return
}
testRemote := f
if obj . Fs ( ) != testRemote {
// Check to see if this wraps something else
if doUnWrap := testRemote . Features ( ) . UnWrap ; doUnWrap != nil {
testRemote = doUnWrap ( )
}
}
assert . Equal ( t , obj . Fs ( ) , testRemote )
} )
// TestObjectRemote tests the Remote is correct
t . Run ( "ObjectRemote" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
assert . Equal ( t , file1 . Path , obj . Remote ( ) )
} )
// TestObjectHashes checks all the hashes the object supports
t . Run ( "ObjectHashes" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
file1 . CheckHashes ( t , obj )
} )
// TestObjectModTime tests the ModTime of the object is correct
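// (kept as a function value so the ObjectSetModTime subtest below can re-run it after updating the modification time)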
TestObjectModTime := func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
file1 . CheckModTime ( t , obj , obj . ModTime ( ctx ) , f . Precision ( ) )
}
t . Run ( "ObjectModTime" , TestObjectModTime )
// TestObjectMimeType tests the MimeType of the object is correct
t . Run ( "ObjectMimeType" , func ( t * testing . T ) {
skipIfNotOk ( t )
features := f . Features ( )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
do , ok := obj . ( fs . MimeTyper )
if ! ok {
require . False ( t , features . ReadMimeType , "Features.ReadMimeType is set but Object.MimeType method not found" )
t . Skip ( "MimeType method not supported" )
}
mimeType := do . MimeType ( ctx )
if ! features . ReadMimeType {
require . Equal ( t , "" , mimeType , "Features.ReadMimeType is not set but Object.MimeType returned a non-empty MimeType" )
} else if features . WriteMimeType {
assert . Equal ( t , file1MimeType , mimeType , "can read and write mime types but failed" )
} else {
if strings . ContainsRune ( mimeType , ';' ) {
assert . Equal ( t , "text/plain; charset=utf-8" , mimeType )
} else {
assert . Equal ( t , "text/plain" , mimeType )
}
}
} )
// TestObjectMetadata tests the Metadata of the object is correct
t . Run ( "ObjectMetadata" , func ( t * testing . T ) {
skipIfNotOk ( t )
ctx , ci := fs . AddConfig ( ctx )
ci . Metadata = true
features := f . Features ( )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
do , objectHasMetadata := obj . ( fs . Metadataer )
if objectHasMetadata || features . ReadMetadata || features . WriteMetadata || features . UserMetadata {
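// Any form of metadata support means the backend must register MetadataInfo in its RegInfo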
fsInfo := fs . FindFromFs ( f )
require . NotNil ( t , fsInfo )
require . NotNil ( t , fsInfo . MetadataInfo , "Object declares metadata support but no MetadataInfo in RegInfo" )
}
if ! objectHasMetadata {
require . False ( t , features . ReadMetadata , "Features.ReadMetadata is set but Object.Metadata method not found" )
t . Skip ( "Metadata method not supported" )
}
metadata , err := do . Metadata ( ctx )
require . NoError ( t , err )
// check standard metadata
for k , v := range metadata {
switch k {
case "atime" , "btime" , "mtime" :
mtime , err := time . Parse ( time . RFC3339Nano , v )
require . NoError ( t , err )
if k == "mtime" {
fstest . AssertTimeEqualWithPrecision ( t , file1 . Path , file1 . ModTime , mtime , f . Precision ( ) )
}
}
}
if ! features . ReadMetadata {
if metadata != nil && ! features . Overlay {
require . Nil ( t , metadata , "Features.ReadMetadata is not set but Object.Metadata returned a non-nil Metadata: %#v" , metadata )
}
} else if features . WriteMetadata {
require . NotNil ( t , metadata )
if features . UserMetadata {
// check all the metadata bits we uploaded are present - there may be more we didn't write
for k , v := range file1Metadata {
assert . Equal ( t , v , metadata [ k ] , "can read and write metadata but failed on key %q (want=%+v, got=%+v)" , k , file1Metadata , metadata )
}
}
// Now test we can set the mtime and content-type via the metadata and these take precedence
t . Run ( "mtime" , func ( t * testing . T ) {
path := "metadatatest"
mtimeModTime := fstest . Time ( "2002-02-03T04:05:06.499999999Z" )
modTime := fstest . Time ( "2003-02-03T04:05:06.499999999Z" )
item := fstest . NewItem ( path , path , modTime )
metaMimeType := "application/zip"
mimeType := "application/gzip"
metadata := fs . Metadata {
"mtime" : mtimeModTime . Format ( time . RFC3339Nano ) ,
"content-type" : metaMimeType ,
}
// This checks the mtime is correct also and returns the re-read object
_ , obj := testPutMimeType ( ctx , t , f , & item , mimeType , metadata )
defer func ( ) {
assert . NoError ( t , obj . Remove ( ctx ) )
} ( )
// Check content-type got updated too
if features . ReadMimeType && features . WriteMimeType {
// read the object from scratch
o , err := f . NewObject ( ctx , path )
require . NoError ( t , err )
// Check the mimetype is correct
do , ok := o . ( fs . MimeTyper )
require . True ( t , ok )
gotMimeType := do . MimeType ( ctx )
assert . Equal ( t , metaMimeType , gotMimeType )
}
} )
} // else: Have some metadata here we didn't write - can't really check it!
} )

// TestObjectSetModTime tests that SetModTime works
t . Run ( "ObjectSetModTime" , func ( t * testing . T ) {
skipIfNotOk ( t )
newModTime := fstest . Time ( "2011-12-13T14:15:16.999999999Z" )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
err := obj . SetModTime ( ctx , newModTime )
if err == fs . ErrorCantSetModTime || err == fs . ErrorCantSetModTimeWithoutDelete {
t . Log ( err )
return
}
require . NoError ( t , err )
file1 . ModTime = newModTime
file1 . CheckModTime ( t , obj , obj . ModTime ( ctx ) , f . Precision ( ) )
// And make a new object and read it from there too
TestObjectModTime ( t )
} )
// TestObjectSize tests that Size works
t . Run ( "ObjectSize" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
assert . Equal ( t , file1 . Size , obj . Size ( ) )
} )
// TestObjectOpen tests that Open works
t . Run ( "ObjectOpen" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
assert . Equal ( t , file1Contents , ReadObject ( ctx , t , obj , - 1 ) , "contents of file1 differ" )
} )

// TestObjectOpenSeek tests that Open works with SeekOption
t . Run ( "ObjectOpenSeek" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
assert . Equal ( t , file1Contents [ 50 : ] , ReadObject ( ctx , t , obj , - 1 , & fs . SeekOption { Offset : 50 } ) , "contents of file1 differ after seek" )
} )

// TestObjectOpenRange tests that Open works with RangeOption
//
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|ObjectOpenRange)$'
t . Run ( "ObjectOpenRange" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
for _ , test := range [ ] struct {
ro fs . RangeOption
wantStart , wantEnd int
} {
{ fs . RangeOption { Start : 5 , End : 15 } , 5 , 16 } ,
{ fs . RangeOption { Start : 80 , End : - 1 } , 80 , 100 } ,
{ fs . RangeOption { Start : 81 , End : 100000 } , 81 , 100 } ,
{ fs . RangeOption { Start : - 1 , End : 20 } , 80 , 100 } , // if start is omitted this means get the final bytes
// {fs.RangeOption{Start: -1, End: -1}, 0, 100}, - this seems to work but the RFC doesn't define it
} {
got := ReadObject ( ctx , t , obj , - 1 , & test . ro )
foundAt := strings . Index ( file1Contents , got )
help := fmt . Sprintf ( "%#v failed want [%d:%d] got [%d:%d]" , test . ro , test . wantStart , test . wantEnd , foundAt , foundAt + len ( got ) )
assert . Equal ( t , file1Contents [ test . wantStart : test . wantEnd ] , got , help )
}
} )
// TestObjectPartialRead tests that reading only part of the object does the correct thing
t . Run ( "ObjectPartialRead" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
assert . Equal ( t , file1Contents [ : 50 ] , ReadObject ( ctx , t , obj , 50 ) , "contents of file1 differ after limited read" )
} )

// TestObjectUpdate tests that Update works
t . Run ( "ObjectUpdate" , func ( t * testing . T ) {
skipIfNotOk ( t )
contents := random . String ( 200 )
var h * hash . MultiHasher

file1 . Size = int64 ( len ( contents ) )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
remoteBefore := obj . Remote ( )
obji := object . NewStaticObjectInfo ( file1 . Path + "-should-be-ignored.bin" , file1 . ModTime , int64 ( len ( contents ) ) , true , nil , obj . Fs ( ) )
retry ( t , "Update object" , func ( ) error {
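// Rebuild the reader and the hasher on every attempt so a retried upload re-hashes the full contents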
buf := bytes . NewBufferString ( contents )
h = hash . NewMultiHasher ( )
in := io . TeeReader ( buf , h )
return obj . Update ( ctx , in , obji )
} )
remoteAfter := obj . Remote ( )
assert . Equal ( t , remoteBefore , remoteAfter , "Remote should not change" )
file1 . Hashes = h . Sums ( )
// check the object has been updated
file1 . Check ( t , obj , f . Precision ( ) )

// Re-read the object and check again
obj = fstest . NewObject ( ctx , t , f , file1 . Path )
file1 . Check ( t , obj , f . Precision ( ) )

// check contents correct
assert . Equal ( t , contents , ReadObject ( ctx , t , obj , - 1 ) , "contents of updated file1 differ" )
file1Contents = contents
} )

// TestObjectStorable tests that Storable works
t . Run ( "ObjectStorable" , func ( t * testing . T ) {
skipIfNotOk ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
require . True ( t , obj . Storable ( ) , "Expecting object to be storable" )
} )

// TestFsIsFile tests that an error is returned along with a valid fs
// which points to the parent directory.
t . Run ( "FsIsFile" , func ( t * testing . T ) {
skipIfNotOk ( t )
remoteName := subRemoteName + "/" + file2 . Path
file2Copy := file2
file2Copy . Path = "z.txt"
fileRemote , err := fs . NewFs ( context . Background ( ) , remoteName )
require . NotNil ( t , fileRemote )
assert . Equal ( t , fs . ErrorIsFile , err )
// Check Fs.Root returns the right thing
t . Run ( "FsRoot" , func ( t * testing . T ) {
skipIfNotOk ( t )
got := fileRemote . Root ( )
remoteDir := path . Dir ( remoteName )
want := remoteDir
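// Strip the config name (everything up to and including the last ':') so only the path part is compared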
colon := strings . LastIndex ( want , ":" )
if colon >= 0 {
want = want [ colon + 1 : ]
}
if isLocalRemote {
// only check last path element on local
require . Equal ( t , filepath . Base ( remoteDir ) , filepath . Base ( got ) )
} else {
require . Equal ( t , want , got )
}
} )
if strings . HasPrefix ( remoteName , "TestChunker" ) && strings . Contains ( remoteName , "Nometa" ) {
// TODO fix chunker and remove this bypass
t . Logf ( "Skip listing check -- chunker can't yet handle this tricky case" )
return
}
fstest . CheckListing ( t , fileRemote , [ ] fstest . Item { file2Copy } )
} )
// TestFsIsFileNotFound tests that an error is not returned if no object is found
t . Run ( "FsIsFileNotFound" , func ( t * testing . T ) {
skipIfNotOk ( t )
remoteName := subRemoteName + "/not found.txt"
fileRemote , err := fs . NewFs ( context . Background ( ) , remoteName )
require . NoError ( t , err )
fstest . CheckListing ( t , fileRemote , [ ] fstest . Item { } )
} )
// Test that things work from the root
t . Run ( "FromRoot" , func ( t * testing . T ) {
if features := f . Features ( ) ; features . BucketBased && ! features . BucketBasedRootOK {
t . Skip ( "Can't list from root on this remote" )
}
parsed , err := fspath . Parse ( subRemoteName )
require . NoError ( t , err )
configName , configLeaf := parsed . ConfigString , parsed . Path
if configName == "" {
configName , configLeaf = path . Split ( subRemoteName )
} else {
configName += ":"
}
t . Logf ( "Opening root remote %q path %q from %q" , configName , configLeaf , subRemoteName )
rootRemote , err := fs . NewFs ( context . Background ( ) , configName )
require . NoError ( t , err )
file1Root := file1
file1Root . Path = path . Join ( configLeaf , file1Root . Path )
file2Root := file2
file2Root . Path = path . Join ( configLeaf , file2Root . Path )
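// Collect the parent directories of file2, relative to the root remote, which should appear in the root listing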
var dirs [ ] string
dir := file2 . Path
for {
dir = path . Dir ( dir )
if dir == "" || dir == "." || dir == "/" {
break
}
dirs = append ( dirs , path . Join ( configLeaf , dir ) )
}
// Check that we can see file1 and file2 from the root
t . Run ( "List" , func ( t * testing . T ) {
fstest . CheckListingWithRoot ( t , rootRemote , configLeaf , [ ] fstest . Item { file1Root , file2Root } , dirs , rootRemote . Precision ( ) )
} )
// Check that listing the entries is OK
t . Run ( "ListEntries" , func ( t * testing . T ) {
entries , err := rootRemote . List ( context . Background ( ) , configLeaf )
require . NoError ( t , err )
fstest . CompareItems ( t , entries , [ ] fstest . Item { file1Root } , dirs [ len ( dirs ) - 1 : ] , rootRemote . Precision ( ) , "ListEntries" )
} )
// List the root with ListR
t . Run ( "ListR" , func ( t * testing . T ) {
doListR := rootRemote . Features ( ) . ListR
if doListR == nil {
t . Skip ( "FS has no ListR interface" )
}
file1Found , file2Found := false , false
stopTime := time . Now ( ) . Add ( 10 * time . Second )
errTooMany := errors . New ( "too many files" )
errFound := errors . New ( "found" )
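// errFound stops the walk early once both files have been seen; errTooMany gives up if the remote has too many entries to scan within 10 seconds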
err := doListR ( context . Background ( ) , "" , func ( entries fs . DirEntries ) error {
for _ , entry := range entries {
remote := entry . Remote ( )
if remote == file1Root . Path {
file1Found = true
}
if remote == file2Root . Path {
file2Found = true
}
if file1Found && file2Found {
return errFound
}
}
if time . Now ( ) . After ( stopTime ) {
return errTooMany
}
return nil
} )
if ! errors . Is ( err , errFound ) && ! errors . Is ( err , errTooMany ) {
assert . NoError ( t , err )
}
if ! errors . Is ( err , errTooMany ) {
assert . True ( t , file1Found , "file1Root %q not found" , file1Root . Path )
assert . True ( t , file2Found , "file2Root %q not found" , file2Root . Path )
} else {
t . Logf ( "Too many files to list - giving up" )
}
} )
// Create a new file
t . Run ( "Put" , func ( t * testing . T ) {
file3Root := fstest . Item {
ModTime : time . Now ( ) ,
Path : path . Join ( configLeaf , "created from root.txt" ) ,
}
_ , file3Obj := testPut ( ctx , t , rootRemote , & file3Root )
fstest . CheckListingWithRoot ( t , rootRemote , configLeaf , [ ] fstest . Item { file1Root , file2Root , file3Root } , nil , rootRemote . Precision ( ) )
// And then remove it
t . Run ( "Remove" , func ( t * testing . T ) {
require . NoError ( t , file3Obj . Remove ( context . Background ( ) ) )
fstest . CheckListingWithRoot ( t , rootRemote , configLeaf , [ ] fstest . Item { file1Root , file2Root } , nil , rootRemote . Precision ( ) )
} )
} )
} )
// TestPublicLink tests creation of sharable, public links
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|PublicLink)$'
t . Run ( "PublicLink" , func ( t * testing . T ) {
skipIfNotOk ( t )

publicLinkFunc := f . Features ( ) . PublicLink
if publicLinkFunc == nil {
t . Skip ( "FS has no PublicLinker interface" )
}
type PublicLinkFunc func ( ctx context . Context , remote string , expire fs . Duration , unlink bool ) ( link string , err error )
wrapPublicLinkFunc := func ( f PublicLinkFunc ) PublicLinkFunc {
return func ( ctx context . Context , remote string , expire fs . Duration , unlink bool ) ( link string , err error ) {
link , err = publicLinkFunc ( ctx , remote , expire , unlink )
if err == nil {
return
}
// For OneDrive Personal, link expiry is a premium feature
// Don't let it fail the test (https://github.com/rclone/rclone/issues/5420)
if fsInfo . Name == "onedrive" && strings . Contains ( err . Error ( ) , "accountUpgradeRequired" ) {
t . Log ( "treating accountUpgradeRequired as success for PublicLink" )
link , err = "bogus link to " + remote , nil
}
return
}
}
expiry := fs . Duration ( 120 * time . Second )
doPublicLink := wrapPublicLinkFunc ( publicLinkFunc )

// if object not found
link , err := doPublicLink ( ctx , file1 . Path + "_does_not_exist" , expiry , false )
require . Error ( t , err , "Expected to get error when file doesn't exist" )
require . Equal ( t , "" , link , "Expected link to be empty on error" )

// sharing file for the first time
link1 , err := doPublicLink ( ctx , file1 . Path , expiry , false )
require . NoError ( t , err )
require . NotEqual ( t , "" , link1 , "Link should not be empty" )

link2 , err := doPublicLink ( ctx , file2 . Path , expiry , false )
require . NoError ( t , err )
require . NotEqual ( t , "" , link2 , "Link should not be empty" )

require . NotEqual ( t , link1 , link2 , "Links to different files should differ" )

// sharing file for the 2nd time
link1 , err = doPublicLink ( ctx , file1 . Path , expiry , false )
require . NoError ( t , err )
require . NotEqual ( t , "" , link1 , "Link should not be empty" )

// sharing directory for the first time
path := path . Dir ( file2 . Path )
link3 , err := doPublicLink ( ctx , path , expiry , false )
if err != nil && ( errors . Is ( err , fs . ErrorCantShareDirectories ) || errors . Is ( err , fs . ErrorObjectNotFound ) ) {
t . Log ( "skipping directory tests as not supported on this backend" )
} else {
require . NoError ( t , err )
require . NotEqual ( t , "" , link3 , "Link should not be empty" )

// sharing directory for the second time
link3 , err = doPublicLink ( ctx , path , expiry , false )
require . NoError ( t , err )
require . NotEqual ( t , "" , link3 , "Link should not be empty" )

// sharing the "root" directory in a subremote
subRemote , _ , removeSubRemote , err := fstest . RandomRemote ( )
require . NoError ( t , err )
defer removeSubRemote ( )
// ensure sub remote isn't empty
buf := bytes . NewBufferString ( "somecontent" )
obji := object . NewStaticObjectInfo ( "somefile" , time . Now ( ) , int64 ( buf . Len ( ) ) , true , nil , nil )
retry ( t , "Put" , func ( ) error {
_ , err := subRemote . Put ( ctx , buf , obji )
return err
} )

link4 , err := wrapPublicLinkFunc ( subRemote . Features ( ) . PublicLink ) ( ctx , "" , expiry , false )
require . NoError ( t , err , "Sharing root in a sub-remote should work" )
require . NotEqual ( t , "" , link4 , "Link should not be empty" )
}
} )

// TestSetTier tests SetTier and GetTier functionality
t . Run ( "SetTier" , func ( t * testing . T ) {
skipIfNotSetTier ( t )
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
setter , ok := obj . ( fs . SetTierer )
assert . True ( t , ok )
getter , ok := obj . ( fs . GetTierer )
assert . True ( t , ok )
// If interfaces are supported TiersToTest should contain
// at least one entry
supportedTiers := opt . TiersToTest
assert . NotEmpty ( t , supportedTiers )
// test set tier changes on supported storage classes or tiers
for _ , tier := range supportedTiers {
err := setter . SetTier ( tier )
assert . Nil ( t , err )
got := getter . GetTier ( )
assert . Equal ( t , tier , got )
}
} )
// Check to see if Fs that wrap other Objects implement all the optional methods
t . Run ( "ObjectCheckWrap" , func ( t * testing . T ) {
skipIfNotOk ( t )
if opt . SkipObjectCheckWrap {
t . Skip ( "Skipping FsCheckWrap on this Fs" )
}
ft := new ( fs . Features ) . Fill ( ctx , f )
if ft . UnWrap == nil {
t . Skip ( "Not a wrapping Fs" )
}
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
_ , unsupported := fs . ObjectOptionalInterfaces ( obj )
for _ , name := range unsupported {
if ! stringsContains ( name , opt . UnimplementableObjectMethods ) {
t . Errorf ( "Missing Object wrapper for %s" , name )
}
}
} )
// State of remote at the moment the internal tests are called
InternalTestFiles = [ ] fstest . Item { file1 , file2 }
// TestObjectRemove tests Remove
t . Run ( "ObjectRemove" , func ( t * testing . T ) {
skipIfNotOk ( t )
// remove file1
obj := fstest . NewObject ( ctx , t , f , file1 . Path )
err := obj . Remove ( ctx )
require . NoError ( t , err )
// check listing without modtime as TestPublicLink may change the modtime
fstest . CheckListingWithPrecision ( t , f , [ ] fstest . Item { file2 } , nil , fs . ModTimeNotSupported )
// Show the internal tests that file1 is gone
InternalTestFiles = [ ] fstest . Item { file2 }
} )
// TestAbout tests the About optional interface
t . Run ( "ObjectAbout" , func ( t * testing . T ) {
skipIfNotOk ( t )
// Check have About
doAbout := f . Features ( ) . About
if doAbout == nil {
t . Skip ( "FS does not support About" )
}
// Can't really check the output much!
usage , err := doAbout ( context . Background ( ) )
require . NoError ( t , err )
require . NotNil ( t , usage )
assert . NotEqual ( t , int64 ( 0 ) , usage . Total )
} )
// Just file2 remains for Purge to clean up
// TestFsPutStream tests uploading files when size isn't known in advance.
// This may trigger large buffer allocation in some backends, keep it
// close to the end of suite. (See fs/operations/xtra_operations_test.go)
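// The upload below passes Size: -1 in the ObjectInfo, which is how an unknown length is signalled to PutStream.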
t . Run ( "FsPutStream" , func ( t * testing . T ) {
skipIfNotOk ( t )
if f . Features ( ) . PutStream == nil {
t . Skip ( "FS has no PutStream interface" )
}
for _ , contentSize := range [ ] int { 0 , 100 } {
t . Run ( strconv . Itoa ( contentSize ) , func ( t * testing . T ) {
file := fstest . Item {
ModTime : fstest . Time ( "2001-02-03T04:05:06.499999999Z" ) ,
Path : "piped data.txt" ,
Size : - 1 , // use unknown size during upload
}
var (
err error
obj fs . Object
uploadHash * hash . MultiHasher
)
retry ( t , "PutStream" , func ( ) error {
contents := random . String ( contentSize )
buf := bytes . NewBufferString ( contents )
uploadHash = hash . NewMultiHasher ( )
in := io . TeeReader ( buf , uploadHash )
file . Size = - 1
obji := object . NewStaticObjectInfo ( file . Path , file . ModTime , file . Size , true , nil , nil )
obj , err = f . Features ( ) . PutStream ( ctx , in , obji )
return err
} )
file . Hashes = uploadHash . Sums ( )
file . Size = int64 ( contentSize ) // use correct size when checking
file . Check ( t , obj , f . Precision ( ) )
// Re-read the object and check again
obj = fstest . NewObject ( ctx , t , f , file . Path )
file . Check ( t , obj , f . Precision ( ) )
require . NoError ( t , obj . Remove ( ctx ) )
} )
}
} )

// TestInternal calls InternalTest() on the Fs
t . Run ( "Internal" , func ( t * testing . T ) {
skipIfNotOk ( t )
if it , ok := f . ( InternalTester ) ; ok {
it . InternalTest ( t )
} else {
t . Skipf ( "%T does not implement InternalTester" , f )
}
} )
} )
// TestFsPutChunked may trigger large buffer allocation with
// some backends (see fs/operations/xtra_operations_test.go),
// keep it closer to the end of suite.
t . Run ( "FsPutChunked" , func ( t * testing . T ) {
skipIfNotOk ( t )
if testing . Short ( ) {
t . Skip ( "not running with -short" )
}
if opt . ChunkedUpload . Skip {
t . Skip ( "skipping as ChunkedUpload.Skip is set" )
}
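// A backend typically satisfies SetUploadChunkSizer with a small wrapper
// around its options, for example (an illustrative sketch only, not taken
// from any particular backend):
//
//	func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
//		old := f.opt.ChunkSize
//		f.opt.ChunkSize = cs
//		return old, nil
//	}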
setUploadChunkSizer , _ := f . ( SetUploadChunkSizer )
if setUploadChunkSizer == nil {
t . Skipf ( "%T does not implement SetUploadChunkSizer" , f )
}
setUploadCutoffer , _ := f . ( SetUploadCutoffer )
minChunkSize := opt . ChunkedUpload . MinChunkSize
if minChunkSize < 100 {
minChunkSize = 100
}
if opt . ChunkedUpload . CeilChunkSize != nil {
minChunkSize = opt . ChunkedUpload . CeilChunkSize ( minChunkSize )
}
maxChunkSize := 2 * fs . Mebi
if maxChunkSize < 2 * minChunkSize {
maxChunkSize = 2 * minChunkSize
}
if opt . ChunkedUpload . MaxChunkSize > 0 && maxChunkSize > opt . ChunkedUpload . MaxChunkSize {
maxChunkSize = opt . ChunkedUpload . MaxChunkSize
}
if opt . ChunkedUpload . CeilChunkSize != nil {
maxChunkSize = opt . ChunkedUpload . CeilChunkSize ( maxChunkSize )
}
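// next rounds minChunkSize with the supplied function, falling back to minChunkSize if the result would exceed maxChunkSize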
next := func ( f func ( fs . SizeSuffix ) fs . SizeSuffix ) fs . SizeSuffix {
s := f ( minChunkSize )
if s > maxChunkSize {
s = minChunkSize
}
return s
}
chunkSizes := fs . SizeSuffixList {
minChunkSize ,
minChunkSize + ( maxChunkSize - minChunkSize ) / 3 ,
next ( NextPowerOfTwo ) ,
next ( NextMultipleOf ( 100000 ) ) ,
next ( NextMultipleOf ( 100001 ) ) ,
maxChunkSize ,
}
chunkSizes . Sort ( )
// Set the minimum chunk size, upload cutoff and reset it at the end
oldChunkSize , err := setUploadChunkSizer . SetUploadChunkSize ( minChunkSize )
require . NoError ( t , err )
var oldUploadCutoff fs . SizeSuffix
if setUploadCutoffer != nil {
oldUploadCutoff , err = setUploadCutoffer . SetUploadCutoff ( minChunkSize )
require . NoError ( t , err )
}
defer func ( ) {
_ , err := setUploadChunkSizer . SetUploadChunkSize ( oldChunkSize )
assert . NoError ( t , err )
if setUploadCutoffer != nil {
_ , err := setUploadCutoffer . SetUploadCutoff ( oldUploadCutoff )
assert . NoError ( t , err )
}
} ( )
var lastCs fs . SizeSuffix
for _ , cs := range chunkSizes {
if cs <= lastCs {
continue
}
if opt . ChunkedUpload . CeilChunkSize != nil {
cs = opt . ChunkedUpload . CeilChunkSize ( cs )
}
lastCs = cs
t . Run ( cs . String ( ) , func ( t * testing . T ) {
_ , err := setUploadChunkSizer . SetUploadChunkSize ( cs )
require . NoError ( t , err )
if setUploadCutoffer != nil {
_ , err = setUploadCutoffer . SetUploadCutoff ( cs )
require . NoError ( t , err )
}
var testChunks [ ] fs . SizeSuffix
if opt . ChunkedUpload . NeedMultipleChunks {
// If NeedMultipleChunks is set then test with > cs
testChunks = [ ] fs . SizeSuffix { cs + 1 , 2 * cs , 2 * cs + 1 }
} else {
testChunks = [ ] fs . SizeSuffix { cs - 1 , cs , 2 * cs + 1 }
}
for _ , fileSize := range testChunks {
t . Run ( fmt . Sprintf ( "%d" , fileSize ) , func ( t * testing . T ) {
TestPutLarge ( ctx , t , f , & fstest . Item {
ModTime : fstest . Time ( "2001-02-03T04:05:06.499999999Z" ) ,
Path : fmt . Sprintf ( "chunked-%s-%s.bin" , cs . String ( ) , fileSize . String ( ) ) ,
Size : int64 ( fileSize ) ,
} )
t . Run ( "Streamed" , func ( t * testing . T ) {
if f . Features ( ) . PutStream == nil {
t . Skip ( "FS has no PutStream interface" )
}
TestPutLargeStreamed ( ctx , t , f , & fstest . Item {
ModTime : fstest . Time ( "2001-02-03T04:05:06.499999999Z" ) ,
Path : fmt . Sprintf ( "chunked-%s-%s-streamed.bin" , cs . String ( ) , fileSize . String ( ) ) ,
Size : int64 ( fileSize ) ,
} )
} )
} )
}
} )
}
} )
// Copy files with chunked copy if available
t . Run ( "FsCopyChunked" , func ( t * testing . T ) {
skipIfNotOk ( t )
if testing . Short ( ) {
t . Skip ( "not running with -short" )
}
// Check have Copy
doCopy := f . Features ( ) . Copy
if doCopy == nil {
t . Skip ( "FS has no Copier interface" )
}
if opt . ChunkedUpload . Skip {
t . Skip ( "skipping as ChunkedUpload.Skip is set" )
}
if strings . HasPrefix ( f . Name ( ) , "serves3" ) || strings . HasPrefix ( f . Name ( ) , "TestS3Rclone" ) {
t . Skip ( "FIXME skip test - see #7454" )
}
do , _ := f . ( SetCopyCutoffer )
if do == nil {
t . Skipf ( "%T does not implement SetCopyCutoff" , f )
}
minChunkSize := opt . ChunkedUpload . MinChunkSize
if minChunkSize < 100 {
minChunkSize = 100
}
if opt . ChunkedUpload . CeilChunkSize != nil {
minChunkSize = opt . ChunkedUpload . CeilChunkSize ( minChunkSize )
}
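// The test sizes range from the copy cutoff up to just over twice the cutoff, so multipart server-side copies get exercised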
chunkSizes := fs . SizeSuffixList {
minChunkSize ,
minChunkSize + 1 ,
2 * minChunkSize - 1 ,
2 * minChunkSize ,
2 * minChunkSize + 1 ,
}
for _ , chunkSize := range chunkSizes {
t . Run ( fmt . Sprintf ( "%d" , chunkSize ) , func ( t * testing . T ) {
contents := random . String ( int ( chunkSize ) )
item := fstest . NewItem ( "chunked-copy" , contents , fstest . Time ( "2001-05-06T04:05:06.499999999Z" ) )
src := PutTestContents ( ctx , t , f , & item , contents , true )
defer func ( ) {
assert . NoError ( t , src . Remove ( ctx ) )
} ( )
var itemCopy = item
itemCopy . Path += ".copy"
// Set copy cutoff to minimum value so we make chunks
origCutoff , err := do . SetCopyCutoff ( minChunkSize )
require . NoError ( t , err )
defer func ( ) {
_ , err = do . SetCopyCutoff ( origCutoff )
require . NoError ( t , err )
} ( )
// Do the copy
dst , err := doCopy ( ctx , src , itemCopy . Path )
require . NoError ( t , err )
defer func ( ) {
assert . NoError ( t , dst . Remove ( ctx ) )
} ( )
// Check size
assert . Equal ( t , src . Size ( ) , dst . Size ( ) )
// Check modtime
srcModTime := src . ModTime ( ctx )
dstModTime := dst . ModTime ( ctx )
assert . True ( t , srcModTime . Equal ( dstModTime ) )
// Make sure contents are correct
gotContents := ReadObject ( ctx , t , dst , - 1 )
assert . Equal ( t , contents , gotContents )
} )
}
} )
// TestFsUploadUnknownSize ensures Fs.Put() and Object.Update() don't panic when
// src.Size() == -1
//
// This may trigger large buffer allocation in some backends, keep it
// closer to the suite end. (See fs/operations/xtra_operations_test.go)
t . Run ( "FsUploadUnknownSize" , func ( t * testing . T ) {
skipIfNotOk ( t )
t . Run ( "FsPutUnknownSize" , func ( t * testing . T ) {
defer func ( ) {
assert . Nil ( t , recover ( ) , "Fs.Put() should not panic when src.Size() == -1" )
} ( )
contents := random . String ( 100 )
in := bytes . NewBufferString ( contents )
obji := object . NewStaticObjectInfo ( "unknown-size-put.txt" , fstest . Time ( "2002-02-03T04:05:06.499999999Z" ) , - 1 , true , nil , nil )
obj , err := f . Put ( ctx , in , obji )
if err == nil {
require . NoError ( t , obj . Remove ( ctx ) , "successfully uploaded unknown-sized file but failed to remove" )
}
// if err != nil: it's okay as long as no panic
} )
t . Run ( "FsUpdateUnknownSize" , func ( t * testing . T ) {
unknownSizeUpdateFile := fstest . Item {
ModTime : fstest . Time ( "2002-02-03T04:05:06.499999999Z" ) ,
Path : "unknown-size-update.txt" ,
}
testPut ( ctx , t , f , & unknownSizeUpdateFile )
defer func ( ) {
assert . Nil ( t , recover ( ) , "Object.Update() should not panic when src.Size() == -1" )
} ( )
newContents := random . String ( 200 )
in := bytes . NewBufferString ( newContents )
obj := fstest . NewObject ( ctx , t , f , unknownSizeUpdateFile . Path )
obji := object . NewStaticObjectInfo ( unknownSizeUpdateFile . Path , unknownSizeUpdateFile . ModTime , - 1 , true , nil , obj . Fs ( ) )
err := obj . Update ( ctx , in , obji )
if err == nil {
require . NoError ( t , obj . Remove ( ctx ) , "successfully updated object with unknown-sized source but failed to remove" )
}
// if err != nil: it's okay as long as no panic
} )
} )
// TestFsRootCollapse tests if the root of an fs "collapses" to the
// absolute root. It creates a new fs of the same backend type with its
// root set to a *nonexistent* folder, and attempts to read the info of
// an object in that folder, whose name is taken from a directory that
// exists in the absolute root.
// This test is added after
// https://github.com/rclone/rclone/issues/3164.
t . Run ( "FsRootCollapse" , func ( t * testing . T ) {
deepRemoteName := subRemoteName + "/deeper/nonexisting/directory"
deepRemote , err := fs . NewFs ( context . Background ( ) , deepRemoteName )
require . NoError ( t , err )
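// firstDir is the first path element of the remote (it exists in the absolute root); looking it up from the deeper nonexistent root must fail with ErrorObjectNotFound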
colonIndex := strings . IndexRune ( deepRemoteName , ':' )
firstSlashIndex := strings . IndexRune ( deepRemoteName , '/' )
firstDir := deepRemoteName [ colonIndex + 1 : firstSlashIndex ]
_ , err = deepRemote . NewObject ( ctx , firstDir )
require . Equal ( t , fs . ErrorObjectNotFound , err )
// If err is not fs.ErrorObjectNotFound, it means the backend is
// somehow confused about root and absolute root.
} )
// FsDirSetModTime tests setting the mod time on a directory if possible
t . Run ( "FsDirSetModTime" , func ( t * testing . T ) {
const name = "dir-mod-time"
do := f . Features ( ) . DirSetModTime
if do == nil {
t . Skip ( "FS has no DirSetModTime interface" )
}
// Setting ModTime on a nonexistent directory should return an error
t1 := fstest . Time ( "2001-02-03T04:05:06.499999999Z" )
err := do ( ctx , name , t1 )
require . Error ( t , err )
// Make the directory and try again
err = f . Mkdir ( ctx , name )
require . NoError ( t , err )
err = do ( ctx , name , t1 )
require . NoError ( t , err )
// Check the modtime got set properly
dir := fstest . NewDirectory ( ctx , t , f , name )
fstest . CheckDirModTime ( ctx , t , f , dir , t1 )
// Tidy up
err = f . Rmdir ( ctx , name )
require . NoError ( t , err )
} )
var testMetadata = fs . Metadata {
// System metadata supported by all backends
"mtime" : "2001-02-03T04:05:06.499999999Z" ,
// User metadata
"potato" : "jersey" ,
}
var testMetadata2 = fs . Metadata {
// System metadata supported by all backends
"mtime" : "2002-02-03T04:05:06.499999999Z" ,
// User metadata
"potato" : "king edwards" ,
}
// FsMkdirMetadata tests creating a directory with metadata if possible
t . Run ( "FsMkdirMetadata" , func ( t * testing . T ) {
ctx , ci := fs . AddConfig ( ctx )
ci . Metadata = true
const name = "dir-metadata"
do := f . Features ( ) . MkdirMetadata
if do == nil {
t . Skip ( "FS has no MkdirMetadata interface" )
}
assert . True ( t , f . Features ( ) . WriteDirMetadata , "Backends must support Directory.SetMetadata and Fs.MkdirMetadata" )
// Create the directory from fresh
dir , err := do ( ctx , name , testMetadata )
require . NoError ( t , err )
require . NotNil ( t , dir )
// Check the returned directory and one read from the listing
fstest . CheckEntryMetadata ( ctx , t , f , dir , testMetadata )
fstest . CheckEntryMetadata ( ctx , t , f , fstest . NewDirectory ( ctx , t , f , name ) , testMetadata )
// Now update the metadata on the existing directory
t . Run ( "Update" , func ( t * testing . T ) {
dir , err := do ( ctx , name , testMetadata2 )
require . NoError ( t , err )
require . NotNil ( t , dir )
// Check the returned directory and one read from the listing
fstest . CheckEntryMetadata ( ctx , t , f , dir , testMetadata2 )
// The TestUnionPolicy2 has randomness in it so it sets metadata on
// one directory but can read a different one from the listing.
if f . Name ( ) != "TestUnionPolicy2" {
fstest . CheckEntryMetadata ( ctx , t , f , fstest . NewDirectory ( ctx , t , f , name ) , testMetadata2 )
}
} )
// Now test the Directory methods
t . Run ( "CheckDirectory" , func ( t * testing . T ) {
_ , ok := dir . ( fs . Object )
assert . False ( t , ok , "Directory must not type assert to Object" )
_ , ok = dir . ( fs . ObjectInfo )
assert . False ( t , ok , "Directory must not type assert to ObjectInfo" )
} )
// Tidy up
err = f . Rmdir ( ctx , name )
require . NoError ( t , err )
} )
// FsDirectory checks methods on the directory object
t . Run ( "FsDirectory" , func ( t * testing . T ) {
ctx , ci := fs . AddConfig ( ctx )
ci . Metadata = true
const name = "dir-methods"
features := f . Features ( )
if ! features . CanHaveEmptyDirectories {
t . Skip ( "Can't test if can't have empty directories" )
}
if ! features . ReadDirMetadata &&
! features . WriteDirMetadata &&
! features . WriteDirSetModTime &&
! features . UserDirMetadata &&
! features . Overlay &&
features . UnWrap == nil {
t . Skip ( "FS has no Directory methods and doesn't Wrap" )
}
// Create a directory to start with
err := f . Mkdir ( ctx , name )
require . NoError ( t , err )
// Get the directory object
dir := fstest . NewDirectory ( ctx , t , f , name )
_ , ok := dir . ( fs . Object )
assert . False ( t , ok , "Directory must not type assert to Object" )
_ , ok = dir . ( fs . ObjectInfo )
assert . False ( t , ok , "Directory must not type assert to ObjectInfo" )
// Now test the directory methods
t . Run ( "ReadDirMetadata" , func ( t * testing . T ) {
if ! features . ReadDirMetadata {
t . Skip ( "Directories don't support ReadDirMetadata" )
}
if f . Name ( ) == "TestUnionPolicy3" {
t . Skipf ( "Test unreliable on %q" , f . Name ( ) )
}
fstest . CheckEntryMetadata ( ctx , t , f , dir , fs . Metadata {
"mtime" : dir . ModTime ( ctx ) . Format ( time . RFC3339Nano ) ,
} )
} )
t . Run ( "WriteDirMetadata" , func ( t * testing . T ) {
if ! features . WriteDirMetadata {
t . Skip ( "Directories don't support WriteDirMetadata" )
}
assert . NotNil ( t , features . MkdirMetadata , "Backends must support Directory.SetMetadata and Fs.MkdirMetadata" )
do , ok := dir . ( fs . SetMetadataer )
require . True ( t , ok , "Expected to find SetMetadata method on Directory" )
err := do . SetMetadata ( ctx , testMetadata )
require . NoError ( t , err )
fstest . CheckEntryMetadata ( ctx , t , f , dir , testMetadata )
fstest . CheckEntryMetadata ( ctx , t , f , fstest . NewDirectory ( ctx , t , f , name ) , testMetadata )
} )
t . Run ( "WriteDirSetModTime" , func ( t * testing . T ) {
if ! features . WriteDirSetModTime {
t . Skip ( "Directories don't support WriteDirSetModTime" )
}
assert . NotNil ( t , features . DirSetModTime , "Backends must support Directory.SetModTime and Fs.DirSetModTime" )
t1 := fstest . Time ( "2001-02-03T04:05:10.123123123Z" )
do , ok := dir . ( fs . SetModTimer )
require . True ( t , ok , "Expected to find SetModTime method on Directory" )
err := do . SetModTime ( ctx , t1 )
require . NoError ( t , err )
fstest . CheckDirModTime ( ctx , t , f , dir , t1 )
fstest . CheckDirModTime ( ctx , t , f , fstest . NewDirectory ( ctx , t , f , name ) , t1 )
} )
// Check to see if Fs that wrap other Directories implement all the optional methods
t . Run ( "DirectoryCheckWrap" , func ( t * testing . T ) {
if opt . SkipDirectoryCheckWrap {
t . Skip ( "Skipping DirectoryCheckWrap on this Fs" )
}
if ! features . Overlay && features . UnWrap == nil {
t . Skip ( "Not a wrapping Fs" )
}
_ , unsupported := fs . DirectoryOptionalInterfaces ( dir )
for _ , name := range unsupported {
if ! stringsContains ( name , opt . UnimplementableDirectoryMethods ) {
t . Errorf ( "Missing Directory wrapper for %s" , name )
}
}
} )
// Tidy up
err = f . Rmdir ( ctx , name )
require . NoError ( t , err )
} )
// Purge the folder
err = operations . Purge ( ctx , f , "" )
if ! errors . Is ( err , fs . ErrorDirNotFound ) {
require . NoError ( t , err )
}
purged = true
fstest . CheckListing ( t , f , [ ] fstest . Item { } )

// Check purging again if not bucket-based
if ! isBucketBasedButNotRoot ( f ) {
err = operations . Purge ( ctx , f , "" )
assert . Error ( t , err , "Expecting error on second purge" )
if ! errors . Is ( err , fs . ErrorDirNotFound ) {
t . Log ( "Warning: this should produce fs.ErrorDirNotFound" )
}
2018-11-15 17:28:04 +01:00
}
2018-02-25 10:58:06 +01:00
2018-04-07 19:48:11 +02:00
} )
// Check directory is purged
if ! purged {
_ = operations . Purge ( ctx , f , "" )
}
t . Run ( "FsShutdown" , func ( t * testing . T ) {
do := f . Features ( ) . Shutdown
if do == nil {
t . Skip ( "Shutdown method not supported" )
}
require . NoError ( t , do ( ctx ) )
require . NoError ( t , do ( ctx ) , "must be able to call Shutdown twice" )
} )
// Remove the local directory so we don't clutter up /tmp
if strings . HasPrefix ( remoteName , "/" ) {
t . Log ( "remoteName" , remoteName )
// Remove temp directory
err := os . Remove ( remoteName )
require . NoError ( t , err )
}
}