swift: update github.com/ncw/swift to v2.0.0
The update to v2 of the swift library introduces a context parameter to each function. This required a lot of mostly mechanical changes adding context parameters.

See: https://github.com/ncw/swift/issues/159
See: https://github.com/ncw/swift/issues/161
parent 1545ace8f2
commit bbe791a886
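The diff below is repetitive: every call into the swift library gains a leading context.Context argument, and the helpers that wrap those calls are updated to accept and pass one through. As a minimal sketch of the pattern against v2 (a standalone illustration, not code from this commit; the connection parameters are placeholders):

package main

import (
    "context"
    "fmt"

    "github.com/ncw/swift/v2" // v1 lived at "github.com/ncw/swift"
)

func main() {
    ctx := context.Background()

    // Placeholder credentials - fill in real values for a real cluster.
    c := swift.Connection{
        UserName: "user",
        ApiKey:   "key",
        AuthUrl:  "https://auth.example.com/v1.0",
    }

    // v1: c.Authenticate()
    // v2: the context is the first argument to every call.
    if err := c.Authenticate(ctx); err != nil {
        fmt.Println("authenticate:", err)
        return
    }

    // The same pattern applies to the rest of the API, e.g.:
    // v1: c.ContainerCreate("demo", nil)
    if err := c.ContainerCreate(ctx, "demo", nil); err != nil {
        fmt.Println("container create:", err)
    }
}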
@@ -5,7 +5,7 @@ import (
 "net/http"
 "time"

-"github.com/ncw/swift"
+"github.com/ncw/swift/v2"
 "github.com/rclone/rclone/fs"
 )

@@ -24,7 +24,7 @@ func newAuth(f *Fs) *auth {
 // Request constructs an http.Request for authentication
 //
 // returns nil for not needed
-func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
+func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
 const retries = 10
 for try := 1; try <= retries; try++ {
 err = a.f.getCredentials(context.TODO())
@@ -38,7 +38,7 @@ func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
 }

 // Response parses the result of an http request
-func (a *auth) Response(resp *http.Response) error {
+func (a *auth) Response(ctx context.Context, resp *http.Response) error {
 return nil
 }

@@ -16,7 +16,7 @@ import (
 "strings"
 "time"

-swiftLib "github.com/ncw/swift"
+swiftLib "github.com/ncw/swift/v2"
 "github.com/pkg/errors"
 "github.com/rclone/rclone/backend/swift"
 "github.com/rclone/rclone/fs"
@@ -163,7 +163,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
 Transport: fshttp.NewTransport(ctx),
 }
-err = c.Authenticate()
+err = c.Authenticate(ctx)
 if err != nil {
 return nil, errors.Wrap(err, "error authenticating swift connection")
 }
@@ -33,7 +33,7 @@ import (
 "github.com/aws/aws-sdk-go/aws/request"
 "github.com/aws/aws-sdk-go/aws/session"
 "github.com/aws/aws-sdk-go/service/s3"
-"github.com/ncw/swift"
+"github.com/ncw/swift/v2"
 "github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
@@ -1,10 +1,11 @@
 package swift

 import (
+"context"
 "net/http"
 "time"

-"github.com/ncw/swift"
+"github.com/ncw/swift/v2"
 )

 // auth is an authenticator for swift. It overrides the StorageUrl
@@ -28,19 +29,19 @@ func newAuth(parentAuth swift.Authenticator, storageURL string, authToken string
 }

 // Request creates an http.Request for the auth - return nil if not needed
-func (a *auth) Request(c *swift.Connection) (*http.Request, error) {
+func (a *auth) Request(ctx context.Context, c *swift.Connection) (*http.Request, error) {
 if a.parentAuth == nil {
 return nil, nil
 }
-return a.parentAuth.Request(c)
+return a.parentAuth.Request(ctx, c)
 }

 // Response parses the http.Response
-func (a *auth) Response(resp *http.Response) error {
+func (a *auth) Response(ctx context.Context, resp *http.Response) error {
 if a.parentAuth == nil {
 return nil
 }
-return a.parentAuth.Response(resp)
+return a.parentAuth.Response(ctx, resp)
 }

 // The public storage URL - set Internal to true to read
@@ -13,7 +13,7 @@ import (
 "strings"
 "time"

-"github.com/ncw/swift"
+"github.com/ncw/swift/v2"
 "github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
@@ -391,7 +391,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
 if c.AuthUrl == "" {
 return nil, errors.New("auth not found")
 }
-err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
+err := c.Authenticate(ctx) // fills in c.StorageUrl and c.AuthToken
 if err != nil {
 return nil, err
 }
@@ -467,7 +467,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
 encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 err = f.pacer.Call(func() (bool, error) {
 var rxHeaders swift.Headers
-info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
+info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
 return shouldRetryHeaders(rxHeaders, err)
 })
 if err == nil && info.ContentType != directoryMarkerContentType {
@@ -506,7 +506,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.Object) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -516,7 +516,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 // making sure we read the full metadata for all 0 byte files.
 // We don't read the metadata for directory marker objects.
 if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
-err := o.readMetaData() // reads info and headers, returning an error
+err := o.readMetaData(ctx) // reads info and headers, returning an error
 if err == fs.ErrorObjectNotFound {
 // We have a dangling large object here so just return the original metadata
 fs.Errorf(o, "dangling large object with no contents")
@@ -533,7 +533,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 return nil, err
 }
 } else {
-err := o.readMetaData() // reads info and headers, returning an error
+err := o.readMetaData(ctx) // reads info and headers, returning an error
 if err != nil {
 return nil, err
 }
@@ -544,7 +544,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 // NewObject finds the Object at remote. If it can't be found it
 // returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-return f.newObjectWithInfo(remote, nil)
+return f.newObjectWithInfo(ctx, remote, nil)
 }

 // listFn is called from list and listContainerRoot to handle an object.
@@ -556,7 +556,7 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
 // container to the start.
 //
 // Set recurse to read sub directories
-func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
+func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
 if prefix != "" && !strings.HasSuffix(prefix, "/") {
 prefix += "/"
 }
@@ -571,11 +571,11 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
 if !recurse {
 opts.Delimiter = '/'
 }
-return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
+return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
 var objects []swift.Object
 var err error
 err = f.pacer.Call(func() (bool, error) {
-objects, err = f.c.Objects(container, opts)
+objects, err = f.c.Objects(ctx, container, opts)
 return shouldRetry(err)
 })
 if err == nil {
@@ -613,8 +613,8 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
 type addEntryFn func(fs.DirEntry) error

 // list the objects into the function supplied
-func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
+func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
-err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
+err := f.listContainerRoot(ctx, container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
 if isDirectory {
 remote = strings.TrimRight(remote, "/")
 d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
@@ -622,7 +622,7 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
 } else {
 // newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
 var o fs.Object
-o, err = f.newObjectWithInfo(remote, object)
+o, err = f.newObjectWithInfo(ctx, remote, object)
 if err != nil {
 return err
 }
@@ -639,12 +639,12 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
 }

 // listDir lists a single directory
-func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
 if container == "" {
 return nil, fs.ErrorListBucketRequired
 }
 // List the objects
-err = f.list(container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
+err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
 entries = append(entries, entry)
 return nil
 })
@@ -660,7 +660,7 @@ func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (en
 func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
 var containers []swift.Container
 err = f.pacer.Call(func() (bool, error) {
-containers, err = f.c.ContainersAll(nil)
+containers, err = f.c.ContainersAll(ctx, nil)
 return shouldRetry(err)
 })
 if err != nil {
@@ -691,7 +691,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 }
 return f.listContainers(ctx)
 }
-return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
+return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
 }

 // ListR lists the objects and directories of the Fs starting
@@ -714,7 +714,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 container, directory := f.split(dir)
 list := walk.NewListRHelper(callback)
 listR := func(container, directory, prefix string, addContainer bool) error {
-return f.list(container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
+return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
 return list.Add(entry)
 })
 }
@@ -752,7 +752,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 var containers []swift.Container
 var err error
 err = f.pacer.Call(func() (bool, error) {
-containers, err = f.c.ContainersAll(nil)
+containers, err = f.c.ContainersAll(ctx, nil)
 return shouldRetry(err)
 })
 if err != nil {
@@ -804,7 +804,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 if !f.noCheckContainer {
 err = f.pacer.Call(func() (bool, error) {
 var rxHeaders swift.Headers
-_, rxHeaders, err = f.c.Container(container)
+_, rxHeaders, err = f.c.Container(ctx, container)
 return shouldRetryHeaders(rxHeaders, err)
 })
 }
@@ -814,7 +814,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
 headers["X-Storage-Policy"] = f.opt.StoragePolicy
 }
 err = f.pacer.Call(func() (bool, error) {
-err = f.c.ContainerCreate(container, headers)
+err = f.c.ContainerCreate(ctx, container, headers)
 return shouldRetry(err)
 })
 if err == nil {
@@ -835,7 +835,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 }
 err := f.cache.Remove(container, func() error {
 err := f.pacer.Call(func() (bool, error) {
-err := f.c.ContainerDelete(container)
+err := f.c.ContainerDelete(ctx, container)
 return shouldRetry(err)
 })
 if err == nil {
@@ -865,7 +865,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 go func() {
 delErr <- operations.DeleteFiles(ctx, toBeDeleted)
 }()
-err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
+err := f.list(ctx, container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
 if o, ok := entry.(*Object); ok {
 toBeDeleted <- o
 }
@@ -905,7 +905,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 srcContainer, srcPath := srcObj.split()
 err = f.pacer.Call(func() (bool, error) {
 var rxHeaders swift.Headers
-rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
+rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
 return shouldRetryHeaders(rxHeaders, err)
 })
 if err != nil {
@@ -944,11 +944,11 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
-isDynamicLargeObject, err := o.isDynamicLargeObject()
+isDynamicLargeObject, err := o.isDynamicLargeObject(ctx)
 if err != nil {
 return "", err
 }
-isStaticLargeObject, err := o.isStaticLargeObject()
+isStaticLargeObject, err := o.isStaticLargeObject(ctx)
 if err != nil {
 return "", err
 }
@@ -961,8 +961,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

 // hasHeader checks for the header passed in returning false if the
 // object isn't found.
-func (o *Object) hasHeader(header string) (bool, error) {
+func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 if err == fs.ErrorObjectNotFound {
 return false, nil
@@ -974,29 +974,29 @@ func (o *Object) hasHeader(header string) (bool, error) {
 }

 // isDynamicLargeObject checks for X-Object-Manifest header
-func (o *Object) isDynamicLargeObject() (bool, error) {
+func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
-return o.hasHeader("X-Object-Manifest")
+return o.hasHeader(ctx, "X-Object-Manifest")
 }

 // isStaticLargeObjectFile checks for the X-Static-Large-Object header
-func (o *Object) isStaticLargeObject() (bool, error) {
+func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
-return o.hasHeader("X-Static-Large-Object")
+return o.hasHeader(ctx, "X-Static-Large-Object")
 }

-func (o *Object) isLargeObject() (result bool, err error) {
+func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
-result, err = o.hasHeader("X-Static-Large-Object")
+result, err = o.hasHeader(ctx, "X-Static-Large-Object")
 if result {
 return
 }
-result, err = o.hasHeader("X-Object-Manifest")
+result, err = o.hasHeader(ctx, "X-Object-Manifest")
 if result {
 return
 }
 return false, nil
 }

-func (o *Object) isInContainerVersioning(container string) (bool, error) {
+func (o *Object) isInContainerVersioning(ctx context.Context, container string) (bool, error) {
-_, headers, err := o.fs.c.Container(container)
+_, headers, err := o.fs.c.Container(ctx, container)
 if err != nil {
 return false, err
 }
@@ -1032,7 +1032,7 @@ func (o *Object) decodeMetaData(info *swift.Object) (err error) {
 // it also sets the info
 //
 // it returns fs.ErrorObjectNotFound if the object isn't found
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 if o.headers != nil {
 return nil
 }
@@ -1040,7 +1040,7 @@ func (o *Object) readMetaData() (err error) {
 var h swift.Headers
 container, containerPath := o.split()
 err = o.fs.pacer.Call(func() (bool, error) {
-info, h, err = o.fs.c.Object(container, containerPath)
+info, h, err = o.fs.c.Object(ctx, container, containerPath)
 return shouldRetryHeaders(h, err)
 })
 if err != nil {
@@ -1066,7 +1066,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 if o.fs.ci.UseServerModTime {
 return o.lastModified
 }
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 fs.Debugf(o, "Failed to read metadata: %s", err)
 return o.lastModified
@@ -1081,7 +1081,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 return err
 }
@@ -1099,7 +1099,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 }
 container, containerPath := o.split()
 return o.fs.pacer.Call(func() (bool, error) {
-err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
+err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
 return shouldRetry(err)
 })
 }
@@ -1120,7 +1120,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 container, containerPath := o.split()
 err = o.fs.pacer.Call(func() (bool, error) {
 var rxHeaders swift.Headers
-in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
+in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
 return shouldRetryHeaders(rxHeaders, err)
 })
 return
@@ -1134,9 +1134,9 @@ func min(x, y int64) int64 {
 return y
 }

-func (o *Object) getSegmentsLargeObject() (map[string][]string, error) {
+func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]string, error) {
 container, objectName := o.split()
-segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(container, objectName)
+segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
 if err != nil {
 fs.Debugf(o, "Failed to get list segments of object: %v", err)
 return nil, err
@@ -1153,12 +1153,12 @@ func (o *Object) getSegmentsLargeObject() (map[string][]string, error) {
 return containerSegments, nil
 }

-func (o *Object) removeSegmentsLargeObject(containerSegments map[string][]string) error {
+func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegments map[string][]string) error {
 if containerSegments == nil || len(containerSegments) <= 0 {
 return nil
 }
 for container, segments := range containerSegments {
-_, err := o.fs.c.BulkDelete(container, segments)
+_, err := o.fs.c.BulkDelete(ctx, container, segments)
 if err != nil {
 fs.Debugf(o, "Failed to delete bulk segments %v", err)
 return err
@@ -1167,8 +1167,8 @@ func (o *Object) removeSegmentsLargeObject(containerSegments map[string][]string
 return nil
 }

-func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
+func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
-if err = o.readMetaData(); err != nil {
+if err = o.readMetaData(ctx); err != nil {
 return
 }
 dirManifest := o.headers["X-Object-Manifest"]
@@ -1203,14 +1203,14 @@ func urlEncode(str string) string {

 // updateChunks updates the existing object using chunks to a separate
 // container. It returns a string which prefixes current segments.
-func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
+func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
 container, containerPath := o.split()
 segmentsContainer := container + "_segments"
 // Create the segmentsContainer if it doesn't exist
 var err error
 err = o.fs.pacer.Call(func() (bool, error) {
 var rxHeaders swift.Headers
-_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
+_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
 return shouldRetryHeaders(rxHeaders, err)
 })
 if err == swift.ContainerNotFound {
@@ -1219,7 +1219,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
 }
 err = o.fs.pacer.Call(func() (bool, error) {
-err = o.fs.c.ContainerCreate(segmentsContainer, headers)
+err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
 return shouldRetry(err)
 })
 }
@@ -1241,7 +1241,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 if segmentInfos == nil || len(segmentInfos) == 0 {
 return
 }
-deleteChunks(o, segmentsContainer, segmentInfos)
+deleteChunks(ctx, o, segmentsContainer, segmentInfos)
 })()
 for {
 // can we read at least one byte?
@@ -1263,7 +1263,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
 err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 var rxHeaders swift.Headers
-rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+rxHeaders, err = o.fs.c.ObjectPut(ctx, segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
 if err == nil {
 segmentInfos = append(segmentInfos, segmentPath)
 }
@@ -1280,7 +1280,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 emptyReader := bytes.NewReader(nil)
 err = o.fs.pacer.Call(func() (bool, error) {
 var rxHeaders swift.Headers
-rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
+rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
 return shouldRetryHeaders(rxHeaders, err)
 })

@@ -1291,13 +1291,13 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 return uniquePrefix + "/", err
 }

-func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
+func deleteChunks(ctx context.Context, o *Object, segmentsContainer string, segmentInfos []string) {
 if segmentInfos == nil || len(segmentInfos) == 0 {
 return
 }
 for _, v := range segmentInfos {
 fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
-e := o.fs.c.ObjectDelete(segmentsContainer, v)
+e := o.fs.c.ObjectDelete(ctx, segmentsContainer, v)
 if e != nil {
 fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
 }
@@ -1320,7 +1320,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 modTime := src.ModTime(ctx)

 // Note whether this is a dynamic large object before starting
-isLargeObject, err := o.isLargeObject()
+isLargeObject, err := o.isLargeObject(ctx)
 if err != nil {
 return err
 }
@@ -1328,7 +1328,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 //capture segments before upload
 var segmentsContainer map[string][]string
 if isLargeObject {
-segmentsContainer, _ = o.getSegmentsLargeObject()
+segmentsContainer, _ = o.getSegmentsLargeObject(ctx)
 }

 // Set the mtime
@@ -1339,7 +1339,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 fs.OpenOptionAddHeaders(options, headers)

 if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
-_, err = o.updateChunks(in, headers, size, contentType)
+_, err = o.updateChunks(ctx, in, headers, size, contentType)
 if err != nil {
 return err
 }
@@ -1355,7 +1355,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 var rxHeaders swift.Headers
 err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
+rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
 return shouldRetryHeaders(rxHeaders, err)
 })
 if err != nil {
@@ -1373,17 +1373,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 o.size = int64(inCount.BytesRead())
 }
 }
-isInContainerVersioning, _ := o.isInContainerVersioning(container)
+isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
 // If file was a large object and the container is not enable versioning then remove old/all segments
 if isLargeObject && len(segmentsContainer) > 0 && !isInContainerVersioning {
-err := o.removeSegmentsLargeObject(segmentsContainer)
+err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
 if err != nil {
 fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
 }
 }

 // Read the metadata from the newly created object if necessary
-return o.readMetaData()
+return o.readMetaData(ctx)
 }

 // Remove an object
@@ -1391,14 +1391,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 container, containerPath := o.split()

 //check object is large object
-isLargeObject, err := o.isLargeObject()
+isLargeObject, err := o.isLargeObject(ctx)
 if err != nil {
 return err
 }
 //check container has enabled version to reserve segment when delete
 isInContainerVersioning := false
 if isLargeObject {
-isInContainerVersioning, err = o.isInContainerVersioning(container)
+isInContainerVersioning, err = o.isInContainerVersioning(ctx, container)
 if err != nil {
 return err
 }
@@ -1406,14 +1406,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 //capture segments object if this object is large object
 var containerSegments map[string][]string
 if isLargeObject {
-containerSegments, err = o.getSegmentsLargeObject()
+containerSegments, err = o.getSegmentsLargeObject(ctx)
 if err != nil {
 return err
 }
 }
 // Remove file/manifest first
 err = o.fs.pacer.Call(func() (bool, error) {
-err = o.fs.c.ObjectDelete(container, containerPath)
+err = o.fs.c.ObjectDelete(ctx, container, containerPath)
 return shouldRetry(err)
 })
 if err != nil {
@@ -1425,7 +1425,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 }

 if isLargeObject {
-return o.removeSegmentsLargeObject(containerSegments)
+return o.removeSegmentsLargeObject(ctx, containerSegments)
 }
 return nil
 }
@@ -4,7 +4,7 @@ import (
 "testing"
 "time"

-"github.com/ncw/swift"
+"github.com/ncw/swift/v2"
 "github.com/rclone/rclone/fs/fserrors"
 "github.com/stretchr/testify/assert"
 )
@@ -9,7 +9,7 @@ import (
 "io/ioutil"
 "testing"

-"github.com/ncw/swift"
+"github.com/ncw/swift/v2"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/fs/object"
@@ -144,10 +144,10 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
 // error is potato
 require.NotNil(t, err)
 require.Equal(t, errMessage, err.Error())
-_, _, err = f.c.Object(f.rootContainer, path)
+_, _, err = f.c.Object(ctx, f.rootContainer, path)
 assert.Equal(t, swift.ObjectNotFound, err)
 prefix := path
-objs, err := f.c.Objects(segmentContainer, &swift.ObjectsOpts{
+objs, err := f.c.Objects(ctx, segmentContainer, &swift.ObjectsOpts{
 Prefix: prefix,
 })
 require.NoError(t, err)
go.mod
@@ -37,7 +37,7 @@ require (
 github.com/mattn/go-runewidth v0.0.9
 github.com/mitchellh/go-homedir v1.1.0
 github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1
-github.com/ncw/swift v1.0.52
+github.com/ncw/swift/v2 v2.0.0
 github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1
 github.com/patrickmn/go-cache v2.1.0+incompatible
 github.com/pkg/errors v0.9.1
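Because this is a new major version of a Go module, the module path itself changes from github.com/ncw/swift to github.com/ncw/swift/v2, so the bump shows up in three places at once: the require line above, the go.sum hashes below, and every import statement in the backends. Regenerating these entries is typically a matter of running go get github.com/ncw/swift/v2@v2.0.0 and then, once the import paths are rewritten, go mod tidy to drop the old v1 requirement.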
go.sum
@@ -451,8 +451,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1 h1:nAjWYc03awJAjsozNehdGZsm5LP7AhLOvjgbS8zN1tk=
 github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
-github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU=
+github.com/ncw/swift/v2 v2.0.0 h1:Q1jkMe/yhCkx7yAKq4bBZ/Th3NR+ejRcwbVK8Pi1i/0=
-github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/ncw/swift/v2 v2.0.0/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
 github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=