serve s3: convert options to new style

Nick Craig-Wood 2025-03-28 17:30:02 +00:00
parent cebd588092
commit b930c4b437
6 changed files with 80 additions and 76 deletions
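
For orientation, the sketch below condenses the new-style options pattern this commit adopts, using only the calls that appear in the diff: option metadata is declared once in an fs.Options slice, values are delivered into a plain struct through `config` struct tags, and fs.RegisterGlobalOptions plus flags.AddFlagsFromOptions wire up config and command line flags. The standalone main, the pflag.NewFlagSet and the single force_path_style option are illustrative simplifications; in the commit itself the flag set comes from the cobra Command and four options plus the httplib config blocks are registered.

package main

import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/spf13/pflag"
)

// OptionsInfo declares each option once: name, default value and help text.
// (The commit registers four options and chains .Add(httplib.ConfigInfo) etc.)
var OptionsInfo = fs.Options{{
    Name:    "force_path_style",
    Default: true,
    Help:    "If true use path style access if false use virtual hosted style",
}}

// Options receives the parsed values through `config` struct tags.
type Options struct {
    ForcePathStyle bool `config:"force_path_style"`
}

// Opt holds the option values; rclone fills it from flags, config file or environment.
var Opt Options

func main() {
    // Register the options globally under the "s3" section, as the commit does...
    fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "s3", Opt: &Opt, Options: OptionsInfo})

    // ...and derive command line flags from the same definitions. The commit uses
    // the cobra Command's flag set here; a fresh pflag set stands in for it.
    flagSet := pflag.NewFlagSet("serve s3", pflag.ExitOnError)
    flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
    flagSet.PrintDefaults()
}

The payoff visible in the diff is that the per-flag flags.BoolVarP/StringVarP calls and the hand-maintained DefaultOpt disappear, and the ETag hash resolution can move out of command parsing into newServer.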

@@ -25,15 +25,13 @@ var (
 // s3Backend implements the gofacess3.Backend interface to make an S3
 // backend for gofakes3
 type s3Backend struct {
-    opt *Options
     s *Server
     meta *sync.Map
 }
 
 // newBackend creates a new SimpleBucketBackend.
-func newBackend(s *Server, opt *Options) gofakes3.Backend {
+func newBackend(s *Server) gofakes3.Backend {
     return &s3Backend{
-        opt: opt,
         s: s,
         meta: new(sync.Map),
     }
@@ -136,7 +134,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
     fobj := entry.(fs.Object)
     size := node.Size()
-    hash := getFileHashByte(fobj)
+    hash := getFileHashByte(fobj, b.s.etagHashType)
 
     meta := map[string]string{
         "Last-Modified": formatHeaderTime(node.ModTime()),
@@ -187,7 +185,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
     file := node.(*vfs.File)
     size := node.Size()
-    hash := getFileHashByte(fobj)
+    hash := getFileHashByte(fobj, b.s.etagHashType)
 
     in, err := file.Open(os.O_RDONLY)
     if err != nil {

@@ -39,7 +39,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
     item := &gofakes3.Content{
         Key: objectPath,
         LastModified: gofakes3.NewContentTime(entry.ModTime()),
-        ETag: getFileHash(entry),
+        ETag: getFileHash(entry, b.s.etagHashType),
         Size: entry.Size(),
         StorageClass: gofakes3.StorageStandard,
     }

@@ -11,38 +11,55 @@ import (
     "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config/flags"
-    "github.com/rclone/rclone/fs/hash"
     httplib "github.com/rclone/rclone/lib/http"
     "github.com/rclone/rclone/vfs"
     "github.com/rclone/rclone/vfs/vfsflags"
     "github.com/spf13/cobra"
 )
 
-// DefaultOpt is the default values used for Options
-var DefaultOpt = Options{
-    pathBucketMode: true,
-    hashName: "MD5",
-    hashType: hash.MD5,
-    noCleanup: false,
-    Auth: httplib.DefaultAuthCfg(),
-    HTTP: httplib.DefaultCfg(),
+// OptionsInfo describes the Options in use
+var OptionsInfo = fs.Options{{
+    Name: "force_path_style",
+    Default: true,
+    Help: "If true use path style access if false use virtual hosted style",
+}, {
+    Name: "etag_hash",
+    Default: "MD5",
+    Help: "Which hash to use for the ETag, or auto or blank for off",
+}, {
+    Name: "auth_key",
+    Default: []string{},
+    Help: "Set key pair for v4 authorization: access_key_id,secret_access_key",
+}, {
+    Name: "no_cleanup",
+    Default: false,
+    Help: "Not to cleanup empty folder after object is deleted",
+}}.
+    Add(httplib.ConfigInfo).
+    Add(httplib.AuthConfigInfo)
+
+// Options contains options for the s3 Server
+type Options struct {
+    //TODO add more options
+    ForcePathStyle bool `config:"force_path_style"`
+    EtagHash string `config:"etag_hash"`
+    AuthKey []string `config:"auth_key"`
+    NoCleanup bool `config:"no_cleanup"`
+    Auth httplib.AuthConfig
+    HTTP httplib.Config
 }
 
 // Opt is options set by command line flags
-var Opt = DefaultOpt
+var Opt Options
 
 const flagPrefix = ""
 
 func init() {
+    fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "s3", Opt: &Opt, Options: OptionsInfo})
     flagSet := Command.Flags()
-    httplib.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
-    httplib.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
+    flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
     vfsflags.AddFlags(flagSet)
     proxyflags.AddFlags(flagSet)
-    flags.BoolVarP(flagSet, &Opt.pathBucketMode, "force-path-style", "", Opt.pathBucketMode, "If true use path style access if false use virtual hosted style (default true)", "")
-    flags.StringVarP(flagSet, &Opt.hashName, "etag-hash", "", Opt.hashName, "Which hash to use for the ETag, or auto or blank for off", "")
-    flags.StringArrayVarP(flagSet, &Opt.authPair, "auth-key", "", Opt.authPair, "Set key pair for v4 authorization: access_key_id,secret_access_key", "")
-    flags.BoolVarP(flagSet, &Opt.noCleanup, "no-cleanup", "", Opt.noCleanup, "Not to cleanup empty folder after object is deleted", "")
     serve.Command.AddCommand(Command)
 }
@@ -73,14 +90,6 @@ var Command = &cobra.Command{
             cmd.CheckArgs(0, 0, command, args)
         }
 
-        if Opt.hashName == "auto" {
-            Opt.hashType = f.Hashes().GetOne()
-        } else if Opt.hashName != "" {
-            err := Opt.hashType.Set(Opt.hashName)
-            if err != nil {
-                return err
-            }
-        }
         cmd.Run(false, false, command, func() error {
             s, err := newServer(context.Background(), f, &Opt)
             if err != nil {

@@ -25,7 +25,6 @@ import (
     "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/fs/object"
     "github.com/rclone/rclone/fstest"
-    httplib "github.com/rclone/rclone/lib/http"
     "github.com/rclone/rclone/lib/random"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
@@ -39,16 +38,10 @@ const (
 func serveS3(f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
     keyid = random.String(16)
     keysec = random.String(16)
-    serveropt := &Options{
-        HTTP: httplib.DefaultCfg(),
-        pathBucketMode: true,
-        hashName: "",
-        hashType: hash.None,
-        authPair: []string{fmt.Sprintf("%s,%s", keyid, keysec)},
-    }
-
-    serveropt.HTTP.ListenAddr = []string{endpoint}
-    w, _ = newServer(context.Background(), f, serveropt)
+    opt := Opt // copy default options
+    opt.AuthKey = []string{fmt.Sprintf("%s,%s", keyid, keysec)}
+    opt.HTTP.ListenAddr = []string{endpoint}
+    w, _ = newServer(context.Background(), f, &opt)
 
     router := w.server.Router()
     w.Bind(router)

@@ -28,21 +28,10 @@ const (
     ctxKeyID ctxKey = iota
 )
 
-// Options contains options for the http Server
-type Options struct {
-    //TODO add more options
-    pathBucketMode bool
-    hashName string
-    hashType hash.Type
-    authPair []string
-    noCleanup bool
-    Auth httplib.AuthConfig
-    HTTP httplib.Config
-}
-
 // Server is a s3.FileSystem interface
 type Server struct {
     server *httplib.Server
+    opt Options
     f fs.Fs
     _vfs *vfs.VFS // don't use directly, use getVFS
     faker *gofakes3.GoFakeS3
@@ -50,6 +39,7 @@ type Server struct {
     proxy *proxy.Proxy
     ctx context.Context // for global config
     s3Secret string
+    etagHashType hash.Type
 }
 
 // Make a new S3 Server to serve the remote
@@ -57,22 +47,36 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
     w := &Server{
         f: f,
         ctx: ctx,
+        opt: *opt,
+        etagHashType: hash.None,
     }
 
-    if len(opt.authPair) == 0 {
+    if w.opt.EtagHash == "auto" {
+        w.etagHashType = f.Hashes().GetOne()
+    } else if w.opt.EtagHash != "" {
+        err := w.etagHashType.Set(w.opt.EtagHash)
+        if err != nil {
+            return nil, err
+        }
+    }
+    if w.etagHashType != hash.None {
+        fs.Debugf(f, "Using hash %v for ETag", w.etagHashType)
+    }
+
+    if len(opt.AuthKey) == 0 {
         fs.Logf("serve s3", "No auth provided so allowing anonymous access")
     } else {
-        w.s3Secret = getAuthSecret(opt.authPair)
+        w.s3Secret = getAuthSecret(opt.AuthKey)
     }
 
     var newLogger logger
     w.faker = gofakes3.New(
-        newBackend(w, opt),
-        gofakes3.WithHostBucket(!opt.pathBucketMode),
+        newBackend(w),
+        gofakes3.WithHostBucket(!opt.ForcePathStyle),
         gofakes3.WithLogger(newLogger),
         gofakes3.WithRequestID(rand.Uint64()),
         gofakes3.WithoutVersioning(),
-        gofakes3.WithV4Auth(authlistResolver(opt.authPair)),
+        gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
         gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
     )
@@ -87,8 +91,8 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
     } else {
         w._vfs = vfs.New(f, &vfscommon.Opt)
-        if len(opt.authPair) > 0 {
-            w.faker.AddAuthKeys(authlistResolver(opt.authPair))
+        if len(opt.AuthKey) > 0 {
+            w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
         }
     }

@@ -36,15 +36,15 @@ func getDirEntries(prefix string, VFS *vfs.VFS) (vfs.Nodes, error) {
     return dirEntries, nil
 }
 
-func getFileHashByte(node any) []byte {
-    b, err := hex.DecodeString(getFileHash(node))
+func getFileHashByte(node any, hashType hash.Type) []byte {
+    b, err := hex.DecodeString(getFileHash(node, hashType))
     if err != nil {
         return nil
     }
     return b
 }
 
-func getFileHash(node any) string {
+func getFileHash(node any, hashType hash.Type) string {
     var o fs.Object
 
     switch b := node.(type) {
@@ -59,7 +59,7 @@ func getFileHash(node any) string {
             defer func() {
                 _ = in.Close()
             }()
-            h, err := hash.NewMultiHasherTypes(hash.NewHashSet(Opt.hashType))
+            h, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
             if err != nil {
                 return ""
             }
@@ -67,14 +67,14 @@ func getFileHash(node any) string {
            if err != nil {
                return ""
            }
-            return h.Sums()[Opt.hashType]
+            return h.Sums()[hashType]
         }
         o = fsObj
     case fs.Object:
         o = b
     }
 
-    hash, err := o.Hash(context.Background(), Opt.hashType)
+    hash, err := o.Hash(context.Background(), hashType)
     if err != nil {
         return ""
     }