diff --git a/cmd/serve/s3/backend.go b/cmd/serve/s3/backend.go
index 90974cd43..d4531e987 100644
--- a/cmd/serve/s3/backend.go
+++ b/cmd/serve/s3/backend.go
@@ -25,15 +25,13 @@ var (
 // s3Backend implements the gofacess3.Backend interface to make an S3
 // backend for gofakes3
 type s3Backend struct {
-	opt  *Options
 	s    *Server
 	meta *sync.Map
 }
 
 // newBackend creates a new SimpleBucketBackend.
-func newBackend(s *Server, opt *Options) gofakes3.Backend {
+func newBackend(s *Server) gofakes3.Backend {
 	return &s3Backend{
-		opt:  opt,
 		s:    s,
 		meta: new(sync.Map),
 	}
@@ -136,7 +134,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
 
 	fobj := entry.(fs.Object)
 	size := node.Size()
-	hash := getFileHashByte(fobj)
+	hash := getFileHashByte(fobj, b.s.etagHashType)
 
 	meta := map[string]string{
 		"Last-Modified": formatHeaderTime(node.ModTime()),
@@ -187,7 +185,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
 
 	file := node.(*vfs.File)
 	size := node.Size()
-	hash := getFileHashByte(fobj)
+	hash := getFileHashByte(fobj, b.s.etagHashType)
 
 	in, err := file.Open(os.O_RDONLY)
 	if err != nil {
diff --git a/cmd/serve/s3/list.go b/cmd/serve/s3/list.go
index 9b0a2e11e..f58e95a87 100644
--- a/cmd/serve/s3/list.go
+++ b/cmd/serve/s3/list.go
@@ -39,7 +39,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 			item := &gofakes3.Content{
 				Key:          objectPath,
 				LastModified: gofakes3.NewContentTime(entry.ModTime()),
-				ETag:         getFileHash(entry),
+				ETag:         getFileHash(entry, b.s.etagHashType),
 				Size:         entry.Size(),
 				StorageClass: gofakes3.StorageStandard,
 			}
diff --git a/cmd/serve/s3/s3.go b/cmd/serve/s3/s3.go
index 504da1a52..bef41c022 100644
--- a/cmd/serve/s3/s3.go
+++ b/cmd/serve/s3/s3.go
@@ -11,38 +11,55 @@ import (
 	"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
-	"github.com/rclone/rclone/fs/hash"
 	httplib "github.com/rclone/rclone/lib/http"
 	"github.com/rclone/rclone/vfs"
 	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/spf13/cobra"
 )
 
-// DefaultOpt is the default values used for Options
-var DefaultOpt = Options{
-	pathBucketMode: true,
-	hashName:       "MD5",
-	hashType:       hash.MD5,
-	noCleanup:      false,
-	Auth:           httplib.DefaultAuthCfg(),
-	HTTP:           httplib.DefaultCfg(),
+// OptionsInfo describes the Options in use
+var OptionsInfo = fs.Options{{
+	Name:    "force_path_style",
+	Default: true,
+	Help:    "If true use path style access if false use virtual hosted style",
+}, {
+	Name:    "etag_hash",
+	Default: "MD5",
+	Help:    "Which hash to use for the ETag, or auto or blank for off",
+}, {
+	Name:    "auth_key",
+	Default: []string{},
+	Help:    "Set key pair for v4 authorization: access_key_id,secret_access_key",
+}, {
+	Name:    "no_cleanup",
+	Default: false,
+	Help:    "Not to cleanup empty folder after object is deleted",
+}}.
+	Add(httplib.ConfigInfo).
+	Add(httplib.AuthConfigInfo)
+
+// Options contains options for the s3 Server
+type Options struct {
+	//TODO add more options
+	ForcePathStyle bool     `config:"force_path_style"`
+	EtagHash       string   `config:"etag_hash"`
+	AuthKey        []string `config:"auth_key"`
+	NoCleanup      bool     `config:"no_cleanup"`
+	Auth           httplib.AuthConfig
+	HTTP           httplib.Config
 }
 
 // Opt is options set by command line flags
-var Opt = DefaultOpt
+var Opt Options
 
 const flagPrefix = ""
 
 func init() {
+	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "s3", Opt: &Opt, Options: OptionsInfo})
 	flagSet := Command.Flags()
-	httplib.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
-	httplib.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
+	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
 	vfsflags.AddFlags(flagSet)
 	proxyflags.AddFlags(flagSet)
-	flags.BoolVarP(flagSet, &Opt.pathBucketMode, "force-path-style", "", Opt.pathBucketMode, "If true use path style access if false use virtual hosted style (default true)", "")
-	flags.StringVarP(flagSet, &Opt.hashName, "etag-hash", "", Opt.hashName, "Which hash to use for the ETag, or auto or blank for off", "")
-	flags.StringArrayVarP(flagSet, &Opt.authPair, "auth-key", "", Opt.authPair, "Set key pair for v4 authorization: access_key_id,secret_access_key", "")
-	flags.BoolVarP(flagSet, &Opt.noCleanup, "no-cleanup", "", Opt.noCleanup, "Not to cleanup empty folder after object is deleted", "")
 	serve.Command.AddCommand(Command)
 }
 
@@ -73,14 +90,6 @@ var Command = &cobra.Command{
 			cmd.CheckArgs(0, 0, command, args)
 		}
 
-		if Opt.hashName == "auto" {
-			Opt.hashType = f.Hashes().GetOne()
-		} else if Opt.hashName != "" {
-			err := Opt.hashType.Set(Opt.hashName)
-			if err != nil {
-				return err
-			}
-		}
 		cmd.Run(false, false, command, func() error {
 			s, err := newServer(context.Background(), f, &Opt)
 			if err != nil {
diff --git a/cmd/serve/s3/s3_test.go b/cmd/serve/s3/s3_test.go
index 106154693..6bc73085f 100644
--- a/cmd/serve/s3/s3_test.go
+++ b/cmd/serve/s3/s3_test.go
@@ -25,7 +25,6 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fstest"
-	httplib "github.com/rclone/rclone/lib/http"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -39,16 +38,10 @@ const (
 func serveS3(f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
 	keyid = random.String(16)
 	keysec = random.String(16)
-	serveropt := &Options{
-		HTTP:           httplib.DefaultCfg(),
-		pathBucketMode: true,
-		hashName:       "",
-		hashType:       hash.None,
-		authPair:       []string{fmt.Sprintf("%s,%s", keyid, keysec)},
-	}
-
-	serveropt.HTTP.ListenAddr = []string{endpoint}
-	w, _ = newServer(context.Background(), f, serveropt)
+	opt := Opt // copy default options
+	opt.AuthKey = []string{fmt.Sprintf("%s,%s", keyid, keysec)}
+	opt.HTTP.ListenAddr = []string{endpoint}
+	w, _ = newServer(context.Background(), f, &opt)
 	router := w.server.Router()
 	w.Bind(router)
 
diff --git a/cmd/serve/s3/server.go b/cmd/serve/s3/server.go
index 6627a1195..05ee994ac 100644
--- a/cmd/serve/s3/server.go
+++ b/cmd/serve/s3/server.go
@@ -28,51 +28,55 @@ const (
 	ctxKeyID ctxKey = iota
 )
 
-// Options contains options for the http Server
-type Options struct {
-	//TODO add more options
-	pathBucketMode bool
-	hashName       string
-	hashType       hash.Type
-	authPair       []string
-	noCleanup      bool
-	Auth           httplib.AuthConfig
-	HTTP           httplib.Config
-}
-
 // Server is a s3.FileSystem interface
 type Server struct {
-	server   *httplib.Server
-	f        fs.Fs
-	_vfs     *vfs.VFS // don't use directly, use getVFS
-	faker    *gofakes3.GoFakeS3
-	handler  http.Handler
-	proxy    *proxy.Proxy
-	ctx      context.Context // for global config
-	s3Secret string
+	server       *httplib.Server
+	opt          Options
+	f            fs.Fs
+	_vfs         *vfs.VFS // don't use directly, use getVFS
+	faker        *gofakes3.GoFakeS3
+	handler      http.Handler
+	proxy        *proxy.Proxy
+	ctx          context.Context // for global config
+	s3Secret     string
+	etagHashType hash.Type
 }
 
 // Make a new S3 Server to serve the remote
 func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error) {
 	w := &Server{
-		f:   f,
-		ctx: ctx,
+		f:            f,
+		ctx:          ctx,
+		opt:          *opt,
+		etagHashType: hash.None,
 	}
 
-	if len(opt.authPair) == 0 {
+	if w.opt.EtagHash == "auto" {
+		w.etagHashType = f.Hashes().GetOne()
+	} else if w.opt.EtagHash != "" {
+		err := w.etagHashType.Set(w.opt.EtagHash)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if w.etagHashType != hash.None {
+		fs.Debugf(f, "Using hash %v for ETag", w.etagHashType)
+	}
+
+	if len(opt.AuthKey) == 0 {
 		fs.Logf("serve s3", "No auth provided so allowing anonymous access")
 	} else {
-		w.s3Secret = getAuthSecret(opt.authPair)
+		w.s3Secret = getAuthSecret(opt.AuthKey)
 	}
 
 	var newLogger logger
 	w.faker = gofakes3.New(
-		newBackend(w, opt),
-		gofakes3.WithHostBucket(!opt.pathBucketMode),
+		newBackend(w),
+		gofakes3.WithHostBucket(!opt.ForcePathStyle),
 		gofakes3.WithLogger(newLogger),
 		gofakes3.WithRequestID(rand.Uint64()),
 		gofakes3.WithoutVersioning(),
-		gofakes3.WithV4Auth(authlistResolver(opt.authPair)),
+		gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
 		gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
 	)
 
@@ -87,8 +91,8 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
 	} else {
 		w._vfs = vfs.New(f, &vfscommon.Opt)
 
-		if len(opt.authPair) > 0 {
-			w.faker.AddAuthKeys(authlistResolver(opt.authPair))
+		if len(opt.AuthKey) > 0 {
+			w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
 		}
 	}
 
diff --git a/cmd/serve/s3/utils.go b/cmd/serve/s3/utils.go
index 1e83a5f0b..f10d238d3 100644
--- a/cmd/serve/s3/utils.go
+++ b/cmd/serve/s3/utils.go
@@ -36,15 +36,15 @@ func getDirEntries(prefix string, VFS *vfs.VFS) (vfs.Nodes, error) {
 	return dirEntries, nil
 }
 
-func getFileHashByte(node any) []byte {
-	b, err := hex.DecodeString(getFileHash(node))
+func getFileHashByte(node any, hashType hash.Type) []byte {
+	b, err := hex.DecodeString(getFileHash(node, hashType))
 	if err != nil {
 		return nil
 	}
 	return b
 }
 
-func getFileHash(node any) string {
+func getFileHash(node any, hashType hash.Type) string {
 	var o fs.Object
 
 	switch b := node.(type) {
@@ -59,7 +59,7 @@
 			defer func() {
 				_ = in.Close()
 			}()
-			h, err := hash.NewMultiHasherTypes(hash.NewHashSet(Opt.hashType))
+			h, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
 			if err != nil {
 				return ""
 			}
@@ -67,14 +67,14 @@
 			if err != nil {
 				return ""
 			}
-			return h.Sums()[Opt.hashType]
+			return h.Sums()[hashType]
 		}
 		o = fsObj
 	case fs.Object:
 		o = b
 	}
 
-	hash, err := o.Hash(context.Background(), Opt.hashType)
+	hash, err := o.Hash(context.Background(), hashType)
 	if err != nil {
 		return ""
 	}
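
Not part of the patch: a minimal sketch of the ETag hash selection that this change moves out of `Command.RunE` and into `newServer`, where it is stored as `Server.etagHashType` and passed to `getFileHash`/`getFileHashByte` as a `hash.Type` parameter instead of being read from the global `Opt`. It only uses calls that already appear in the diff (`f.Hashes().GetOne()`, `hash.Type.Set`, `hash.None`); the helper name `resolveEtagHash` is hypothetical.

```go
package s3

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// resolveEtagHash is a hypothetical helper mirroring what newServer now does
// once at construction time: "auto" picks the first hash type the remote
// supports, "" leaves ETag hashing disabled (hash.None), and any other value
// must parse as a known hash name via hash.Type.Set.
func resolveEtagHash(f fs.Fs, etagHash string) (hash.Type, error) {
	etagHashType := hash.None
	if etagHash == "auto" {
		etagHashType = f.Hashes().GetOne()
	} else if etagHash != "" {
		if err := etagHashType.Set(etagHash); err != nil {
			return hash.None, err
		}
	}
	return etagHashType, nil
}
```

Resolving the hash once per server and keeping it on the `Server` struct is what lets `backend.go` and `list.go` read `b.s.etagHashType` rather than package-level state, and lets the test helper start from the registered defaults with `opt := Opt`.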