serve nbd: serve an rclone remote as a Network Block Device - WIP FIXME

TODO

- Need to finalise rclone/gonbdserver, upload it and update go.mod/go.sum
- Remove unneeded dependencies from rclone/gonbdserver

Maybe make companion `mount nbd` command?

Fixes #7337
Nick Craig-Wood 2023-09-29 10:56:28 +01:00
parent 9fb0afad88
commit 3fa5a424a9
7 changed files with 688 additions and 0 deletions


@@ -0,0 +1,140 @@
// Implements an nbd.Backend for serving from a chunked file in the VFS.
package nbd
import (
"errors"
"fmt"
"github.com/rclone/gonbdserver/nbd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs/chunked"
"golang.org/x/net/context"
)
// Backend for a single chunked file
type chunkedBackend struct {
file *chunked.File
ec *nbd.ExportConfig
}
// Create Backend for a single chunked file
type chunkedBackendFactory struct {
s *NBD
file *chunked.File
}
// WriteAt implements Backend.WriteAt
func (cb *chunkedBackend) WriteAt(ctx context.Context, b []byte, offset int64, fua bool) (n int, err error) {
defer log.Trace(logPrefix, "size=%d, off=%d", len(b), offset)("n=%d, err=%v", &n, &err)
n, err = cb.file.WriteAt(b, offset)
if err != nil || !fua {
return n, err
}
err = cb.file.Sync()
if err != nil {
return 0, err
}
return n, err
}
// ReadAt implements Backend.ReadAt
func (cb *chunkedBackend) ReadAt(ctx context.Context, b []byte, offset int64) (n int, err error) {
defer log.Trace(logPrefix, "size=%d, off=%d", len(b), offset)("n=%d, err=%v", &n, &err)
return cb.file.ReadAt(b, offset)
}
// TrimAt implements Backend.TrimAt
func (cb *chunkedBackend) TrimAt(ctx context.Context, length int, offset int64) (n int, err error) {
defer log.Trace(logPrefix, "size=%d, off=%d", length, offset)("n=%d, err=%v", &n, &err)
return length, nil
}
// Flush implements Backend.Flush
func (cb *chunkedBackend) Flush(ctx context.Context) (err error) {
defer log.Trace(logPrefix, "")("err=%v", &err)
return nil
}
// Close implements Backend.Close
func (cb *chunkedBackend) Close(ctx context.Context) (err error) {
defer log.Trace(logPrefix, "")("err=%v", &err)
err = cb.file.Close()
return err
}
// Geometry implements Backend.Geometry
func (cb *chunkedBackend) Geometry(ctx context.Context) (size uint64, minBS uint64, prefBS uint64, maxBS uint64, err error) {
defer log.Trace(logPrefix, "")("size=%d, minBS=%d, prefBS=%d, maxBS=%d, err=%v", &size, &minBS, &prefBS, &maxBS, &err)
size = uint64(cb.file.Size())
minBS = cb.ec.MinimumBlockSize
prefBS = cb.ec.PreferredBlockSize
maxBS = cb.ec.MaximumBlockSize
err = nil
return
}
// HasFua implements Backend.HasFua
func (cb *chunkedBackend) HasFua(ctx context.Context) (fua bool) {
defer log.Trace(logPrefix, "")("fua=%v", &fua)
return true
}
// HasFlush implements Backend.HasFlush
func (cb *chunkedBackend) HasFlush(ctx context.Context) (flush bool) {
defer log.Trace(logPrefix, "")("flush=%v", &flush)
return true
}
// newBackend generates a new chunked backend
func (cbf *chunkedBackendFactory) newBackend(ctx context.Context, ec *nbd.ExportConfig) (nbd.Backend, error) {
err := cbf.file.Open(false, 0)
if err != nil {
return nil, fmt.Errorf("failed to open chunked file: %w", err)
}
cb := &chunkedBackend{
file: cbf.file,
ec: ec,
}
return cb, nil
}
// Generate a chunked backend factory
func (s *NBD) newChunkedBackendFactory(ctx context.Context) (bf backendFactory, err error) {
create := s.opt.Create > 0
if s.vfs.Opt.ReadOnly && create {
return nil, errors.New("can't create files with --read-only")
}
file := chunked.New(s.vfs, s.leaf)
err = file.Open(create, s.log2ChunkSize)
if err != nil {
return nil, fmt.Errorf("failed to open chunked file: %w", err)
}
defer fs.CheckClose(file, &err)
var truncateSize fs.SizeSuffix
if create {
if file.Size() == 0 {
truncateSize = s.opt.Create
}
} else {
truncateSize = s.opt.Resize
}
if truncateSize > 0 {
err = file.Truncate(int64(truncateSize))
if err != nil {
return nil, fmt.Errorf("failed to create chunked file: %w", err)
}
fs.Logf(logPrefix, "Size of network block device is now %v", truncateSize)
}
return &chunkedBackendFactory{
s: s,
file: file,
}, nil
}
// Check interfaces
var (
_ nbd.Backend = (*chunkedBackend)(nil)
_ backendFactory = (*chunkedBackendFactory)(nil)
)


@@ -0,0 +1,140 @@
// Implements an nbd.Backend for serving from the VFS.
package nbd
import (
"fmt"
"os"
"github.com/rclone/gonbdserver/nbd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
"golang.org/x/net/context"
)
// Backend for a single file
type fileBackend struct {
file vfs.Handle
ec *nbd.ExportConfig
}
// Create Backend for a single file
type fileBackendFactory struct {
s *NBD
vfs *vfs.VFS
filePath string
perms int
}
// WriteAt implements Backend.WriteAt
func (fb *fileBackend) WriteAt(ctx context.Context, b []byte, offset int64, fua bool) (n int, err error) {
defer log.Trace(logPrefix, "size=%d, off=%d", len(b), offset)("n=%d, err=%v", &n, &err)
n, err = fb.file.WriteAt(b, offset)
if err != nil || !fua {
return n, err
}
err = fb.file.Sync()
if err != nil {
return 0, err
}
return n, err
}
// ReadAt implements Backend.ReadAt
func (fb *fileBackend) ReadAt(ctx context.Context, b []byte, offset int64) (n int, err error) {
defer log.Trace(logPrefix, "size=%d, off=%d", len(b), offset)("n=%d, err=%v", &n, &err)
return fb.file.ReadAt(b, offset)
}
// TrimAt implements Backend.TrimAt
func (fb *fileBackend) TrimAt(ctx context.Context, length int, offset int64) (n int, err error) {
defer log.Trace(logPrefix, "size=%d, off=%d", length, offset)("n=%d, err=%v", &n, &err)
return length, nil
}
// Flush implements Backend.Flush
func (fb *fileBackend) Flush(ctx context.Context) (err error) {
defer log.Trace(logPrefix, "")("err=%v", &err)
return nil
}
// Close implements Backend.Close
func (fb *fileBackend) Close(ctx context.Context) (err error) {
defer log.Trace(logPrefix, "")("err=%v", &err)
err = fb.file.Close()
return err
}
// Geometry implements Backend.Geometry
func (fb *fileBackend) Geometry(ctx context.Context) (size uint64, minBS uint64, prefBS uint64, maxBS uint64, err error) {
defer log.Trace(logPrefix, "")("size=%d, minBS=%d, prefBS=%d, maxBS=%d, err=%v", &size, &minBS, &prefBS, &maxBS, &err)
fi, err := fb.file.Stat()
if err != nil {
err = fmt.Errorf("failed read info about open backing file: %w", err)
return
}
size = uint64(fi.Size())
minBS = fb.ec.MinimumBlockSize
prefBS = fb.ec.PreferredBlockSize
maxBS = fb.ec.MaximumBlockSize
err = nil
return
}
// HasFua implements Backend.HasFua
func (fb *fileBackend) HasFua(ctx context.Context) (fua bool) {
defer log.Trace(logPrefix, "")("fua=%v", &fua)
return true
}
// HasFlush implements Backend.HasFlush
func (fb *fileBackend) HasFlush(ctx context.Context) (flush bool) {
defer log.Trace(logPrefix, "")("flush=%v", &flush)
return true
}
// open the backing file
func (fbf *fileBackendFactory) open() (vfs.Handle, error) {
return fbf.vfs.OpenFile(fbf.filePath, fbf.perms, 0700)
}
// newBackend generates a new file backend
func (fbf *fileBackendFactory) newBackend(ctx context.Context, ec *nbd.ExportConfig) (nbd.Backend, error) {
fd, err := fbf.open()
if err != nil {
return nil, fmt.Errorf("failed to open backing file: %w", err)
}
fb := &fileBackend{
file: fd,
ec: ec,
}
return fb, nil
}
// Generate a file backend factory
func (s *NBD) newFileBackendFactory(ctx context.Context) (bf backendFactory, err error) {
perms := os.O_RDWR
if s.vfs.Opt.ReadOnly {
perms = os.O_RDONLY
}
fbf := &fileBackendFactory{
s: s,
vfs: s.vfs,
perms: perms,
filePath: s.leaf,
}
// Try opening the file so we get errors now rather than later when they are more difficult to report.
fd, err := fbf.open()
if err != nil {
return nil, fmt.Errorf("failed to open backing file: %w", err)
}
defer fs.CheckClose(fd, &err)
return fbf, nil
}
// Check interfaces
var (
_ nbd.Backend = (*fileBackend)(nil)
_ backendFactory = (*fileBackendFactory)(nil)
)

cmd/serve/nbd/nbd.go

@@ -0,0 +1,260 @@
// Package nbd provides a network block device server
package nbd
import (
"bufio"
"context"
_ "embed"
"errors"
"fmt"
"io"
"log"
"math/bits"
"path/filepath"
"strings"
"sync"
"github.com/rclone/gonbdserver/nbd"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const logPrefix = "nbd"
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: "localhost:10809",
Help: "IPaddress:Port or :Port to bind server to",
}, {
Name: "min_block_size",
Default: fs.SizeSuffix(512), // FIXME
Help: "Minimum block size to advertise",
}, {
Name: "preferred_block_size",
Default: fs.SizeSuffix(4096), // FIXME this is the max according to nbd-client
Help: "Preferred block size to advertise",
}, {
Name: "max_block_size",
Default: fs.SizeSuffix(1024 * 1024), // FIXME,
Help: "Maximum block size to advertise",
}, {
Name: "create",
Default: fs.SizeSuffix(-1),
Help: "If the destination does not exist, create it with this size",
}, {
Name: "chunk_size",
Default: fs.SizeSuffix(0),
Help: "If creating the destination use this chunk size. Must be a power of 2.",
}, {
Name: "resize",
Default: fs.SizeSuffix(-1),
Help: "If the destination exists, resize it to this size",
}}
// name := flag.String("name", "default", "Export name")
// description := flag.String("description", "The default export", "Export description")
// Options required for nbd server
type Options struct {
ListenAddr string `config:"addr"` // Port to listen on
MinBlockSize fs.SizeSuffix `config:"min_block_size"`
PreferredBlockSize fs.SizeSuffix `config:"preferred_block_size"`
MaxBlockSize fs.SizeSuffix `config:"max_block_size"`
Create fs.SizeSuffix `config:"create"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Resize fs.SizeSuffix `config:"resize"`
}
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "nbd", Opt: &Opt, Options: OptionsInfo})
}
// Opt is options set by command line flags
var Opt Options
// AddFlags adds flags for the nbd
func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}
func init() {
flagSet := Command.Flags()
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
AddFlags(flagSet, &Opt)
}
//go:embed nbd.md
var helpText string
// Command definition for cobra
var Command = &cobra.Command{
Use: "nbd remote:path",
Short: `Serve the remote over NBD.`,
Long: helpText + vfs.Help(),
Annotations: map[string]string{
"versionIntroduced": "v1.65",
"status": "experimental",
},
Run: func(command *cobra.Command, args []string) {
// FIXME could serve more than one nbd?
cmd.CheckArgs(1, 1, command, args)
f, leaf := cmd.NewFsFile(args[0])
cmd.Run(false, true, command, func() error {
s, err := run(context.Background(), f, leaf, Opt)
if err != nil {
log.Fatal(err)
}
defer systemd.Notify()()
// FIXME
_ = s
s.Wait()
return nil
})
},
}
// NBD contains everything to run the server
type NBD struct {
f fs.Fs
leaf string
vfs *vfs.VFS // don't use directly, use getVFS
opt Options
wg sync.WaitGroup
sessionWaitGroup sync.WaitGroup
logRd *io.PipeReader
logWr *io.PipeWriter
log2ChunkSize uint
readOnly bool // Set for read only by vfs config
backendFactory backendFactory
}
// interface for creating backend factories
type backendFactory interface {
newBackend(ctx context.Context, ec *nbd.ExportConfig) (nbd.Backend, error)
}
// Create and start the server for nbd either on directory f or using file leaf in f
func run(ctx context.Context, f fs.Fs, leaf string, opt Options) (s *NBD, err error) {
s = &NBD{
f: f,
leaf: leaf,
opt: opt,
vfs: vfs.New(f, &vfscommon.Opt),
readOnly: vfscommon.Opt.ReadOnly,
}
if opt.ChunkSize != 0 {
if set := bits.OnesCount64(uint64(opt.ChunkSize)); set != 1 {
return nil, fmt.Errorf("--chunk-size must be a power of 2 (counted %d bits set)", set)
}
s.log2ChunkSize = uint(bits.TrailingZeros64(uint64(opt.ChunkSize)))
fs.Debugf(logPrefix, "Using ChunkSize %v (%v), Log2ChunkSize %d", opt.ChunkSize, fs.SizeSuffix(1<<s.log2ChunkSize), s.log2ChunkSize)
}
if !vfscommon.Opt.ReadOnly && vfscommon.Opt.CacheMode < vfscommon.CacheModeWrites {
return nil, errors.New("need --vfs-cache-mode writes or full when serving read/write")
}
// Create the backend factory
if leaf != "" {
s.backendFactory, err = s.newFileBackendFactory(ctx)
} else {
s.backendFactory, err = s.newChunkedBackendFactory(ctx)
}
if err != nil {
return nil, err
}
nbd.RegisterBackend("rclone", s.backendFactory.newBackend)
fs.Debugf(logPrefix, "Registered backends: %v", nbd.GetBackendNames())
var (
protocol = "tcp"
addr = Opt.ListenAddr
)
if strings.HasPrefix(addr, "unix://") || filepath.IsAbs(addr) {
protocol = "unix"
addr = strings.TrimPrefix(addr, "unix://")
}
ec := nbd.ExportConfig{
Name: "default",
Description: fs.ConfigString(f),
Driver: "rclone",
ReadOnly: vfscommon.Opt.ReadOnly,
Workers: 8, // should this be --checkers or a new config flag FIXME
TLSOnly: false, // FIXME
MinimumBlockSize: uint64(Opt.MinBlockSize),
PreferredBlockSize: uint64(Opt.PreferredBlockSize),
MaximumBlockSize: uint64(Opt.MaxBlockSize),
DriverParameters: nbd.DriverParametersConfig{
"sync": "false",
"path": "/tmp/diskimage",
},
}
// Make a logger to feed gonbdserver's logs into rclone's logging system
s.logRd, s.logWr = io.Pipe()
go func() {
scanner := bufio.NewScanner(s.logRd)
for scanner.Scan() {
line := scanner.Text()
if s, ok := strings.CutPrefix(line, "[DEBUG] "); ok {
fs.Debugf(logPrefix, "%s", s)
} else if s, ok := strings.CutPrefix(line, "[INFO] "); ok {
fs.Infof(logPrefix, "%s", s)
} else if s, ok := strings.CutPrefix(line, "[WARN] "); ok {
fs.Logf(logPrefix, "%s", s)
} else if s, ok := strings.CutPrefix(line, "[ERROR] "); ok {
fs.Errorf(logPrefix, "%s", s)
} else if s, ok := strings.CutPrefix(line, "[CRIT] "); ok {
fs.Errorf(logPrefix, "%s", s)
} else {
fs.Infof(logPrefix, "%s", line)
}
}
if err := scanner.Err(); err != nil {
fs.Errorf(logPrefix, "Log writer failed: %v", err)
}
}()
logger := log.New(s.logWr, "", 0)
ci := fs.GetConfig(ctx)
dump := ci.Dump & (fs.DumpHeaders | fs.DumpBodies | fs.DumpAuth | fs.DumpRequests | fs.DumpResponses)
var serverConfig = nbd.ServerConfig{
Protocol: protocol, // protocol it should listen on (in net.Conn form)
Address: addr, // address to listen on
DefaultExport: "default", // name of default export
Exports: []nbd.ExportConfig{ec}, // array of configurations of exported items
//TLS: nbd.TLSConfig{}, // TLS configuration
DisableNoZeroes: false, // Disable NoZeroes extension FIXME
Debug: dump != 0, // Verbose debug
}
s.wg.Add(1)
go func() {
defer s.wg.Done()
// FIXME contexts
nbd.StartServer(ctx, ctx, &s.sessionWaitGroup, logger, serverConfig)
}()
return s, nil
}
// Wait for the server to finish
func (s *NBD) Wait() {
s.wg.Wait()
_ = s.logWr.Close()
_ = s.logRd.Close()
}

cmd/serve/nbd/nbd.md

@@ -0,0 +1,139 @@
Run a Network Block Device server using remote:path to store the data.
You can use a unix socket by setting `--addr` to `unix:/path/to/socket`
or just by using an absolute path name.
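For example, to listen on a unix socket instead of TCP (the socket path here is purely illustrative).
rclone serve nbd --addr /run/rclone-nbd.sock remote:path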
`rclone serve nbd` will run on any OS, but the examples for using it
are Linux specific. There do exist Windows and macOS NBD clients but
these haven't been tested yet.
To see the packets on the wire use `--dump headers` or `--dump bodies`.
**NB** this has no authentication. It may in the future allow SSL
certificates. If you need access control then you will have to provide
it on the network layer, or use unix sockets.
### remote:path pointing to a file
If the `remote:path` points to a file then rclone will serve the file
directly as a network block device.
Using this with `--read-only` is recommended. In that case you can
use any `--vfs-cache-mode`; with `--vfs-cache-mode full` only the
parts of the file that are read will be cached locally.
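For example, to export a single disk image read-only (the file name disk.img is only an example).
rclone serve nbd --read-only remote:path/disk.img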
If you don't use `--read-only` then `--vfs-cache-mode full` is
required and the entire file will be cached locally and won't be
uploaded until the client has disconnected (`nbd-client -d`).
### remote:path pointing to a directory
If the `remote:path` points to a directory then rclone will treat the
directory as a place to store chunks of the exported network block device.
It will store an `info.json` file in the top level and store the
individual chunks in a hierarchical directory scheme with no more than
256 chunks or directories in any directory.
The first time you use this, you should use the `--create` flag
indicating how big you want the network block device to appear. Rclone
only allocates space you use so you can make this large. For example
`--create 1T`. You can also pass the `--chunk-size` flag at this
point. If you don't you will get the default of 64k chunks.
Rclone will then chunk the network block device into `--chunk-size`
chunks. Rclone has to download the entire chunk in order to change
only part of it and it will cache the chunk on disk so bear that in
mind when choosing `--chunk-size`.
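Putting this together, a sketch of a first run creating a 1 TiB device with 64k chunks (all values here are illustrative).
rclone serve nbd --create 1T --chunk-size 64k --vfs-cache-mode writes remote:path/dir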
If you wish to change the size of the network block device you can use
the `--resize` flag. This won't remove any data, it just changes the
size advertised. So if you have made a file system on the block device
you will need to resize it too.
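For example, a possible resize workflow, assuming an ext4 filesystem was previously made on the device (sizes and filesystem are illustrative).
rclone serve nbd --resize 2T --vfs-cache-mode writes remote:path/dir
sudo nbd-client -g localhost 10809 /dev/nbd0
sudo resize2fs /dev/nbd0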
If you are using `--read-only` then you can use any
`--vfs-cache-mode`.
If you are not using `--read-only` then you will need
`--vfs-cache-mode writes` or `--vfs-cache-mode full`.
Note that with `--vfs-cache-mode writes` or `--vfs-cache-mode full`
rclone acts as a writeback cache, and since it only uploads
`--transfers` files at once the cache can build up a backlog of
uploads. You can reduce the writeback caching by setting
`--vfs-write-back 0`, however due to the way the kernel works this
will only reduce it slightly.
If using `--vfs-cache-mode writes` or `--vfs-cache-mode full` it is
recommended to set limits on the cache size using some or all of these
flags as the VFS can use a lot of disk space very quickly.
--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
You might also need to set the poll interval smaller as the cache
will only be examined at this interval.
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
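For example, a possible combination of these limits (the values are only suggestions and should be tuned to the disk holding the cache).
rclone serve nbd remote:path --vfs-cache-mode full --vfs-cache-max-size 10G --vfs-cache-min-free-space 5G --vfs-cache-poll-interval 30s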
### Linux Examples
Install
sudo apt install nbd-client
Start the server (it listens on localhost:10809 by default).
rclone -v --vfs-cache-mode full serve nbd remote:path
List devices
sudo modprobe nbd
sudo nbd-client --list localhost
Format the device and mount it read-write
sudo nbd-client -g localhost 10809 /dev/nbd0
sudo mkfs.ext4 /dev/nbd0
sudo mkdir -p /mnt/tmp
sudo mount -t ext4 /dev/nbd0 /mnt/tmp
Mount read-only
rclone -v --vfs-cache-mode full --read-only serve nbd remote:path
sudo nbd-client --readonly -g localhost 10809 /dev/nbd0
sudo mount -t ext4 -o ro /dev/nbd0 /mnt/tmp
Disconnect
sudo umount /mnt/tmp
sudo nbd-client -d /dev/nbd0
### TODO
Experiment with the `-connections` option. This is supported by the code.
Does it improve performance?
-connections num
-C Use num connections to the server, to allow speeding up request
handling, at the cost of higher resource usage on the server.
Use of this option requires kernel support available first with
Linux 4.9.
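A possible, so far untested, client invocation for this experiment (the connection count is arbitrary).
sudo nbd-client -g localhost 10809 /dev/nbd0 -connections 4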
Experiment with `-persist` option - is that a good idea?
-persist
-p When this option is specified, nbd-client will immediately try
to reconnect an nbd device if the connection ever drops unexpectedly
due to a lost server or something similar.
Need to implement Trim and see if Trim is being called.
Need to delete zero files before upload (do in VFS layer?)
FIXME need better back pressure from VFS cache to writers.
FIXME need Sync to actually work!


@@ -9,6 +9,7 @@ import (
"github.com/rclone/rclone/cmd/serve/docker"
"github.com/rclone/rclone/cmd/serve/ftp"
"github.com/rclone/rclone/cmd/serve/http"
"github.com/rclone/rclone/cmd/serve/nbd"
"github.com/rclone/rclone/cmd/serve/nfs"
"github.com/rclone/rclone/cmd/serve/restic"
"github.com/rclone/rclone/cmd/serve/s3"
@@ -43,6 +44,9 @@ func init() {
if s3.Command != nil {
Command.AddCommand(s3.Command)
}
if nbd.Command != nil {
Command.AddCommand(nbd.Command)
}
cmd.Root.AddCommand(Command)
}

go.mod

@@ -2,6 +2,8 @@ module github.com/rclone/rclone
go 1.21
//replace github.com/rclone/gonbdserver => /home/ncw/go/src/github.com/rclone/gonbdserver
require (
bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0
@@ -53,6 +55,7 @@ require (
github.com/prometheus/client_golang v1.19.1
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
github.com/rclone/gofakes3 v0.0.3-0.20240716093803-d6abc178be56
github.com/rclone/gonbdserver v0.0.0-20230928185136-7adb4642e1cb
github.com/rfjakob/eme v1.1.2
github.com/rivo/uniseg v0.4.7
github.com/rogpeppe/go-internal v1.12.0

go.sum

@@ -460,6 +460,8 @@ github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 h1:UVArwN/wkKjMVhh2EQ
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o=
github.com/rclone/gofakes3 v0.0.3-0.20240716093803-d6abc178be56 h1:JmCt3EsTnlZrg/PHIyZqvKDRvBCde/rmThAQFliE9bU=
github.com/rclone/gofakes3 v0.0.3-0.20240716093803-d6abc178be56/go.mod h1:L0VIBE0mT6ArN/5dfHsJm3UjqCpi5B/cdN+qWDNh7ko=
github.com/rclone/gonbdserver v0.0.0-20230928185136-7adb4642e1cb h1:4FyF15nQLPIhLcJDpn2ItwcuO3E/pYQXdPVOt+v3Duk=
github.com/rclone/gonbdserver v0.0.0-20230928185136-7adb4642e1cb/go.mod h1:HwROhGq4gA7vncM5mLZjoNbI9CrS52aDuHReB3NMWp4=
github.com/relvacode/iso8601 v1.3.0 h1:HguUjsGpIMh/zsTczGN3DVJFxTU/GX+MMmzcKoMO7ko=
github.com/relvacode/iso8601 v1.3.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=