From ffe97865a75181a27816e4d0b98681d2028f8603 Mon Sep 17 00:00:00 2001 From: buengese Date: Fri, 31 Jan 2020 19:18:19 +0100 Subject: [PATCH] press: first overhaul ... fix error handling for metadata upload ... fix metadata wrapping ... make sure not to leave any orphaned data when overwriting existing files with Put, Copy or Move ... switch from id01/go-lz4 to pierrec/lz4 ... use klauspost/compress/gzip for gzip ... use ulikunitz/xz for xz ... remove snappy --- backend/all/all.go | 1 + backend/press/alg_exec.go | 98 ---- backend/press/alg_gzip.go | 48 +- backend/press/alg_lz4.go | 226 +++++++-- backend/press/alg_snappy.go | 35 -- backend/press/alg_xz.go | 75 +++ backend/press/compression.go | 208 ++------ backend/press/compression_test.go | 7 +- backend/press/press.go | 800 ++++++++++++++++-------------- backend/press/press_test.go | 95 +++- go.mod | 5 +- go.sum | 20 +- 12 files changed, 855 insertions(+), 763 deletions(-) delete mode 100644 backend/press/alg_exec.go delete mode 100644 backend/press/alg_snappy.go create mode 100644 backend/press/alg_xz.go diff --git a/backend/all/all.go b/backend/all/all.go index 4b4509a68..e06ad4e80 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -28,6 +28,7 @@ import ( _ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/premiumizeme" + _ "github.com/rclone/rclone/backend/press" _ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/s3" diff --git a/backend/press/alg_exec.go b/backend/press/alg_exec.go deleted file mode 100644 index be91911d6..000000000 --- a/backend/press/alg_exec.go +++ /dev/null @@ -1,98 +0,0 @@ -package press - -// This file implements shell exec algorithms that require binaries. -import ( - "bytes" - "io" - "os/exec" -) - -// XZ command -const xzcommand = "xz" // Name of xz binary (if available) - -// ExecHeader - Header we add to an exec file. We don't need this. -var ExecHeader = []byte{} - -// Function that checks whether XZ is present in the system -func checkXZ() bool { - _, err := exec.LookPath("xz") - if err != nil { - return false - } - return true -} - -// Function that gets binary paths if needed -func getBinPaths(c *Compression, mode int) (err error) { - err = nil - if mode == XZMin || mode == XZDefault { - c.BinPath, err = exec.LookPath(xzcommand) - } - return err -} - -// Function that compresses a block using a shell command without wrapping in gzip. Requires an binary corresponding with the command. -func (c *Compression) compressBlockExec(in []byte, out io.Writer, binaryPath string, args []string) (compressedSize uint32, uncompressedSize int64, err error) { - // Initialize compression subprocess - subprocess := exec.Command(binaryPath, args...) 
- stdin, err := subprocess.StdinPipe() - if err != nil { - return 0, 0, err - } - - // Run subprocess that creates compressed file - stdinError := make(chan error) - go func() { - _, err := stdin.Write(in) - _ = stdin.Close() - stdinError <- err - }() - - // Get output - output, err := subprocess.Output() - if err != nil { - return 0, 0, err - } - - // Copy over - n, err := io.Copy(out, bytes.NewReader(output)) - if err != nil { - return uint32(n), int64(len(in)), err - } - - // Check if there was an error and return - err = <-stdinError - - return uint32(n), int64(len(in)), err -} - -// Utility function to decompress a block range using a shell command which wasn't wrapped in gzip -func decompressBlockRangeExec(in io.Reader, out io.Writer, binaryPath string, args []string) (n int, err error) { - // Decompress actual compression - // Initialize decompression subprocess - subprocess := exec.Command(binaryPath, args...) - stdin, err := subprocess.StdinPipe() - if err != nil { - return 0, err - } - - // Run subprocess that copies over compressed block - stdinError := make(chan error) - go func() { - _, err := io.Copy(stdin, in) - _ = stdin.Close() - stdinError <- err - }() - - // Get output, copy, and return - output, err := subprocess.Output() - if err != nil { - return 0, err - } - n64, err := io.Copy(out, bytes.NewReader(output)) - if err != nil { - return int(n64), err - } - err = <-stdinError - return int(n64), err -} diff --git a/backend/press/alg_gzip.go b/backend/press/alg_gzip.go index eb15c0f83..8649f5ec8 100644 --- a/backend/press/alg_gzip.go +++ b/backend/press/alg_gzip.go @@ -1,22 +1,48 @@ package press -// This file implements the gzip algorithm. import ( "bufio" - "compress/gzip" "io" + + "github.com/klauspost/compress/gzip" ) -// GzipHeader - Header we add to a gzip file. We're contatenating GZIP files here, so we don't need this. 
-var GzipHeader = []byte{}
+// AlgGzip represents gzip compression algorithm
+type AlgGzip struct {
+	level     int
+	blockSize uint32
+}
 
-// Function that compresses a block using gzip
-func (c *Compression) compressBlockGz(in []byte, out io.Writer, compressionLevel int) (compressedSize uint32, uncompressedSize int64, err error) {
+// InitializeGzip initializes the gzip compression Algorithm
+func InitializeGzip(bs uint32, level int) Algorithm {
+	a := new(AlgGzip)
+	a.blockSize = bs
+	a.level = level
+	return a
+}
+
+// GetFileExtension returns file extension
+func (a *AlgGzip) GetFileExtension() string {
+	return ".gz"
+}
+
+// GetHeader returns the gzip compression header (empty)
+func (a *AlgGzip) GetHeader() []byte {
+	return []byte{}
+}
+
+// GetFooter returns the gzip compression footer (empty)
+func (a *AlgGzip) GetFooter() []byte {
+	return []byte{}
+}
+
+// CompressBlock compresses a block using gzip
+func (a *AlgGzip) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
 	// Initialize buffer
-	bufw := bufio.NewWriterSize(out, int(c.maxCompressedBlockSize()))
+	bufw := bufio.NewWriterSize(out, int(a.blockSize+(a.blockSize)>>4))
 
 	// Initialize block writer
-	outw, err := gzip.NewWriterLevel(bufw, compressionLevel)
+	outw, err := gzip.NewWriterLevel(bufw, a.level)
 	if err != nil {
 		return 0, 0, err
 	}
@@ -35,11 +61,11 @@ func (c *Compression) compressBlockGz(in []byte, out io.Writer, compressionLevel
 	blockSize := uint32(bufw.Buffered())
 	err = bufw.Flush()
 
-	return blockSize, int64(len(in)), err
+	return blockSize, uint64(len(in)), err
 }
 
-// Utility function to decompress a block range using gzip
-func decompressBlockRangeGz(in io.Reader, out io.Writer) (n int, err error) {
+// DecompressBlock decompresses a gzip compressed block
+func (a *AlgGzip) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) {
 	gzipReader, err := gzip.NewReader(in)
 	if err != nil {
 		return 0, err
diff --git a/backend/press/alg_lz4.go b/backend/press/alg_lz4.go
index a38073113..cf43716d2 100644
--- a/backend/press/alg_lz4.go
+++ b/backend/press/alg_lz4.go
@@ -4,11 +4,12 @@ package press
 import (
 	"bytes"
 	"encoding/binary"
-	"errors"
+	"fmt"
 	"io"
+	"math/bits"
 
-	"github.com/OneOfOne/xxhash"
-	lz4 "github.com/id01/go-lz4"
+	"github.com/buengese/xxh32"
+	lz4 "github.com/pierrec/lz4"
 )
 
 /*
@@ -31,65 +32,192 @@ Header checksum byte (xxhash(flags and bd byte) >> 1) & 0xff
 */
 
 // LZ4Header - Header of our LZ4 file
-var LZ4Header = []byte{0x04, 0x22, 0x4d, 0x18, 0x70, 0x50, 0x84}
+//var LZ4Header = []byte{0x04, 0x22, 0x4d, 0x18, 0x70, 0x50, 0x84}
 
 // LZ4Footer - Footer of our LZ4 file
 var LZ4Footer = []byte{0x00, 0x00, 0x00, 0x00} // This is just an empty block
 
-// Function that compresses a block using lz4
-func (c *Compression) compressBlockLz4(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize int64, err error) {
-	// Write lz4 compressed data
-	compressedBytes, err := lz4.Encode(nil, in)
-	if err != nil {
-		return 0, 0, err
-	}
-	// Write compressed bytes
-	n1, err := out.Write(compressedBytes)
-	if err != nil {
-		return 0, 0, err
-	}
-	// Get checksum
-	h := xxhash.New32()
-	_, err = h.Write(compressedBytes[4:]) // The checksum doesn't include the size
-	if err != nil {
-		return 0, 0, err
-	}
-	checksum := make([]byte, 4)
-	binary.LittleEndian.PutUint32(checksum, h.Sum32())
-	n2, err := out.Write(checksum)
-	if err != nil {
-		return 0, 0, err
-	}
-	// Return sizes
-	return uint32(n1 + n2), int64(len(in)), err
+const (
+	frameMagic uint32 = 0x184D2204
+
+	compressedBlockFlag = 1 << 31
+	compressedBlockMask = compressedBlockFlag - 1
+)
+
+// AlgLz4 is the Lz4 Compression algorithm
+type AlgLz4 struct {
+	Header lz4.Header
+	buf    [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+}
+
+// InitializeLz4 creates an Lz4 compression algorithm
+func InitializeLz4(bs uint32, blockChecksum bool) Algorithm {
+	a := new(AlgLz4)
+	a.Header.Reset()
+	a.Header = lz4.Header{
+		BlockChecksum: blockChecksum,
+		BlockMaxSize:  int(bs),
+	}
+	return a
+}
+
+// GetFileExtension returns file extension
+func (a *AlgLz4) GetFileExtension() string {
+	return ".lz4"
+}
+
+// GetHeader returns the Lz4 compression header
+func (a *AlgLz4) GetHeader() []byte {
+	// Size is optional.
+	buf := a.buf[:]
+
+	// Set the fixed size data: magic number, block max size and flags.
+	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+	flg := byte(lz4.Version << 6)
+	flg |= 1 << 5 // No block dependency.
+	if a.Header.BlockChecksum {
+		flg |= 1 << 4
+	}
+	if a.Header.Size > 0 {
+		flg |= 1 << 3
+	}
+	buf[4] = flg
+	buf[5] = blockSizeValueToIndex(a.Header.BlockMaxSize) << 4
+
+	// Current buffer size: magic(4) + flags(1) + block max size (1).
+	n := 6
+	if a.Header.Size > 0 {
+		binary.LittleEndian.PutUint64(buf[n:], a.Header.Size)
+		n += 8
+	}
+
+	// The header checksum includes the flags, block max size and optional Size.
+	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
+
+	// Header ready, write it out.
+	return buf[0 : n+1]
+}
+
+// GetFooter returns the Lz4 compression footer (an empty block)
+func (a *AlgLz4) GetFooter() []byte {
+	return LZ4Footer
+}
+
+// CompressBlock compresses a block using lz4
+func (a *AlgLz4) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
+	if len(in) > 0 {
+		n, err := a.compressBlock(in, out)
+		if err != nil {
+			return 0, 0, err
+		}
+		return n, uint64(len(in)), nil
+	}
+
+	return 0, 0, nil
+}
+
+// compressBlock compresses a block.
+func (a *AlgLz4) compressBlock(data []byte, dst io.Writer) (uint32, error) {
+	zdata := make([]byte, a.Header.BlockMaxSize) // The compressed block size cannot exceed the input's.
+	var zn int
+	if level := a.Header.CompressionLevel; level != 0 {
+		zn, _ = lz4.CompressBlockHC(data, zdata, level)
+	} else {
+		var hashTable [1 << 16]int
+		zn, _ = lz4.CompressBlock(data, zdata, hashTable[:])
+	}
+
+	var bLen uint32
+	if zn > 0 && zn < len(data) {
+		// Compressible and compressed size smaller than uncompressed: ok!
+		bLen = uint32(zn)
+		zdata = zdata[:zn]
+	} else {
+		// Uncompressed block.
+		bLen = uint32(len(data)) | compressedBlockFlag
+		zdata = data
+	}
+
+	// Write the block.
+	if err := a.writeUint32(bLen, dst); err != nil {
+		return 0, err
+	}
+	_, err := dst.Write(zdata)
+	if err != nil {
+		return 0, err
+	}
+
+	if !a.Header.BlockChecksum {
+		return bLen, nil
+	}
+	checksum := xxh32.ChecksumZero(zdata)
+	if err := a.writeUint32(checksum, dst); err != nil {
+		return 0, err
+	}
+	return bLen, nil
+}
+
+// writeUint32 writes a uint32 to the underlying writer.
+func (a *AlgLz4) writeUint32(x uint32, dst io.Writer) error { + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, x) + _, err := dst.Write(buf) + return err +} + +func blockSizeValueToIndex(size int) byte { + return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) +} + +// DecompressBlock decompresses Lz4 compressed block +func (a *AlgLz4) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) { // Get our compressed data var b bytes.Buffer _, err = io.Copy(&b, in) if err != nil { return 0, err } - // Add the length in byte form to the begining of the buffer. Because the length is not equal to BlockSize for the last block, the last block might screw this code up. - compressedBytesWithHash := b.Bytes() - compressedBytes := compressedBytesWithHash[:len(compressedBytesWithHash)-4] - hash := compressedBytesWithHash[len(compressedBytesWithHash)-4:] - // Verify, decode, write, and return - h := xxhash.New32() - _, err = h.Write(compressedBytes[4:]) + zdata := b.Bytes() + bLen := binary.LittleEndian.Uint32(zdata[:4]) + + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. + bLen &= compressedBlockMask + + if bLen > BlockSize { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + data := zdata[4 : bLen+4] + + if a.Header.BlockChecksum { + checksum := binary.LittleEndian.Uint32(zdata[4+bLen:]) + + if h := xxh32.ChecksumZero(data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + _, err := out.Write(data) + return len(data), err + } + + // compressed block + if bLen > BlockSize { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + + if a.Header.BlockChecksum { + checksum := binary.LittleEndian.Uint32(zdata[4+bLen:]) + + if h := xxh32.ChecksumZero(zdata[4 : bLen+4]); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + data := make([]byte, BlockSize) + n, err = lz4.UncompressBlock(zdata[4:bLen+4], data) if err != nil { return 0, err } - if binary.LittleEndian.Uint32(hash) != h.Sum32() { - return 0, errors.New("XXHash checksum invalid") - } - dst := make([]byte, BlockSize*2) - decompressed, err := lz4.Decode(dst, compressedBytes) - if err != nil { - return 0, err - } - _, err = out.Write(decompressed) - return len(decompressed), err + _, err = out.Write(data[:n]) + return n, err } diff --git a/backend/press/alg_snappy.go b/backend/press/alg_snappy.go deleted file mode 100644 index bae42613c..000000000 --- a/backend/press/alg_snappy.go +++ /dev/null @@ -1,35 +0,0 @@ -package press - -// This file implements compression/decompression using snappy. -import ( - "bytes" - "io" - - "github.com/golang/snappy" -) - -// SnappyHeader - Header we add to a snappy file. We don't need this. 
-var SnappyHeader = []byte{}
-
-// Function that compresses a block using snappy
-func (c *Compression) compressBlockSnappy(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize int64, err error) {
-	// Compress and return
-	outBytes := snappy.Encode(nil, in)
-	_, err = out.Write(outBytes)
-	return uint32(len(outBytes)), int64(len(in)), err
-}
-
-// Utility function to decompress a block using snappy
-func decompressBlockSnappy(in io.Reader, out io.Writer) (n int, err error) {
-	var b bytes.Buffer
-	_, err = io.Copy(&b, in)
-	if err != nil {
-		return 0, err
-	}
-	decompressed, err := snappy.Decode(nil, b.Bytes())
-	if err != nil {
-		return 0, err
-	}
-	_, err = out.Write(decompressed)
-	return len(decompressed), err
-}
diff --git a/backend/press/alg_xz.go b/backend/press/alg_xz.go
new file mode 100644
index 000000000..415bbb117
--- /dev/null
+++ b/backend/press/alg_xz.go
@@ -0,0 +1,75 @@
+package press
+
+import (
+	"bufio"
+	"io"
+
+	"github.com/ulikunitz/xz"
+)
+
+// AlgXZ represents the XZ compression algorithm
+type AlgXZ struct {
+	blockSize uint32
+	config    xz.WriterConfig
+}
+
+// InitializeXZ creates an XZ compression algorithm
+func InitializeXZ(bs uint32) Algorithm {
+	a := new(AlgXZ)
+	a.blockSize = bs
+	a.config = xz.WriterConfig{}
+	return a
+}
+
+// GetFileExtension returns file extension
+func (a *AlgXZ) GetFileExtension() string {
+	return ".xz"
+}
+
+// GetHeader returns the XZ compression header (empty)
+func (a *AlgXZ) GetHeader() []byte {
+	return []byte{}
+}
+
+// GetFooter returns the XZ compression footer (empty)
+func (a *AlgXZ) GetFooter() []byte {
+	return []byte{}
+}
+
+// CompressBlock compresses a block using xz
+func (a *AlgXZ) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
+	// Initialize buffer
+	bufw := bufio.NewWriterSize(out, int(a.blockSize+(a.blockSize)>>4))
+
+	// Initialize block writer
+	outw, err := a.config.NewWriter(bufw)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	// Compress block
+	_, err = outw.Write(in)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	// Finalize the xz stream, flush the buffer and return
+	err = outw.Close()
+	if err != nil {
+		return 0, 0, err
+	}
+	blockSize := uint32(bufw.Buffered())
+	err = bufw.Flush()
+
+	return blockSize, uint64(len(in)), err
+}
+
+// DecompressBlock decompresses an xz compressed block
+func (a *AlgXZ) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) {
+	xzReader, err := xz.NewReader(in)
+	if err != nil {
+		return 0, err
+	}
+	written, err := io.Copy(out, xzReader)
+	return int(written), err
+}
diff --git a/backend/press/compression.go b/backend/press/compression.go
index 589199c74..b60e1e4b1 100644
--- a/backend/press/compression.go
+++ b/backend/press/compression.go
@@ -2,26 +2,11 @@
 // This file is the backend implementation for seekable compression.
 package press
 
-/*
-NOTES:
-Structure of the metadata we store is:
-gzipExtraify(gzip([4-byte header size][4-byte block size] ... [4-byte block size][4-byte raw size of last block]))
-This is appended to any compressed file, and is ignored as trailing garbage in our LZ4 and SNAPPY implementations, and seen as empty archives in our GZIP and XZ_IN_GZ implementations.
-
-There are two possible compression/decompression function pairs to be used:
-The two functions that store data internally are:
-- Compression.CompressFileAppendingBlockData. Appends block data in extra data fields of empty gzip files at the end.
-- DecompressFile. Reads block data from extra fields of these empty gzip files.
-The two functions that require externally stored data are:
-- Compression.CompressFileReturningBlockData. Returns a []uint32 containing raw (uncompressed and unencoded) block data, which must be externally stored.
-- DecompressFileExtData. Takes in the []uint32 that was returned by Compression.CompressFileReturningBlockData
-WARNING: These function pairs are incompatible with each other. Don't use CompressFileAppendingBlockData with DecompressFileExtData, or the other way around. It won't work.
-*/
-
 import (
 	"bufio"
 	"bytes"
 	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
@@ -30,14 +15,9 @@ import (
 // Compression modes
 const (
 	Uncompressed = -1
-	GzipStore    = 0
-	GzipMin      = 1
-	GzipDefault  = 2
-	GzipMax      = 3
-	LZ4          = 4
-	Snappy       = 5
-	XZMin        = 6
-	XZDefault    = 7
+	LZ4  = 2
+	Gzip = 4
+	XZ   = 8
 )
 
 // Errors
@@ -50,7 +30,8 @@ const DEBUG = false
 
 // Compression is a struct containing configurable variables (what used to be constants)
 type Compression struct {
-	CompressionMode int    // Compression mode
+	CompressionMode int // Compression mode
+	Algorithm       Algorithm
 	BlockSize       uint32 // Size of blocks. Higher block size means better compression but more download bandwidth needed for small downloads
 	//                        ~1MB is recommended for xz, while ~128KB is recommended for gzip and lz4
 	HeuristicBytes int64 // Bytes to perform gzip heuristic on to determine whether a file should be compressed
@@ -59,23 +40,31 @@ type Compression struct {
 	BinPath string // Path to compression binary. This is used for all non-gzip compression.
 }
 
+// Algorithm is the main compression Algorithm Interface
+type Algorithm interface {
+	GetHeader() []byte
+
+	GetFileExtension() string
+
+	CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error)
+
+	DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error)
+
+	GetFooter() []byte
+}
+
 // NewCompressionPreset creates a Compression object with a preset mode/bs
 func NewCompressionPreset(preset string) (*Compression, error) {
 	switch preset {
-	case "gzip-store":
-		return NewCompression(GzipStore, 131070) // GZIP-store (dummy) compression
 	case "lz4":
-		return NewCompression(LZ4, 262140) // LZ4 compression (very fast)
-	case "snappy":
-		return NewCompression(Snappy, 262140) // Snappy compression (like LZ4, but slower and worse)
-	case "gzip-min":
-		return NewCompression(GzipMin, 131070) // GZIP-min compression (fast)
-	case "gzip-default":
-		return NewCompression(GzipDefault, 131070) // GZIP-default compression (medium)
-	case "xz-min":
-		return NewCompression(XZMin, 524288) // XZ-min compression (slow)
-	case "xz-default":
-		return NewCompression(XZDefault, 1048576) // XZ-default compression (very slow)
+		alg := InitializeLz4(262144, true)
+		return NewCompression(LZ4, alg, 262144) // LZ4 compression (very fast)
+	case "gzip":
+		alg := InitializeGzip(131072, 6)
+		return NewCompression(Gzip, alg, 131072) // GZIP-default compression (medium)
+	case "xz":
+		alg := InitializeXZ(1048576)
+		return NewCompression(XZ, alg, 1048576) // XZ compression (strong compression)
 	}
 	return nil, errors.New("Compression mode doesn't exist")
 }
 
@@ -83,66 +72,45 @@ func NewCompressionPreset(preset string) (*Compression, error) {
 // NewCompressionPresetNumber creates a Compression object with a preset mode/bs
 func NewCompressionPresetNumber(preset int) (*Compression, error) {
 	switch preset {
-	case GzipStore:
-		return NewCompression(GzipStore, 131070) // GZIP-store (dummy) compression
 	case LZ4:
-		return NewCompression(LZ4, 262140) // LZ4 compression (very fast)
-	case Snappy:
-		return NewCompression(Snappy, 262140) // Snappy compression (like LZ4, but slower and worse)
-	case GzipMin:
-		return NewCompression(GzipMin, 131070) // GZIP-min compression (fast)
-	case GzipDefault:
-		return NewCompression(GzipDefault, 131070) // GZIP-default compression (medium)
-	case XZMin:
-		return NewCompression(XZMin, 524288) // XZ-min compression (slow)
-	case XZDefault:
-		return NewCompression(XZDefault, 1048576) // XZ-default compression (very slow)
+		alg := InitializeLz4(262144, true)
+		return NewCompression(LZ4, alg, 262144) // LZ4 compression (very fast)
+	case Gzip:
+		alg := InitializeGzip(131072, 6)
+		return NewCompression(Gzip, alg, 131072) // GZIP-default compression (medium)
+	case XZ:
+		alg := InitializeXZ(1048576)
+		return NewCompression(XZ, alg, 1048576) // XZ compression (strong compression)
 	}
 	return nil, errors.New("Compression mode doesn't exist")
 }
 
 // NewCompression creates a Compression object with some default configuration values
-func NewCompression(mode int, bs uint32) (*Compression, error) {
-	return NewCompressionAdvanced(mode, bs, 1048576, 12, 0.9)
+func NewCompression(mode int, alg Algorithm, bs uint32) (*Compression, error) {
+	return NewCompressionAdvanced(mode, alg, bs, 1048576, 12, 0.9)
 }
 
 // NewCompressionAdvanced creates a Compression object
-func NewCompressionAdvanced(mode int, bs uint32, hb int64, threads int, mcr float64) (c *Compression, err error) {
+func NewCompressionAdvanced(mode int, alg Algorithm, bs uint32, hb int64, threads int, mcr float64) (c *Compression, err error) {
 	// Set vars
 	c = new(Compression)
+	c.Algorithm = alg
 	c.CompressionMode = mode
 	c.BlockSize = bs
 	c.HeuristicBytes = hb
 	c.NumThreads = threads
 	c.MaxCompressionRatio = mcr
-	// Get binary path if needed
-	err = getBinPaths(c, mode)
 	return c, err
 }
 
 /*** UTILITY FUNCTIONS ***/
-// Gets an overestimate for the maximum compressed block size
-func (c *Compression) maxCompressedBlockSize() uint32 {
-	return c.BlockSize + (c.BlockSize >> 2) + 256
-}
 
 // GetFileExtension gets a file extension for current compression mode
 func (c *Compression) GetFileExtension() string {
-	switch c.CompressionMode {
-	case GzipStore, GzipMin, GzipDefault, GzipMax:
-		return ".gz"
-	case XZMin, XZDefault:
-		return ".xzgz"
-	case LZ4:
-		return ".lz4"
-	case Snappy:
-		return ".snap"
-	}
-	panic("Compression mode doesn't exist")
+	return c.Algorithm.GetFileExtension()
 }
 
 // GetFileCompressionInfo gets a file extension along with compressibility of file
-// It is currently not being used but may be usable in the future.
 func (c *Compression) GetFileCompressionInfo(reader io.Reader) (compressable bool, extension string, err error) {
 	// Use our compression algorithm to do a heuristic on the first few bytes
 	var emulatedBlock, emulatedBlockCompressed bytes.Buffer
@@ -150,7 +118,7 @@ func (c *Compression) GetFileCompressionInfo(reader io.Reader) (compressable boo
 	if err != nil && err != io.EOF {
 		return false, "", err
 	}
-	compressedSize, uncompressedSize, err := c.compressBlock(emulatedBlock.Bytes(), &emulatedBlockCompressed)
+	compressedSize, uncompressedSize, err := c.Algorithm.CompressBlock(emulatedBlock.Bytes(), &emulatedBlockCompressed)
 	if err != nil {
 		return false, "", err
 	}
@@ -162,79 +130,25 @@ func (c *Compression) GetFileCompressionInfo(reader io.Reader) (compressable boo
 	}
 
 	// If the file is compressible, select file extension based on compression mode
-	return true, c.GetFileExtension(), nil
-}
-
-// Gets the file header we add to files of the currently used algorithm. Currently only used for lz4.
-func (c *Compression) getHeader() []byte {
-	switch c.CompressionMode {
-	case GzipStore, GzipMin, GzipDefault, GzipMax:
-		return GzipHeader
-	case XZMin, XZDefault:
-		return ExecHeader
-	case LZ4:
-		return LZ4Header
-	case Snappy:
-		return SnappyHeader
-	}
-	panic("Compression mode doesn't exist")
-}
-
-// Gets the file footer we add to files of the currently used algorithm. Currently only used for lz4.
-func (c *Compression) getFooter() []byte {
-	switch c.CompressionMode {
-	case GzipStore, GzipMin, GzipDefault, GzipMax:
-		return []byte{}
-	case XZMin, XZDefault:
-		return []byte{}
-	case LZ4:
-		return LZ4Footer
-	case Snappy:
-		return []byte{}
-	}
-	panic("Compression mode doesn't exist")
-}
-
-/*** BLOCK COMPRESSION FUNCTIONS ***/
-// Wrapper function to compress a block
-func (c *Compression) compressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize int64, err error) {
-	switch c.CompressionMode { // Select compression function (and arguments) based on compression mode
-	case GzipStore:
-		return c.compressBlockGz(in, out, 0)
-	case GzipMin:
-		return c.compressBlockGz(in, out, 1)
-	case GzipDefault:
-		return c.compressBlockGz(in, out, 6)
-	case GzipMax:
-		return c.compressBlockGz(in, out, 9)
-	case XZDefault:
-		return c.compressBlockExec(in, out, c.BinPath, []string{"-c"})
-	case XZMin:
-		return c.compressBlockExec(in, out, c.BinPath, []string{"-c1"})
-	case LZ4:
-		return c.compressBlockLz4(in, out)
-	case Snappy:
-		return c.compressBlockSnappy(in, out)
-	}
-	panic("Compression mode doesn't exist")
+	return true, c.Algorithm.GetFileExtension(), nil
 }
 
 /*** MAIN COMPRESSION INTERFACE ***/
 // compressionResult represents the result of compression for a single block (gotten by a single thread)
 type compressionResult struct {
 	buffer *bytes.Buffer
-	n      int64
+	n      uint64
 	err    error
 }
 
 // CompressFileReturningBlockData compresses a file returning the block data for that file.
 func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer) (blockData []uint32, err error) {
 	// Initialize buffered writer
-	bufw := bufio.NewWriterSize(out, int(c.maxCompressedBlockSize()*uint32(c.NumThreads)))
+	bufw := bufio.NewWriterSize(out, int((c.BlockSize+(c.BlockSize)>>4)*uint32(c.NumThreads)))
 
 	// Get blockData, copy over header, add length of header to blockData
 	blockData = make([]uint32, 0)
-	header := c.getHeader()
+	header := c.Algorithm.GetHeader()
 	_, err = bufw.Write(header)
 	if err != nil {
 		return nil, err
@@ -263,7 +177,7 @@ func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer
 			var buffer bytes.Buffer
 
 			// Compress block
-			_, n, err := c.compressBlock(in, &buffer)
+			_, n, err := c.Algorithm.CompressBlock(in, &buffer)
 			if err != nil && err != io.EOF { // This errored out.
			res.buffer = nil
 			res.n = 0
 			res.err = err
@@ -276,7 +190,6 @@ func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer
 			res.n = n
 			res.err = err
 			compressionResults[i] <- res
-			return
 		}(i, inputBuffer.Bytes())
 		// If we have reached eof, we don't need more threads
 		if eofAt != -1 {
@@ -294,12 +207,13 @@ func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer
 			return nil, res.err
 		}
 		blockSize := uint32(res.buffer.Len())
+
 		_, err = io.Copy(bufw, res.buffer)
 		if err != nil {
 			return nil, err
 		}
 		if DEBUG {
-			log.Printf("%d %d\n", res.n, blockSize)
+			fmt.Printf("%d %d\n", res.n, blockSize)
 		}
 
 		// Append block size to block data
@@ -310,6 +224,7 @@ func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer
 			if DEBUG {
 				log.Printf("%d %d %d\n", res.n, byte(res.n%256), byte(res.n/256))
 			}
+
 			blockData = append(blockData, uint32(res.n))
 			break
 		}
@@ -333,7 +248,9 @@ func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer
 	}
 
 	// Write footer and flush
-	_, err = bufw.Write(c.getFooter())
+	footer := c.Algorithm.GetFooter()
+
+	_, err = bufw.Write(footer)
 	if err != nil {
 		return nil, err
 	}
@@ -344,22 +261,6 @@ func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer
 }
 
 /*** BLOCK DECOMPRESSION FUNCTIONS ***/
-// Wrapper function to decompress a block
-func (d *Decompressor) decompressBlock(in io.Reader, out io.Writer) (n int, err error) {
-	switch d.c.CompressionMode { // Select decompression function based off compression mode
-	case GzipStore, GzipMin, GzipDefault, GzipMax:
-		return decompressBlockRangeGz(in, out)
-	case XZMin:
-		return decompressBlockRangeExec(in, out, d.c.BinPath, []string{"-dc1"})
-	case XZDefault:
-		return decompressBlockRangeExec(in, out, d.c.BinPath, []string{"-dc"})
-	case LZ4:
-		return decompressBlockLz4(in, out, int64(d.c.BlockSize))
-	case Snappy:
-		return decompressBlockSnappy(in, out)
-	}
-	panic("Compression mode doesn't exist") // If none of the above returned
-}
 
 // Wrapper function for decompressBlock that implements multithreading
 // decompressionResult represents the result of decompressing a block
@@ -412,7 +313,7 @@ func (d *Decompressor) decompressBlockRangeMultithreaded(in io.Reader, out io.Wr
 			var res decompressionResult
 
 			// Decompress block
-			_, res.err = d.decompressBlock(in, &block)
+			_, res.err = d.c.Algorithm.DecompressBlock(in, &block, d.c.BlockSize)
 			res.buffer = &block
 			decompressionResults[i] <- res
 		}(i, currBlock, &compressedBlock)
@@ -535,8 +436,7 @@ func (d Decompressor) Read(p []byte) (int, error) {
 		blocksToRead = int64(d.numBlocks) - blockNumber
 		returnEOF = true
 	}
-	var blockEnd int64                                 // End position of blocks to read
-	blockEnd = d.blockStarts[blockNumber+blocksToRead] // Start of the block after the last block we want to get is the end of the last block we want to get
+	blockEnd := d.blockStarts[blockNumber+blocksToRead] // Start of the block after the last block we want to get is the end of the last block we want to get
 	blockLen := blockEnd - blockStart
 
 	// Read compressed block range into buffer
diff --git a/backend/press/compression_test.go b/backend/press/compression_test.go
index 51927e00d..ff625985c 100644
--- a/backend/press/compression_test.go
+++ b/backend/press/compression_test.go
@@ -122,12 +122,7 @@ func getCompressibleString(size int) string {
 }
 
 func TestCompression(t *testing.T) {
-	testCases := []string{"lz4", "snappy", "gzip-min"}
-	if checkXZ() {
-		testCases = append(testCases, "xz-min")
-	} else {
-		t.Log("XZ binary not found on current system. Not testing xz.")
-	}
+	testCases := []string{"lz4", "gzip", "xz"}
 	for _, tc := range testCases {
 		t.Run(tc, func(t *testing.T) {
 			testSmallLarge(t, tc)
diff --git a/backend/press/press.go b/backend/press/press.go
index 3313495a2..4045ebab4 100644
--- a/backend/press/press.go
+++ b/backend/press/press.go
@@ -5,6 +5,7 @@ import (
 	"bufio"
 	"bytes"
 	"compress/gzip"
+	"context"
 	"crypto/md5"
 	"encoding/binary"
 	"encoding/gob"
@@ -17,24 +18,18 @@ import (
 
 	"github.com/gabriel-vasile/mimetype"
 
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/chunkedreader"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/fspath"
-	"github.com/ncw/rclone/fs/hash"
-	"github.com/ncw/rclone/fs/operations" // Used for Rcat
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/chunkedreader"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/fspath"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations" // Used for Rcat
 )
 
-/**
-NOTES:
-Filenames are now .
-Hashes and mime types now supported
-Metadata files now used to store metadata and point to actual files
-**/
-
 // Globals
 // Register with Fs
 func init() {
 	compressionModeOptions := []fs.OptionExample{{ // Default compression mode options
 		Value: "lz4",
 		Help:  "Fast, real-time compression with reasonable compression ratios.",
 	}, {
-		Value: "snappy",
-		Help:  "Google's compression algorithm. Slightly faster and larger than LZ4.",
-	}, {
-		Value: "gzip-min",
-		Help:  "Standard gzip compression with fastest parameters.",
+		Value: "gzip",
+		Help:  "Standard gzip compression with default parameters.",
 	}, {
-		Value: "gzip-default",
-		Help:  "Standard gzip compression with default parameters.",
+		Value: "xz",
+		Help:  "Standard xz compression with default parameters.",
 	},
 	}
-	if checkXZ() { // If XZ is on the system, append compression mode options that are only available with the XZ binary installed
-		compressionModeOptions = append(compressionModeOptions, []fs.OptionExample{{
-			Value: "xz-min",
-			Help:  "Slow but powerful compression with reasonable speed.",
-		}, {
-			Value: "xz-default",
-			Help:  "Slowest but best compression.",
-		},
-		}...)
-	}
 
 	// Register our remote
 	fs.Register(&fs.RegInfo{
@@ -75,8 +57,8 @@ func init() {
 			Required: true,
 		}, {
 			Name:     "compression_mode",
-			Help:     "Compression mode. Installing XZ will unlock XZ modes.",
-			Default:  "gzip-min",
+			Help:     "Compression mode.",
+			Default:  "gzip",
 			Examples: compressionModeOptions,
 		}},
 	})
 }
 
@@ -106,18 +88,21 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	c, err := newCompressionForConfig(opt)
-	if err != nil {
-		return nil, err
-	}
+
 	remote := opt.Remote
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point press remote at itself - check the value of the remote setting")
 	}
+
 	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
 	}
+
+	c, err := newCompressionForConfig(opt)
+	if err != nil {
+		return nil, err
+	}
 	// Strip trailing slashes if they exist in rpath
 	rpath = strings.TrimRight(rpath, "\\/")
@@ -148,13 +133,16 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		DuplicateFiles:          true,
 		ReadMimeType:            false,
 		WriteMimeType:           false,
+		GetTier:                 true,
+		SetTier:                 true,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
-		SetTier:                 true,
-		GetTier:                 true,
 	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
 	// We support reading MIME types no matter the wrapped fs
 	f.features.ReadMimeType = true
+	if wrappedFs.Features().Move == nil && wrappedFs.Features().Copy == nil {
+		f.features.PutStream = nil
+	}
 
 	return f, err
 }
 
@@ -253,42 +241,6 @@ type Fs struct {
 	c *Compression
 }
 
-// Name of the remote (as passed into NewFs)
-func (f *Fs) Name() string {
-	return f.name
-}
-
-// Root of the remote (as passed into NewFs)
-func (f *Fs) Root() string {
-	return f.root
-}
-
-// Features returns the optional features of this Fs
-func (f *Fs) Features() *fs.Features {
-	return f.features
-}
-
-// String returns a description of the FS
-func (f *Fs) String() string {
-	return fmt.Sprintf("Compressed drive '%s:%s'", f.name, f.root)
-}
-
-// Get an object from a metadata file
-/*func (f *Fs) addMeta(entries *fs.DirEntries, mo fs.Object) {
-	meta := readMetadata(mo)
-	origFileName, err := processMetadataName(mo.Remote())
-	if err != nil {
-		fs.Errorf(mo, "Not a metadata file: %v", err)
-		return
-	}
-	o, err := f.Fs.NewObject(generateDataNameFromCompressionMode(origFileName, meta.Size, meta.CompressionMode))
-	if err != nil {
-		fs.Errorf(mo, "Metadata corrupted: %v", err)
-		return
-	}
-	*entries = append(*entries, f.newObject(o, mo, meta))
-}*/
-
 // Get an Object from a data DirEntry
 func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
 	origFileName, _, size, err := processFileName(o.Remote())
@@ -344,8 +296,8 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 // This should return ErrDirNotFound if the directory isn't
 // found.
 // List entries and process them
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	entries, err = f.Fs.List(dir)
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	entries, err = f.Fs.List(ctx, dir)
 	if err != nil {
 		return nil, err
 	}
@@ -368,8 +320,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { - return f.Fs.Features().ListR(dir, func(entries fs.DirEntries) error { +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { + return f.Fs.Features().ListR(ctx, dir, func(entries fs.DirEntries) error { newEntries, err := f.processEntries(entries) if err != nil { return err @@ -379,18 +331,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) { } // NewObject finds the Object at remote. -func (f *Fs) NewObject(remote string) (fs.Object, error) { +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { // Read metadata from metadata object - mo, err := f.Fs.NewObject(generateMetadataName(remote)) + mo, err := f.Fs.NewObject(ctx, generateMetadataName(remote)) if err != nil { return nil, err } - meta := readMetadata(mo) + meta := readMetadata(ctx, mo) if meta == nil { return nil, errors.New("error decoding metadata") } // Create our Object - o, err := f.Fs.NewObject(generateDataNameFromCompressionMode(remote, meta.Size, meta.CompressionMode)) + o, err := f.Fs.NewObject(ctx, generateDataNameFromCompressionMode(remote, meta.Size, meta.CompressionMode)) return f.newObject(o, mo, meta), err } @@ -407,23 +359,23 @@ func (c *Compression) checkFileCompressibilityAndType(in io.Reader) (newReader i if err != nil { return nil, false, "", err } - mimeType, _ = mimetype.Detect(b.Bytes()) + mime := mimetype.Detect(b.Bytes()) in = io.MultiReader(bytes.NewReader(b.Bytes()), in) in = wrap(in) - return in, compressible, mimeType, nil + return in, compressible, mime.String(), nil } // Verifies an object hash -func (f *Fs) verifyObjectHash(o fs.Object, hasher *hash.MultiHasher, ht hash.Type) (err error) { +func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) (err error) { srcHash := hasher.Sums()[ht] var dstHash string - dstHash, err = o.Hash(ht) + dstHash, err = o.Hash(ctx, ht) if err != nil { return errors.Wrap(err, "failed to read destination hash") } if srcHash != "" && dstHash != "" && srcHash != dstHash { // remove object - err = o.Remove() + err = o.Remove(ctx) if err != nil { fs.Errorf(o, "Failed to remove corrupted object: %v", err) } @@ -432,7 +384,7 @@ func (f *Fs) verifyObjectHash(o fs.Object, hasher *hash.MultiHasher, ht hash.Typ return nil } -type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) +type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) type blockDataAndError struct { err error @@ -440,7 +392,7 @@ type blockDataAndError struct { } // Put a compressed version of a file. Returns a wrappable object and metadata. -func (f *Fs) putCompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn, mimeType string, verifyCompressedObject bool) (fs.Object, *ObjectMetadata, error) { +func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn, mimeType string, verifyCompressedObject bool) (fs.Object, *ObjectMetadata, error) { // Unwrap reader accounting in, wrap := accounting.UnWrap(in) @@ -483,24 +435,23 @@ func (f *Fs) putCompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOptio } // Transfer the data - // o, err := put(wrappedIn, f.renameObjectInfo(src, f.c.generateDataName(src.Remote(), src.Size(), true), -1), options...) 
- o, err := operations.Rcat(f.Fs, f.c.generateDataName(src.Remote(), src.Size(), true), ioutil.NopCloser(wrappedIn), src.ModTime()) + //o, err := put(ctx, wrappedIn, f.wrapInfo(src, f.c.generateDataName(src.Remote(), src.Size(), true), src.Size()), options...) + o, err := operations.Rcat(ctx, f.Fs, f.c.generateDataName(src.Remote(), src.Size(), true), ioutil.NopCloser(wrappedIn), src.ModTime(ctx)) if err != nil { if o != nil { - removeErr := o.Remove() + removeErr := o.Remove(ctx) if removeErr != nil { fs.Errorf(o, "Failed to remove partially transferred object: %v", err) } } return nil, nil, err } - // Check whether we got an error during compression result := <-compressionResult err = result.err if err != nil { if o != nil { - removeErr := o.Remove() + removeErr := o.Remove(ctx) if removeErr != nil { fs.Errorf(o, "Failed to remove partially compressed object: %v", err) } @@ -511,11 +462,11 @@ func (f *Fs) putCompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOptio // Generate metadata blockData := result.blockData _, _, decompressedSize := parseBlockData(blockData, f.c.BlockSize) - meta := newMetadata(decompressedSize, f.c.CompressionMode, blockData, metaHasher.Sum([]byte{}), mimeType) + meta := newMetadata(decompressedSize, f.c.CompressionMode, blockData, metaHasher.Sum(nil), mimeType) // Check the hashes of the compressed data if we were comparing them if ht != hash.None && hasher != nil { - err := f.verifyObjectHash(o, hasher, ht) + err := f.verifyObjectHash(ctx, o, hasher, ht) if err != nil { return nil, nil, err } @@ -525,7 +476,7 @@ func (f *Fs) putCompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOptio } // Put an uncompressed version of a file. Returns a wrappable object and metadata. -func (f *Fs) putUncompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn, mimeType string, verifyCompressedObject bool) (fs.Object, *ObjectMetadata, error) { +func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn, mimeType string, verifyCompressedObject bool) (fs.Object, *ObjectMetadata, error) { // Unwrap the accounting, add our metadata hasher, then wrap it back on in, wrap := accounting.UnWrap(in) metaHasher := md5.New() @@ -548,11 +499,11 @@ func (f *Fs) putUncompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOpt wrappedIn = wrap(wrappedIn) } // Put the object - o, err := put(wrappedIn, f.renameObjectInfo(src, f.c.generateDataName(src.Remote(), src.Size(), false), src.Size()), options...) + o, err := put(ctx, wrappedIn, f.wrapInfo(src, f.c.generateDataName(src.Remote(), src.Size(), false), src.Size()), options...) //o, err := operations.Rcat(f, f.c.generateDataName(src.Remote(), src.Size(), false), wrappedIn, src.ModTime()) if err != nil { if o != nil { - removeErr := o.Remove() + removeErr := o.Remove(ctx) if removeErr != nil { fs.Errorf(o, "Failed to remove partially transferred object: %v", err) } @@ -561,7 +512,7 @@ func (f *Fs) putUncompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOpt } // Check the hashes of the compressed data if we were comparing them if ht != hash.None && hasher != nil { - err := f.verifyObjectHash(o, hasher, ht) + err := f.verifyObjectHash(ctx, o, hasher, ht) if err != nil { return nil, nil, err } @@ -571,7 +522,7 @@ func (f *Fs) putUncompress(in io.Reader, src fs.ObjectInfo, options []fs.OpenOpt } // This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object. 
-func (f *Fs) putMetadata(meta *ObjectMetadata, src fs.ObjectInfo, options []fs.OpenOption, put putFn, verifyCompressedObject bool) (mo fs.Object, err error) { +func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.ObjectInfo, options []fs.OpenOption, put putFn, verifyCompressedObject bool) (mo fs.Object, err error) { // Generate the metadata contents var b bytes.Buffer gzipWriter := gzip.NewWriter(&b) @@ -584,65 +535,49 @@ func (f *Fs) putMetadata(meta *ObjectMetadata, src fs.ObjectInfo, options []fs.O if err != nil { return nil, err } - var metaReader io.Reader - metaReader = bytes.NewReader(b.Bytes()) - // If verifyCompressedObject is on, find a hash the destination supports to compute a hash of - // the compressed data. - ht := f.Fs.Hashes().GetOne() - var hasher *hash.MultiHasher - if ht != hash.None && verifyCompressedObject { - hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht)) - if err != nil { - return nil, err - } - metaReader = io.TeeReader(metaReader, hasher) - } + metaReader := bytes.NewReader(b.Bytes()) + // Put the data - mo, err = put(metaReader, f.renameObjectInfo(src, generateMetadataName(src.Remote()), int64(b.Len())), options...) + mo, err = put(ctx, metaReader, f.wrapInfo(src, generateMetadataName(src.Remote()), int64(b.Len())), options...) if err != nil { - removeErr := mo.Remove() + removeErr := mo.Remove(ctx) if removeErr != nil { fs.Errorf(mo, "Failed to remove partially transferred object: %v", err) } return nil, err } - // Check the hashes of the compressed data if we were comparing them - if ht != hash.None && hasher != nil { - err := f.verifyObjectHash(mo, hasher, ht) - if err != nil { - return nil, err - } - } return mo, nil } // This function will put both the data and metadata for an Object. // putData is the function used for data, while putMeta is the function used for metadata. -func (f *Fs) putWithCustomFunctions(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, putData putFn, putMeta putFn, verifyCompressedObject bool) (*Object, error) { - // Check compressibility of file - in, compressible, mimeType, err := f.c.checkFileCompressibilityAndType(in) - if err != nil { - return nil, err - } +func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, + putData putFn, putMeta putFn, compressible bool, mimeType string, verifyCompressedObject bool) (*Object, error) { // Put file then metadata var dataObject fs.Object var meta *ObjectMetadata + var err error if compressible { - dataObject, meta, err = f.putCompress(in, src, options, putData, mimeType, verifyCompressedObject) + dataObject, meta, err = f.putCompress(ctx, in, src, options, putData, mimeType, verifyCompressedObject) } else { - dataObject, meta, err = f.putUncompress(in, src, options, putData, mimeType, verifyCompressedObject) + dataObject, meta, err = f.putUncompress(ctx, in, src, options, putData, mimeType, verifyCompressedObject) } if err != nil { return nil, err } - mo, err := f.putMetadata(meta, src, options, putMeta, verifyCompressedObject) - return f.newObject(dataObject, mo, meta), err -} -// This function will put both the data and metadata for an Object, using the default f.Fs.Put for metadata and checking file hashes. 
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (*Object, error) {
-	return f.putWithCustomFunctions(in, src, options, put, f.Fs.Put, true)
+	mo, err := f.putMetadata(ctx, meta, src, options, putMeta, verifyCompressedObject)
+
+	// Metadata upload may fail. In this case we try to remove the original object.
+	if err != nil {
+		removeError := dataObject.Remove(ctx)
+		if removeError != nil {
+			return nil, removeError
+		}
+		return nil, err
+	}
+	return f.newObject(dataObject, mo, meta), err
 }
 
 // Put in to the remote path with the modTime given of the given size
@@ -650,27 +585,95 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.put(in, src, options, f.Fs.Put)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	// If there's already an existing object, we need to make sure to explicitly update it so we don't leave
+	// orphaned data. Alternatively we could also delete it (which would be simpler), but that has the
+	// disadvantage that it destroys all server-side versioning.
+	o, err := f.NewObject(ctx, src.Remote())
+	if err == fs.ErrorObjectNotFound {
+		// Get our file compressibility
+		in, compressible, mimeType, err := f.c.checkFileCompressibilityAndType(in)
+		if err != nil {
+			return nil, err
+		}
+		return f.putWithCustomFunctions(ctx, in, src, options, f.Fs.Put, f.Fs.Put, compressible, mimeType, true)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return o, o.Update(ctx, in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.put(in, src, options, f.Fs.Features().PutStream)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	oldObj, err := f.NewObject(ctx, src.Remote())
+	if err != nil && err != fs.ErrorObjectNotFound {
+		return nil, err
+	}
+	found := err == nil
+
+	in, compressible, mimeType, err := f.c.checkFileCompressibilityAndType(in)
+	if err != nil {
+		return nil, err
+	}
+	newObj, err := f.putWithCustomFunctions(ctx, in, src, options, f.Fs.Features().PutStream, f.Fs.Put, compressible, mimeType, true)
+	if err != nil {
+		return nil, err
+	}
+
+	// Our transfer is now complete. If we already had an object with the same name we can safely remove it now.
+	// This is necessary to make sure we don't leave the remote in an inconsistent state.
+	if found {
+		err = oldObj.(*Object).Object.Remove(ctx)
+		if err != nil {
+			return nil, errors.Wrap(err, "Couldn't remove original object")
+		}
+	}
+
+	// Rename the streamed object to its final size-encoded name. Prefer a server-side move;
+	// if we don't have move we'll need to resort to server-side copy and remove.
+	var wrapObj fs.Object
+	if moveFs, ok := f.Fs.(fs.Mover); ok {
+		wrapObj, err = moveFs.Move(ctx, newObj.Object, f.c.generateDataName(src.Remote(), newObj.size, compressible))
+		if err != nil {
+			return nil, errors.Wrap(err, "Couldn't rename streamed object")
+		}
+	} else if copyFs, ok := f.Fs.(fs.Copier); ok {
+		wrapObj, err = copyFs.Copy(ctx, newObj.Object, f.c.generateDataName(src.Remote(), newObj.size, compressible))
+		if err != nil {
+			return nil, errors.Wrap(err, "Couldn't copy streamed object")
+		}
+		// Remove the original
+		err = newObj.Remove(ctx)
+		if err != nil {
+			return wrapObj, errors.Wrap(err, "Couldn't remove original streamed object. Remote may be in an inconsistent state")
+		}
+	}
+
+	newObj.Object = wrapObj
+
+	return newObj, nil
 }
 
+// Temporarily disabled. There might be a way to implement this correctly, but with the current metadata handling
+// duplicate objects will break things. Right now I can't think of a way to make this work.
+
 // PutUnchecked uploads the object
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+/*func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// If PutUnchecked is supported, do it.
+	// I'm unsure about this. With the current metadata model this might actually break things. Needs some manual testing.
 	do := f.Fs.Features().PutUnchecked
 	if do == nil {
 		return nil, errors.New("can't PutUnchecked")
 	}
-	return f.putWithCustomFunctions(in, src, options, do, do, false)
-}
+	return f.putWithCustomFunctions(ctx, in, src, options, do, do, false)
+}*/
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
@@ -680,15 +683,15 @@ func (f *Fs) Hashes() hash.Set {
 // Mkdir makes the directory (container, bucket)
 //
 // Shouldn't return an error if it already exists
-func (f *Fs) Mkdir(dir string) error {
-	return f.Fs.Mkdir(dir)
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	return f.Fs.Mkdir(ctx, dir)
 }
 
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(dir string) error {
-	return f.Fs.Rmdir(dir)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.Fs.Rmdir(ctx, dir)
 }
 
 // Purge all files in the root and the root directory
@@ -697,12 +700,12 @@ func (f *Fs) Rmdir(dir string) error {
 // quicker than just running Remove() on the result of List()
 //
 // Return an error if it doesn't exist
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context, dir string) error {
 	do := f.Fs.Features().Purge
 	if do == nil {
 		return fs.ErrorCantPurge
 	}
-	return do()
+	return do(ctx, dir)
 }
 
 // Copy src to this remote using server side copy operations.
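A note on the rename step in PutStream above (and in Update further down): because the uncompressed size is encoded in the data object's name, a streamed upload can only receive its final name once the size is known, so the backend uploads first and then renames server-side. The pattern reduces to the minimal sketch below, assuming the wrapped `fs.Fs`, `fs.Mover` and `fs.Copier` interfaces from rclone; the helper name `renameServerSide` is hypothetical and not part of this patch.

```go
// Sketch of the move-else-copy rename fallback. `base` is the wrapped fs.Fs,
// `obj` the freshly uploaded data object and `name` its final size-encoded name.
func renameServerSide(ctx context.Context, base fs.Fs, obj fs.Object, name string) (fs.Object, error) {
	if moveFs, ok := base.(fs.Mover); ok {
		// Preferred: a single server-side move, which may preserve versioning.
		return moveFs.Move(ctx, obj, name)
	}
	if copyFs, ok := base.(fs.Copier); ok {
		// Fallback: server-side copy, then remove the temporary original.
		copied, err := copyFs.Copy(ctx, obj, name)
		if err != nil {
			return nil, err
		}
		return copied, obj.Remove(ctx)
	}
	// NewFs disables PutStream when neither move nor copy is available,
	// so in practice this branch should not be reached from PutStream.
	return nil, errors.New("remote supports neither server-side move nor copy")
}
```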
@@ -714,7 +717,7 @@ func (f *Fs) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Copy if do == nil { return nil, fs.ErrorCantCopy @@ -723,19 +726,32 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { if !ok { return nil, fs.ErrorCantCopy } + // We might be trying to overwrite a file with a newer version but due to size difference the name + // is different. Therefore we have to remove the old file first (if it exists). + dstFile, err := f.NewObject(ctx, remote) + if err != nil && err != fs.ErrorObjectNotFound { + return nil, err + } + if err == nil { + err := dstFile.Remove(ctx) + if err != nil { + return nil, err + } + } + // Copy over metadata - err := o.loadMetadataObjectIfNotLoaded() + err = o.loadMetadataIfNotLoaded(ctx) if err != nil { return nil, err } newFilename := generateMetadataName(remote) - moResult, err := do(o.mo, newFilename) + moResult, err := do(ctx, o.mo, newFilename) if err != nil { return nil, err } // Copy over data newFilename = generateDataNameFromCompressionMode(remote, src.Size(), o.meta.CompressionMode) - oResult, err := do(o.Object, newFilename) + oResult, err := do(ctx, o.Object, newFilename) if err != nil { return nil, err } @@ -751,7 +767,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { do := f.Fs.Features().Move if do == nil { return nil, fs.ErrorCantMove @@ -760,19 +776,33 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { if !ok { return nil, fs.ErrorCantMove } + // We might be trying to overwrite a file with a newer version but due to size difference the name + // is different. Therefore we have to remove the old file first (if it exists). 
+ dstFile, err := f.NewObject(ctx, remote) + if err != nil && err != fs.ErrorObjectNotFound { + return nil, err + } + if err == nil { + err := dstFile.Remove(ctx) + if err != nil { + return nil, err + } + } + // Move metadata - err := o.loadMetadataObjectIfNotLoaded() + err = o.loadMetadataIfNotLoaded(ctx) if err != nil { return nil, err } newFilename := generateMetadataName(remote) - moResult, err := do(o.mo, newFilename) + moResult, err := do(ctx, o.mo, newFilename) if err != nil { return nil, err } + // Move data newFilename = generateDataNameFromCompressionMode(remote, src.Size(), o.meta.CompressionMode) - oResult, err := do(o.Object, newFilename) + oResult, err := do(ctx, o.Object, newFilename) if err != nil { return nil, err } @@ -787,7 +817,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { do := f.Fs.Features().DirMove if do == nil { return fs.ErrorCantDirMove @@ -797,28 +827,28 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error { fs.Debugf(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove } - return do(srcFs.Fs, srcRemote, dstRemote) + return do(ctx, srcFs.Fs, srcRemote, dstRemote) } // CleanUp the trash in the Fs // // Implement this if you have a way of emptying the trash or // otherwise cleaning up old versions of files. -func (f *Fs) CleanUp() error { +func (f *Fs) CleanUp(ctx context.Context) error { do := f.Fs.Features().CleanUp if do == nil { return errors.New("can't CleanUp") } - return do() + return do(ctx) } // About gets quota information from the Fs -func (f *Fs) About() (*fs.Usage, error) { +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { do := f.Fs.Features().About if do == nil { return nil, errors.New("About not supported") } - return do() + return do(ctx) } // UnWrap returns the Fs that this Fs is wrapping @@ -838,16 +868,16 @@ func (f *Fs) SetWrapper(wrapper fs.Fs) { // MergeDirs merges the contents of all the directories passed // in into the first one and rmdirs the other directories. -func (f *Fs) MergeDirs(dirs []fs.Directory) error { +func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { do := f.Fs.Features().MergeDirs if do == nil { return errors.New("MergeDirs not supported") } out := make([]fs.Directory, len(dirs)) for i, dir := range dirs { - out[i] = fs.NewDirCopy(dir).SetRemote(dir.Remote()) + out[i] = fs.NewDirCopy(ctx, dir).SetRemote(dir.Remote()) } - return do(out) + return do(ctx, out) } // DirCacheFlush resets the directory cache - used in testing @@ -862,7 +892,7 @@ func (f *Fs) DirCacheFlush() { // ChangeNotify calls the passed function with a path // that has had changes. If the implementation // uses polling, it should adhere to the given interval. 
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
 	do := f.Fs.Features().ChangeNotify
 	if do == nil {
 		return
@@ -885,21 +915,21 @@ func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalCha
 		}
 		notifyFunc(wrappedPath, entryType)
 	}
-	do(wrappedNotifyFunc, pollIntervalChan)
+	do(ctx, wrappedNotifyFunc, pollIntervalChan)
 }
 
 // PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration, unlink bool) (string, error) {
 	do := f.Fs.Features().PublicLink
 	if do == nil {
 		return "", errors.New("PublicLink not supported")
 	}
-	o, err := f.NewObject(remote)
+	o, err := f.NewObject(ctx, remote)
 	if err != nil {
 		// assume it is a directory
-		return do(remote)
+		return do(ctx, remote, duration, unlink)
 	}
-	return do(o.(*Object).Object.Remote())
+	return do(ctx, o.(*Object).Object.Remote(), duration, unlink)
 }
 
 /*** OBJECT FUNCTIONS ***/
@@ -935,9 +965,9 @@ func newMetadata(size int64, compressionMode int, blockData []uint32, hash []byt
 }
 
 // This function will read the metadata from a metadata object.
-func readMetadata(mo fs.Object) (meta *ObjectMetadata) {
+func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
 	// Open our metadata object
-	rc, err := mo.Open()
+	rc, err := mo.Open(ctx)
 	if err != nil {
 		return nil
 	}
@@ -964,6 +994,104 @@ func readMetadata(mo fs.Object) (meta *ObjectMetadata) {
 	return meta
 }
 
+// Remove removes this object
+func (o *Object) Remove(ctx context.Context) error {
+	err := o.loadMetadataObjectIfNotLoaded(ctx)
+	if err != nil {
+		return err
+	}
+	err = o.mo.Remove(ctx)
+	objErr := o.Object.Remove(ctx)
+	if err != nil {
+		return err
+	}
+	return objErr
+}
+
+// ReadCloserWrapper combines a Reader and a Closer to a ReadCloser
+type ReadCloserWrapper struct {
+	dataSource io.Reader
+	closer     io.Closer
+}
+
+func combineReaderAndCloser(dataSource io.Reader, closer io.Closer) *ReadCloserWrapper {
+	rc := new(ReadCloserWrapper)
+	rc.dataSource = dataSource
+	rc.closer = closer
+	return rc
+}
+
+// Read function
+func (w *ReadCloserWrapper) Read(p []byte) (n int, err error) {
+	return w.dataSource.Read(p)
+}
+
+// Close function
+func (w *ReadCloserWrapper) Close() error {
+	return w.closer.Close()
+}
+
+// Update in to the object with the modTime given of the given size
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	err = o.loadMetadataIfNotLoaded(ctx) // Loads metadata object too
+	if err != nil {
+		return err
+	}
+	// Function that updates metadata object
+	updateMeta := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+		return o.mo, o.mo.Update(ctx, in, src, options...)
+	}
+
+	// Get our file compressibility
+	in, compressible, mimeType, err := o.f.c.checkFileCompressibilityAndType(in)
+	if err != nil {
+		return err
+	}
+
+	// Since we're encoding the original file size in the name, we'll need to make sure the name is updated before the actual update
+	var newObject *Object
+	origName := o.Remote()
+	if o.meta.CompressionMode != Uncompressed || compressible {
+		// The old object was compressed or the new data is compressible, so the stored name changes; we must either move-then-update or reupload-then-remove the object, and update the metadata.
+		// Check if this FS supports moving
+		moveFs, ok := o.f.Fs.(fs.Mover)
+		if ok { // If this fs supports moving, use move-then-update. This may help keep some versioning alive.
+			// First, move the object
+			var movedObject fs.Object
+			movedObject, err = moveFs.Move(ctx, o.Object, o.f.c.generateDataName(o.Remote(), src.Size(), compressible))
+			if err != nil {
+				return err
+			}
+			// Create function that updates moved object, then update
+			update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+				return movedObject, movedObject.Update(ctx, in, src, options...)
+			}
+			newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType, true)
+		} else { // If this fs does not support moving, fall back to reuploading the object then removing the old one.
+			newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType, true)
+			removeErr := o.Object.Remove(ctx) // Note: We only remove the old object after the reupload so a failed update doesn't destroy data.
+			if removeErr != nil {
+				return removeErr
+			}
+		}
+	} else {
+		// Function that updates object
+		update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+			return o.Object, o.Object.Update(ctx, in, src, options...)
+		}
+		// The file stays uncompressed, so just update the object and metadata in place
+		newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType, true)
+	}
+	if err != nil {
+		return err
+	}
+	// Update object metadata and return
+	o.Object = newObject.Object
+	o.meta = newObject.meta
+	o.size = newObject.size
+	return nil
+}
+
+// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object { return &Object{ @@ -989,21 +1117,21 @@ func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *O } // This loads the metadata of a press Object if it's not loaded yet -func (o *Object) loadMetadataIfNotLoaded() (err error) { - err = o.loadMetadataObjectIfNotLoaded() +func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) { + err = o.loadMetadataObjectIfNotLoaded(ctx) if err != nil { return err } if o.meta == nil { - o.meta = readMetadata(o.mo) + o.meta = readMetadata(ctx, o.mo) } return err } // This loads the metadata object of a press Object if it's not loaded yet -func (o *Object) loadMetadataObjectIfNotLoaded() (err error) { +func (o *Object) loadMetadataObjectIfNotLoaded(ctx context.Context) (err error) { if o.mo == nil { - o.mo, err = o.f.Fs.NewObject(o.moName) + o.mo, err = o.f.Fs.NewObject(ctx, o.moName) } return err } @@ -1021,20 +1149,6 @@ func (o *Object) String() string { return o.Remote() } -// Remove removes this object -func (o *Object) Remove() error { - err := o.loadMetadataObjectIfNotLoaded() - if err != nil { - return err - } - err = o.mo.Remove() - objErr := o.Object.Remove() - if err != nil { - return err - } - return objErr -} - // Remote returns the remote path func (o *Object) Remote() string { origFileName, _, _, err := processFileName(o.Object.Remote()) @@ -1053,10 +1167,19 @@ func (o *Object) Size() int64 { return o.meta.Size } +// MimeType returns the MIME type of the file +func (o *Object) MimeType(ctx context.Context) string { + err := o.loadMetadataIfNotLoaded(ctx) + if err != nil { + return "error/error" + } + return o.meta.MimeType +} + // Hash returns the selected checksum of the file // If no checksum is available it returns "" -func (o *Object) Hash(ht hash.Type) (string, error) { - err := o.loadMetadataIfNotLoaded() +func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { + err := o.loadMetadataIfNotLoaded(ctx) if err != nil { return "", err } @@ -1066,13 +1189,23 @@ func (o *Object) Hash(ht hash.Type) (string, error) { return hex.EncodeToString(o.meta.Hash), nil } -// MimeType returns the MIME type of the file -func (o *Object) MimeType() string { - err := o.loadMetadataIfNotLoaded() - if err != nil { - return "error/error" +// SetTier performs changing storage tier of the Object if +// multiple storage classes supported +func (o *Object) SetTier(tier string) error { + do, ok := o.Object.(fs.SetTierer) + if !ok { + return errors.New("press: underlying remote does not support SetTier") } - return o.meta.MimeType + return do.SetTier(tier) +} + +// GetTier returns storage tier or class of the Object +func (o *Object) GetTier() string { + do, ok := o.Object.(fs.GetTierer) + if !ok { + return "" + } + return do.GetTier() } // UnWrap returns the wrapped Object @@ -1080,38 +1213,15 @@ func (o *Object) UnWrap() fs.Object { return o.Object } -// ReadCloserWrapper combines a Reader and a Closer to a ReadCloser -type ReadCloserWrapper struct { - dataSource io.Reader - closer io.Closer -} - -func combineReaderAndCloser(dataSource io.Reader, closer io.Closer) *ReadCloserWrapper { - rc := new(ReadCloserWrapper) - rc.dataSource = dataSource - rc.closer = closer - return rc -} - -// Read function -func (w *ReadCloserWrapper) Read(p []byte) (n int, err error) { - return w.dataSource.Read(p) -} - -// Close function -func (w *ReadCloserWrapper) Close() error { - return w.closer.Close() -} - // Open opens the file for read. 
Call Close() on the returned io.ReadCloser. Note that this call requires quite a bit of overhead.
-func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
-	err = o.loadMetadataIfNotLoaded()
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+	err = o.loadMetadataIfNotLoaded(ctx)
 	if err != nil {
 		return nil, err
 	}
 	// If we're uncompressed, just pass this to the underlying object
 	if o.meta.CompressionMode == Uncompressed {
-		return o.Object.Open(options...)
+		return o.Object.Open(ctx, options...)
 	}
 	// Get offset and limit from OpenOptions, pass the rest to the underlying remote
 	var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
@@ -1127,7 +1237,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 		}
 	}
 	// Get a chunkedreader for the wrapped object
-	chunkedReader := chunkedreader.New(o.Object, initialChunkSize, maxChunkSize)
+	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize)
 	// Get file handle
 	c, err := NewCompressionPresetNumber(o.meta.CompressionMode)
 	if err != nil {
@@ -1155,60 +1265,76 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 	return combineReaderAndCloser(fileReader, chunkedReader), nil
 }
 
-// Update in to the object with the modTime given of the given size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	err = o.loadMetadataIfNotLoaded() // Loads metadata object too
-	if err != nil {
-		return err
+// ObjectInfo describes a wrapped fs.ObjectInfo used as the source of a transfer
+type ObjectInfo struct {
+	src    fs.ObjectInfo
+	fs     *Fs
+	remote string
+	size   int64
+}
+
+func (f *Fs) wrapInfo(src fs.ObjectInfo, newRemote string, size int64) *ObjectInfo {
+	return &ObjectInfo{
+		src:    src,
+		fs:     f,
+		remote: newRemote,
+		size:   size,
 	}
-	// Function that updates metadata object
-	updateMeta := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-		return o.mo, o.mo.Update(in, src, options...)
+}
+
+// Fs returns read only access to the Fs that this object is part of
+func (o *ObjectInfo) Fs() fs.Info {
	if o.fs == nil {
+		panic("stub ObjectInfo")
 	}
-	// Get our file compressibility
-	in, compressible, _, err := o.f.c.checkFileCompressibilityAndType(in)
-	if err != nil {
-		return err
+	return o.fs
+}
+
+// String returns string representation
+func (o *ObjectInfo) String() string {
+	return o.src.String()
+}
+
+// Storable returns whether object is storable
+func (o *ObjectInfo) Storable() bool {
+	return o.src.Storable()
+}
+
+// Remote returns the remote path
+func (o *ObjectInfo) Remote() string {
+	if o.remote != "" {
+		return o.remote
 	}
-	// Check if we're updating an uncompressed file with an uncompressible object
-	var newObject *Object
-	origName := o.Remote()
-	if o.meta.CompressionMode != Uncompressed || compressible {
-		// If we aren't, we must either move-then-update or reupload-then-remove the object, and update the metadata.
-		// Check if this FS supports moving
-		moveFs, ok := o.f.Fs.(fs.Mover)
-		if ok { // If this fs supports moving, use move-then-update. This may help keep some versioning alive.
- // First, move the object - var movedObject fs.Object - movedObject, err = moveFs.Move(o.Object, o.f.c.generateDataName(o.Remote(), src.Size(), compressible)) - if err != nil { - return err - } - // Create function that updates moved object, then update - update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return movedObject, movedObject.Update(in, src, options...) - } - newObject, err = o.f.putWithCustomFunctions(in, src, options, update, updateMeta, true) - } else { // If this fs does not support moving, fall back to reuploading the object then removing the old one. - newObject, err = o.f.putWithCustomFunctions(in, o.f.renameObjectInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, true) - removeErr := o.Object.Remove() // Note: We must do remove later so a failed update doesn't destroy data. - if removeErr != nil { - return removeErr - } - } - } else { - // Function that updates object - update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - return o.Object, o.Object.Update(in, src, options...) - } - // If we are, just update the object and metadata - newObject, err = o.f.putWithCustomFunctions(in, src, options, update, updateMeta, true) + return o.src.Remote() +} + +// Size returns the size of the file +func (o *ObjectInfo) Size() int64 { + if o.size != -1 { + return o.size } - // Update object metadata and return - o.Object = newObject.Object - o.meta = newObject.meta - o.size = newObject.size - return err + return o.src.Size() +} + +// ModTime returns the modification time +func (o *ObjectInfo) ModTime(ctx context.Context) time.Time { + return o.src.ModTime(ctx) +} + +// Hash returns the selected checksum of the file +// If no checksum is available it returns "" +func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) { + if ht != hash.MD5 { + return "", hash.ErrUnsupported + } + if o.Size() != o.src.Size() { + return "", hash.ErrUnsupported + } + value, err := o.src.Hash(ctx, ht) + if err == hash.ErrUnsupported { + return "", hash.ErrUnsupported + } + return value, err } // ID returns the ID of the Object if known, or "" if not @@ -1220,108 +1346,29 @@ func (o *Object) ID() string { return do.ID() } -// SetTier performs changing storage tier of the Object if -// multiple storage classes supported -func (o *Object) SetTier(tier string) error { - do, ok := o.Object.(fs.SetTierer) - if !ok { - return errors.New("press: underlying remote does not support SetTier") - } - return do.SetTier(tier) +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name } -// GetTier returns storage tier or class of the Object -func (o *Object) GetTier() string { - do, ok := o.Object.(fs.GetTierer) - if !ok { - return "" - } - return do.GetTier() +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root } -// RenamedObjectInfo is the renamed representation of an ObjectInfo -type RenamedObjectInfo struct { - fs.ObjectInfo - remote string - size int64 +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features } -func (f *Fs) renameObjectInfo(src fs.ObjectInfo, newRemote string, size int64) *RenamedObjectInfo { - return &RenamedObjectInfo{ - ObjectInfo: src, - remote: newRemote, - size: size, - } +// Return a string version +func (f *Fs) String() string { + return fmt.Sprintf("Compressed: %s:%s", f.name, f.root) } -// Remote gets the remote of the RenamedObjectInfo 
-func (o *RenamedObjectInfo) Remote() string { - return o.remote -} - -// Size is unknown -func (o *RenamedObjectInfo) Size() int64 { - return o.size -} - -// ObjectInfo describes a wrapped fs.ObjectInfo for being the source -type ObjectInfo struct { - fs.ObjectInfo - f *Fs - meta *ObjectMetadata -} - -// Gets a new ObjectInfo from an src and a metadata struct -func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo { - return &ObjectInfo{ - ObjectInfo: src, - f: f, - meta: nil, - } -} - -// Fs returns read only access to the Fs that this object is part of -func (o *ObjectInfo) Fs() fs.Info { - return o.f -} - -// Remote returns the remote path -func (o *ObjectInfo) Remote() string { - origFileName, _, _, err := processFileName(o.ObjectInfo.Remote()) - if err != nil { - fs.Errorf(o, "Could not get remote path for: %s", o.ObjectInfo.Remote()) - return o.ObjectInfo.Remote() - } - return origFileName -} - -// Size returns the size of the file -func (o *ObjectInfo) Size() int64 { - _, _, size, err := processFileName(o.ObjectInfo.Remote()) - if err != nil { - fs.Errorf(o, "Could not get size for: %s", o.ObjectInfo.Remote()) - return -1 - } - if size == -2 { // File is uncompressed - return o.ObjectInfo.Size() - } - return size -} - -// Hash returns the selected checksum of the file -// If no checksum is available it returns "" -func (o *ObjectInfo) Hash(ht hash.Type) (string, error) { - if o.meta == nil { - mo, err := o.f.NewObject(generateMetadataName(o.Remote())) - if err != nil { - return "", err - } - o.meta = readMetadata(mo) - } - if ht&hash.MD5 == 0 { - return "", hash.ErrUnsupported - } - return hex.EncodeToString(o.meta.Hash), nil +// Precision returns the precision of this Fs +func (f *Fs) Precision() time.Duration { + return f.Fs.Precision() } // Check the interfaces are satisfied @@ -1331,7 +1378,6 @@ var ( _ fs.Copier = (*Fs)(nil) _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) - _ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutStreamer = (*Fs)(nil) _ fs.CleanUpper = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil) @@ -1343,10 +1389,10 @@ var ( _ fs.ChangeNotifier = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil) _ fs.ObjectInfo = (*ObjectInfo)(nil) + _ fs.GetTierer = (*Object)(nil) + _ fs.SetTierer = (*Object)(nil) _ fs.Object = (*Object)(nil) _ fs.ObjectUnWrapper = (*Object)(nil) _ fs.IDer = (*Object)(nil) - _ fs.SetTierer = (*Object)(nil) - _ fs.GetTierer = (*Object)(nil) _ fs.MimeTyper = (*Object)(nil) ) diff --git a/backend/press/press_test.go b/backend/press/press_test.go index db4110ca7..1f40d0498 100644 --- a/backend/press/press_test.go +++ b/backend/press/press_test.go @@ -6,9 +6,9 @@ import ( "path/filepath" "testing" - _ "github.com/ncw/rclone/backend/local" - "github.com/ncw/rclone/fstest" - "github.com/ncw/rclone/fstest/fstests" + _ "github.com/rclone/rclone/backend/local" + "github.com/rclone/rclone/fstest" + "github.com/rclone/rclone/fstest/fstests" ) // TestIntegration runs integration tests against the remote @@ -17,10 +17,21 @@ func TestIntegration(t *testing.T) { t.Skip("Skipping as -remote not set") } fstests.Run(t, &fstests.Opt{ - RemoteName: *fstest.RemoteName, - NilObject: (*Object)(nil), - UnimplementableFsMethods: []string{"OpenWriterAt"}, - UnimplementableObjectMethods: []string{}, + RemoteName: *fstest.RemoteName, + NilObject: (*Object)(nil), + UnimplementableFsMethods: []string{ + "OpenWriterAt", + "MergeDirs", + "DirCacheFlush", + "PutUnchecked", + "PutStream", + "UserInfo", + "Disconnect", + }, + UnimplementableObjectMethods: []string{ + "GetTier", + "SetTier", + }, }) } 
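The GetTier and SetTier entries in UnimplementableObjectMethods above reflect that press forwards tier calls only when the wrapped object itself implements the optional interface, via the type assertions added to press.go earlier in this patch. A minimal, self-contained sketch of that capability-probe pattern; every identifier below is illustrative, not rclone API:

package main

import "fmt"

// GetTierer mirrors the optional interface probed for in press.go.
type GetTierer interface {
	GetTier() string
}

// wrapper stands in for the press Object, which wraps an object of the
// underlying remote and forwards optional methods when supported.
type wrapper struct {
	inner interface{}
}

// GetTier forwards the call if the wrapped object supports tiers and
// reports "no tier" otherwise, like the Object.GetTier added above.
func (w wrapper) GetTier() string {
	if do, ok := w.inner.(GetTierer); ok {
		return do.GetTier()
	}
	return ""
}

// tiered is a toy object type that does support storage tiers.
type tiered struct{}

func (tiered) GetTier() string { return "STANDARD" }

func main() {
	fmt.Println(wrapper{inner: tiered{}}.GetTier())   // STANDARD
	fmt.Println(wrapper{inner: struct{}{}}.GetTier()) // empty string: no tier support
}
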
@@ -32,10 +43,21 @@ func TestRemoteLz4(t *testing.T) { tempdir := filepath.Join(os.TempDir(), "rclone-press-test-lz4") name := "TestPressLz4" fstests.Run(t, &fstests.Opt{ - RemoteName: name + ":", - NilObject: (*Object)(nil), - UnimplementableFsMethods: []string{"OpenWriterAt"}, - UnimplementableObjectMethods: []string{}, + RemoteName: name + ":", + NilObject: (*Object)(nil), + UnimplementableFsMethods: []string{ + "OpenWriterAt", + "MergeDirs", + "DirCacheFlush", + "PutUnchecked", + "PutStream", + "UserInfo", + "Disconnect", + }, + UnimplementableObjectMethods: []string{ + "GetTier", + "SetTier", + }, ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "press"}, {Name: name, Key: "remote", Value: tempdir}, @@ -52,37 +74,56 @@ func TestRemoteGzip(t *testing.T) { tempdir := filepath.Join(os.TempDir(), "rclone-press-test-gzip") name := "TestPressGzip" fstests.Run(t, &fstests.Opt{ - RemoteName: name + ":", - NilObject: (*Object)(nil), - UnimplementableFsMethods: []string{"OpenWriterAt"}, - UnimplementableObjectMethods: []string{}, + RemoteName: name + ":", + NilObject: (*Object)(nil), + UnimplementableFsMethods: []string{ + "OpenWriterAt", + "MergeDirs", + "DirCacheFlush", + "PutUnchecked", + "PutStream", + "UserInfo", + "Disconnect", + }, + UnimplementableObjectMethods: []string{ + "GetTier", + "SetTier", + }, ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "press"}, {Name: name, Key: "remote", Value: tempdir}, - {Name: name, Key: "compression_mode", Value: "gzip-min"}, + {Name: name, Key: "compression_mode", Value: "gzip"}, }, }) } -// TestRemoteXZ tests XZ compression -func TestRemoteXZ(t *testing.T) { - if !checkXZ() { - t.Skip("XZ binary not found on current system") - } +// TestRemoteXz tests XZ compression +func TestRemoteXz(t *testing.T) { if *fstest.RemoteName != "" { t.Skip("Skipping as -remote set") } tempdir := filepath.Join(os.TempDir(), "rclone-press-test-xz") - name := "TestPressXZ" + name := "TestPressXz" fstests.Run(t, &fstests.Opt{ - RemoteName: name + ":", - NilObject: (*Object)(nil), - UnimplementableFsMethods: []string{"OpenWriterAt"}, - UnimplementableObjectMethods: []string{}, + RemoteName: name + ":", + NilObject: (*Object)(nil), + UnimplementableFsMethods: []string{ + "OpenWriterAt", + "MergeDirs", + "DirCacheFlush", + "PutUnchecked", + "PutStream", + "UserInfo", + "Disconnect", + }, + UnimplementableObjectMethods: []string{ + "GetTier", + "SetTier", + }, ExtraConfig: []fstests.ExtraConfigItem{ {Name: name, Key: "type", Value: "press"}, {Name: name, Key: "remote", Value: tempdir}, - {Name: name, Key: "compression_mode", Value: "xz-min"}, + {Name: name, Key: "compression_mode", Value: "xz"}, }, }) } diff --git a/go.mod b/go.mod index 871bc6bbe..305d504ba 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/azure-storage-blob-go v0.10.0 github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd - github.com/OneOfOne/xxhash v1.2.7 github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 github.com/aalpar/deheap v0.0.0-20200318053559-9a0c2883bd56 github.com/abbot/go-http-auth v0.4.0 @@ -17,9 +16,11 @@ require ( github.com/aws/aws-sdk-go v1.32.11 github.com/billziss-gh/cgofuse v1.4.0 github.com/btcsuite/btcutil v1.0.2 // indirect + github.com/buengese/xxh32 v1.0.1 github.com/calebcase/tmpfile v1.0.2 // indirect github.com/coreos/go-semver v0.3.0 github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible + github.com/gabriel-vasile/mimetype v1.1.1 
github.com/gogo/protobuf v1.3.1 // indirect github.com/google/go-querystring v1.0.0 // indirect github.com/hanwen/go-fuse/v2 v2.0.3 @@ -38,6 +39,7 @@ require ( github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/pierrec/lz4 v2.4.1+incompatible github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.11.0 github.com/prometheus/client_golang v1.7.1 @@ -50,6 +52,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8 + github.com/ulikunitz/xz v0.5.8 github.com/xanzy/ssh-agent v0.2.1 github.com/youmark/pkcs8 v0.0.0-20200520070018-fad002e585ce github.com/yunify/qingstor-sdk-go/v3 v3.2.0 diff --git a/go.sum b/go.sum index c01197891..10526376b 100644 --- a/go.sum +++ b/go.sum @@ -56,8 +56,7 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.7 h1:fzrmmkskv067ZQbd9wERNGuxckWw67dyzoMG62p7LMo= -github.com/OneOfOne/xxhash v1.2.7/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd h1:+CYOsXi89xOqBkj7CuEJjA2It+j+R3ngUZEydr6mtkw= github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= @@ -104,6 +103,7 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buengese/xxh32 v1.0.1/go.mod h1:Q5GTtu7m/GuqzCc8YZ0n+oetaGFwW7oy291HvqLTZFk= github.com/calebcase/tmpfile v1.0.2-0.20200602150926-3af473ef8439/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= github.com/calebcase/tmpfile v1.0.2 h1:1AGuhKiUu4J6wxz6lxuF6ck3f8G2kaV6KSEny0RGCig= github.com/calebcase/tmpfile v1.0.2/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= @@ -142,11 +142,15 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.7.3 h1:kV0lw0TH1j1hozahVmcpFCsbV5hcS4ZalH+U7UoeTow= +github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gabriel-vasile/mimetype v1.0.2 h1:GKCo1TUCg0pV0R4atTcaLv/9SI2W9xPgMySZxUxcJOE= github.com/gabriel-vasile/mimetype v1.0.2/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= +github.com/gabriel-vasile/mimetype v1.1.1 h1:qbN9MPuRf3bstHu9zkI9jDWNfH//9+9kHxr9oRBBBOA= +github.com/gabriel-vasile/mimetype v1.1.1/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -235,8 +239,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/id01/go-lz4 v1.0.3 h1:D3krbAf5BppFsRSVa75yFo+JMxlTqFwuYpyHQAOgYds= -github.com/id01/go-lz4 v1.0.3/go.mod h1:G8scWkW5nw6fEwIREHZcWy3qddP/Go9IImmcit+bTzw= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -335,6 +337,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU= github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -421,6 +425,10 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k= github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod 
h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
@@ -759,6 +767,7 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
 gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
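
The orphan-avoidance logic this patch adds to Copy, Move, and Update reduces to one sequence: look up the destination, delete it if it already exists (its stored name may differ from the new one because the size is encoded in the name), then transfer the metadata object before the data object. A self-contained sketch of that sequence over a toy in-memory store; none of these identifiers are rclone API:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// store is a toy stand-in for the wrapped remote: names map to contents,
// and each data object "name" has a companion "name.meta" object.
type store map[string]string

// copyOverwriteSafe mirrors the sequence added to Copy and Move above:
// remove a pre-existing destination first so that no data or metadata
// object is left orphaned, then copy metadata, then data.
func copyOverwriteSafe(s store, src, dst string) error {
	if _, ok := s[src]; !ok {
		return errNotFound
	}
	if _, ok := s[dst]; ok {
		delete(s, dst)         // old data object would otherwise be orphaned
		delete(s, dst+".meta") // same for its metadata object
	}
	s[dst+".meta"] = s[src+".meta"] // metadata first, as the patch does
	s[dst] = s[src]                 // then the data object
	return nil
}

func main() {
	s := store{
		"a.bin.gz": "new data", "a.bin.gz.meta": "new meta",
		"b.bin": "old data", "b.bin.meta": "old meta",
	}
	if err := copyOverwriteSafe(s, "a.bin.gz", "b.bin"); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println(s["b.bin"]) // new data
}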