Merge remote-tracking branch 'upstream/main'

Commit 3fb5890f3a
@@ -171,4 +171,5 @@ snapshot:
  name_template: "{{ incpatch .Version }}-SNAPSHOT"
source:
  # https://goreleaser.com/customization/source/
  enabled: false
  enabled: true
  name_template: "{{ .ProjectName }}-{{ .Version }}-source-code"
@@ -23,11 +23,8 @@
"fmt"
"os"
"os/signal"
"path"
"syscall"

"codeberg.org/gruf/go-store/kv"
"codeberg.org/gruf/go-store/storage"
"github.com/sirupsen/logrus"
"github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
"github.com/superseriousbusiness/gotosocial/internal/api"
@@ -68,6 +65,7 @@
"github.com/superseriousbusiness/gotosocial/internal/oidc"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/transport"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/internal/web"
@@ -106,17 +104,10 @@
typeConverter := typeutils.NewConverter(dbService)

// Open the storage backend
storageBasePath := config.GetStorageLocalBasePath()
storage, err := kv.OpenFile(storageBasePath, &storage.DiskConfig{
// Put the store lockfile in the storage dir itself.
// Normally this would not be safe, since we could end up
// overwriting the lockfile if we store a file called 'store.lock'.
// However, in this case it's OK because the keys are set by
// GtS and not the user, so we know we're never going to overwrite it.
LockFile: path.Join(storageBasePath, "store.lock"),
})

storage, err := gtsstorage.AutoConfig()
if err != nil {
return fmt.Errorf("error creating storage backend: %s", err)
return fmt.Errorf("error creating storage backend: %w", err)
}

// Build HTTP client (TODO: add configurables here)
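The call to `gtsstorage.AutoConfig()` replaces the hard-coded local key-value store with a driver chosen from configuration. The implementation of AutoConfig is not part of this hunk; a minimal sketch of how such a selector could look, using the config getters added later in this commit and the minio-go client that go.mod now pulls in, might be the following. The `Driver`, `NewLocal` and `NewS3` names are assumptions, not the real API.

```go
package storage

import (
	"fmt"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/superseriousbusiness/gotosocial/internal/config"
)

// AutoConfig picks a storage driver based on the configured backend.
// NewLocal and NewS3 stand in for whatever constructors the real
// internal/storage package provides; they are not shown in this diff.
func AutoConfig() (Driver, error) {
	switch backend := config.GetStorageBackend(); backend {
	case "local":
		return NewLocal(config.GetStorageLocalBasePath())
	case "s3":
		client, err := minio.New(config.GetStorageS3Endpoint(), &minio.Options{
			Creds: credentials.NewStaticV4(
				config.GetStorageS3AccessKey(),
				config.GetStorageS3SecretKey(),
				"",
			),
			Secure: config.GetStorageS3UseSSL(),
		})
		if err != nil {
			return nil, fmt.Errorf("creating minio client: %w", err)
		}
		return NewS3(client, config.GetStorageS3BucketName()), nil
	default:
		return nil, fmt.Errorf("unrecognized storage backend %q", backend)
	}
}
```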
@@ -58,6 +58,7 @@
"github.com/superseriousbusiness/gotosocial/internal/gotosocial"
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oidc"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/web"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -70,7 +71,12 @@
dbService := testrig.NewTestDB()
testrig.StandardDBSetup(dbService, nil)
router := testrig.NewTestRouter(dbService)
storageBackend := testrig.NewTestStorage()
var storageBackend storage.Driver
if os.Getenv("GTS_STORAGE_BACKEND") == "s3" {
storageBackend = testrig.NewS3Storage()
} else {
storageBackend = testrig.NewInMemoryStorage()
}
testrig.StandardStorageSetup(storageBackend, "./testrig/media")

// Create client API and federator worker pools
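Throughout the test suites below, the concrete `*kv.KVStore` field is replaced by the `storage.Driver` interface declared here, so the same tests can run against the in-memory backend or a real S3 bucket. The interface itself is not shown in this commit; judging by how it is used in this diff (a context-aware `Get`, plus a URL consulted by the fileserver), its shape is roughly the sketch below, with the exact method set being an assumption.

```go
package storage

import (
	"context"
	"net/url"
)

// Driver abstracts over the local disk and S3 backends. The method set
// here is inferred from the calls visible in this diff and is only an
// approximation of the real interface.
type Driver interface {
	// Get returns the blob stored under key.
	Get(ctx context.Context, key string) ([]byte, error)
	// Put stores value under key.
	Put(ctx context.Context, key string, value []byte) error
	// Delete removes the blob stored under key.
	Delete(ctx context.Context, key string) error
	// URL returns a directly servable (e.g. pre-signed S3) URL for key,
	// or nil if the backend cannot serve it without proxying.
	URL(ctx context.Context, key string) *url.URL
}
```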
Binary image file changed (not shown): 102 KiB → 123 KiB
Binary image file changed (not shown): 645 KiB → 476 KiB
@@ -17,8 +17,39 @@ storage-backend: "local"

# String. Directory to use as a base path for storing files.
# Make sure whatever user/group gotosocial is running as has permission to access
# this directly, and create new subdirectories and files with in.
# this directory, and create new subdirectories and files within it.
# Only required when running with the local storage backend.
# Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
# Default: "/gotosocial/storage"
storage-local-base-path: "/gotosocial/storage"

# String. API endpoint of the S3 compatible service.
# Only required when running with the s3 storage backend.
# Examples: ["minio:9000", "s3.nl-ams.scw.cloud", "s3.us-west-002.backblazeb2.com"]
# Default: ""
storage-s3-endpoint: ""

# String. Access key part of the S3 credentials.
# Consider setting this value using environment variables to avoid leaking it via the config file
# Only required when running with the s3 storage backend.
# Examples: ["AKIAJSIE27KKMHXI3BJQ","miniouser"]
# Default: ""
storage-s3-access-key: ""
# String. Secret key part of the S3 credentials.
# Consider setting this value using environment variables to avoid leaking it via the config file
# Only required when running with the s3 storage backend.
# Examples: ["5bEYu26084qjSFyclM/f2pz4gviSfoOg+mFwBH39","miniopassword"]
# Default: ""
storage-s3-secret-key: ""
# String. Name of the storage bucket.
#
# If you have already encoded your bucket name in the storage-s3-endpoint, this
# value will be used as a directory containing your data.
#
# The bucket must exist prior to starting GoToSocial
#
# Only required when running with the s3 storage backend.
# Examples: ["gts","cool-instance"]
# Default: ""
storage-s3-bucket: ""
```
@@ -253,16 +253,46 @@ media-remote-cache-days: 30

# String. Type of storage backend to use.
# Examples: ["local", "s3"]
# Default: "local" (storage on local disk)
# NOTE: s3 storage is not yet supported!
storage-backend: "local"

# String. Directory to use as a base path for storing files.
# Make sure whatever user/group gotosocial is running as has permission to access
# this directory, and create new subdirectories and files within it.
# Only required when running with the local storage backend.
# Examples: ["/home/gotosocial/storage", "/opt/gotosocial/datastorage"]
# Default: "/gotosocial/storage"
storage-local-base-path: "/gotosocial/storage"

# String. API endpoint of the S3 compatible service.
# Only required when running with the s3 storage backend.
# Examples: ["minio:9000", "s3.nl-ams.scw.cloud", "s3.us-west-002.backblazeb2.com"]
# Default: ""
storage-s3-endpoint: ""

# String. Access key part of the S3 credentials.
# Consider setting this value using environment variables to avoid leaking it via the config file
# Only required when running with the s3 storage backend.
# Examples: ["AKIAJSIE27KKMHXI3BJQ","miniouser"]
# Default: ""
storage-s3-access-key: ""
# String. Secret key part of the S3 credentials.
# Consider setting this value using environment variables to avoid leaking it via the config file
# Only required when running with the s3 storage backend.
# Examples: ["5bEYu26084qjSFyclM/f2pz4gviSfoOg+mFwBH39","miniopassword"]
# Default: ""
storage-s3-secret-key: ""
# String. Name of the storage bucket.
#
# If you have already encoded your bucket name in the storage-s3-endpoint, this
# value will be used as a directory containing your data.
#
# The bucket must exist prior to starting GoToSocial
#
# Only required when running with the s3 storage backend.
# Examples: ["gts","cool-instance"]
# Default: ""
storage-s3-bucket: ""

###########################
##### STATUSES CONFIG #####
###########################
go.mod (8 changes)
@@ -26,6 +26,7 @@ require (
github.com/jackc/pgx/v4 v4.15.0
github.com/microcosm-cc/bluemonday v1.0.18
github.com/miekg/dns v1.1.49
github.com/minio/minio-go/v7 v7.0.29
github.com/mitchellh/mapstructure v1.5.0
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/oklog/ulid v1.3.1
@@ -67,6 +68,7 @@ require (
github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d // indirect
github.com/dsoprea/go-png-image-structure/v2 v2.0.0-20210512210324-29b889a6093d // indirect
github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.1 // indirect
@@ -92,9 +94,14 @@ require (
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/minio/md5-simd v1.1.0 // indirect
github.com/minio/sha256-simd v0.1.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
@@ -102,6 +109,7 @@ require (
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rs/xid v1.2.1 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
go.sum (13 changes)
@@ -354,6 +354,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -393,6 +397,14 @@ github.com/microcosm-cc/bluemonday v1.0.18 h1:6HcxvXDAi3ARt3slx6nTesbvorIc3QeTzB
github.com/microcosm-cc/bluemonday v1.0.18/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM=
github.com/miekg/dns v1.1.49 h1:qe0mQU3Z/XpFeE+AEBo2rqaS1IPBJ3anmqZ4XiZJVG8=
github.com/miekg/dns v1.1.49/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
github.com/minio/minio-go/v7 v7.0.29 h1:7md6lIq1s6zPzUiDRX1BVLHolA4pDM8RMQqIszaJbY0=
github.com/minio/minio-go/v7 v7.0.29/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -435,6 +447,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@@ -6,7 +6,6 @@
"net/http"
"net/http/httptest"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/account"
@@ -20,6 +19,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/testrig"
)

@@ -27,7 +27,7 @@ type AccountStandardTestSuite struct {
// standard suite interfaces
suite.Suite
db db.DB
storage *kv.KVStore
storage storage.Driver
mediaManager media.Manager
federator federation.Federator
processor processing.Processor
@@ -65,7 +65,7 @@ func (suite *AccountStandardTestSuite) SetupTest() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.sentEmails = make(map[string]string)
@@ -24,7 +24,6 @@
"net/http"
"net/http/httptest"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/admin"
@@ -38,6 +37,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/testrig"
)

@@ -45,7 +45,7 @@ type AdminStandardTestSuite struct {
// standard suite interfaces
suite.Suite
db db.DB
storage *kv.KVStore
storage storage.Driver
mediaManager media.Manager
federator federation.Federator
processor processing.Processor
@@ -83,7 +83,7 @@ func (suite *AdminStandardTestSuite) SetupTest() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.sentEmails = make(map[string]string)
@@ -103,10 +103,10 @@ func (suite *EmojiCreateTestSuite) TestEmojiCreate() {
suite.Empty(dbEmoji.CategoryID)

// emoji should be in storage
emojiBytes, err := suite.storage.Get(dbEmoji.ImagePath)
emojiBytes, err := suite.storage.Get(ctx, dbEmoji.ImagePath)
suite.NoError(err)
suite.Len(emojiBytes, dbEmoji.ImageFileSize)
emojiStaticBytes, err := suite.storage.Get(dbEmoji.ImageStaticPath)
emojiStaticBytes, err := suite.storage.Get(ctx, dbEmoji.ImageStaticPath)
suite.NoError(err)
suite.Len(emojiStaticBytes, dbEmoji.ImageStaticFileSize)
}
@@ -24,7 +24,6 @@
"fmt"
"net/http/httptest"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/memstore"
"github.com/gin-gonic/gin"
@@ -42,13 +41,14 @@
"github.com/superseriousbusiness/gotosocial/internal/oidc"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/router"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/testrig"
)

type AuthStandardTestSuite struct {
suite.Suite
db db.DB
storage *kv.KVStore
storage storage.Driver
mediaManager media.Manager
federator federation.Federator
processor processing.Processor
@@ -88,7 +88,7 @@ func (suite *AuthStandardTestSuite) SetupTest() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
@@ -84,6 +84,11 @@ func (m *FileServer) ServeFile(c *gin.Context) {
return
}

if content.URL != nil {
c.Redirect(http.StatusFound, content.URL.String())
return
}

defer func() {
// if the content is a ReadCloser, close it when we're done
if closer, ok := content.Content.(io.ReadCloser); ok {
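The new `content.URL` branch lets the fileserver answer with a redirect instead of proxying bytes when the storage backend can serve the object itself. For S3, that URL would typically be a short-lived pre-signed link; a hedged sketch of how an S3 driver could produce one with minio-go follows. The `S3` type and its fields are assumptions; only the PresignedGetObject call is the real minio-go v7 API.

```go
package storage

import (
	"context"
	"net/url"
	"time"

	"github.com/minio/minio-go/v7"
)

// S3 is a hypothetical S3-backed driver; only the pre-signed URL path is
// sketched here.
type S3 struct {
	Client *minio.Client
	Bucket string
}

// URL asks the object store for a time-limited pre-signed GET URL, which
// the fileserver above hands out as a 302 redirect. On error it returns
// nil so callers fall back to streaming through the API.
func (s *S3) URL(ctx context.Context, key string) *url.URL {
	u, err := s.Client.PresignedGetObject(ctx, s.Bucket, key, 10*time.Minute, url.Values{})
	if err != nil {
		return nil
	}
	return u
}
```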
@@ -26,7 +26,6 @@
"net/http/httptest"
"testing"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/suite"
@@ -40,6 +39,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -48,7 +48,7 @@ type ServeFileTestSuite struct {
// standard suite interfaces
suite.Suite
db db.DB
storage *kv.KVStore
storage storage.Driver
federator federation.Federator
tc typeutils.TypeConverter
processor processing.Processor
@@ -81,7 +81,7 @@ func (suite *ServeFileTestSuite) SetupSuite() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)

@@ -160,7 +160,7 @@ func (suite *ServeFileTestSuite) TestServeOriginalFileSuccessful() {
suite.NoError(err)
suite.NotNil(b)

fileInStorage, err := suite.storage.Get(targetAttachment.File.Path)
fileInStorage, err := suite.storage.Get(ctx, targetAttachment.File.Path)
suite.NoError(err)
suite.NotNil(fileInStorage)
suite.Equal(b, fileInStorage)
@@ -206,7 +206,7 @@ func (suite *ServeFileTestSuite) TestServeSmallFileSuccessful() {
suite.NoError(err)
suite.NotNil(b)

fileInStorage, err := suite.storage.Get(targetAttachment.Thumbnail.Path)
fileInStorage, err := suite.storage.Get(ctx, targetAttachment.Thumbnail.Path)
suite.NoError(err)
suite.NotNil(fileInStorage)
suite.Equal(b, fileInStorage)
@@ -23,7 +23,6 @@
"fmt"
"net/http/httptest"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/followrequest"
@@ -37,13 +36,14 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/testrig"
)

type FollowRequestStandardTestSuite struct {
suite.Suite
db db.DB
storage *kv.KVStore
storage storage.Driver
mediaManager media.Manager
federator federation.Federator
processor processing.Processor
@@ -80,7 +80,7 @@ func (suite *FollowRequestStandardTestSuite) SetupTest() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
@@ -23,7 +23,6 @@
"fmt"
"net/http/httptest"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/instance"
@@ -37,6 +36,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/testrig"
)

@@ -44,7 +44,7 @@ type InstanceStandardTestSuite struct {
// standard suite interfaces
suite.Suite
db db.DB
storage *kv.KVStore
storage storage.Driver
mediaManager media.Manager
federator federation.Federator
processor processing.Processor
@@ -82,7 +82,7 @@ func (suite *InstanceStandardTestSuite) SetupTest() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.sentEmails = make(map[string]string)
@@ -63,7 +63,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch1() {
b, err := io.ReadAll(result.Body)
suite.NoError(err)

suite.Equal(`{"uri":"http://localhost:8080","title":"Example Instance","description":"","short_description":"","email":"someone@example.org","version":"","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","contact_account":{"id":"01F8MH17FWEB39HZJ76B6VXSKF","username":"admin","acct":"admin","display_name":"","locked":false,"bot":false,"created_at":"2022-05-17T13:10:59.000Z","note":"","url":"http://localhost:8080/@admin","avatar":"","avatar_static":"","header":"","header_static":"","followers_count":1,"following_count":1,"statuses_count":4,"last_status_at":"2021-10-20T10:41:37.000Z","emojis":[],"fields":[]},"max_toot_chars":5000}`, string(b))
suite.Equal(`{"uri":"http://localhost:8080","title":"Example Instance","description":"","short_description":"","email":"someone@example.org","version":"0.0.0-testrig","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","contact_account":{"id":"01F8MH17FWEB39HZJ76B6VXSKF","username":"admin","acct":"admin","display_name":"","locked":false,"bot":false,"created_at":"2022-05-17T13:10:59.000Z","note":"","url":"http://localhost:8080/@admin","avatar":"","avatar_static":"","header":"","header_static":"","followers_count":1,"following_count":1,"statuses_count":4,"last_status_at":"2021-10-20T10:41:37.000Z","emojis":[],"fields":[]},"max_toot_chars":5000}`, string(b))
}

func (suite *InstancePatchTestSuite) TestInstancePatch2() {
@@ -93,7 +93,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch2() {
b, err := io.ReadAll(result.Body)
suite.NoError(err)

suite.Equal(`{"uri":"http://localhost:8080","title":"Geoff's Instance","description":"","short_description":"","email":"","version":"","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","max_toot_chars":5000}`, string(b))
suite.Equal(`{"uri":"http://localhost:8080","title":"Geoff's Instance","description":"","short_description":"","email":"","version":"0.0.0-testrig","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","max_toot_chars":5000}`, string(b))
}

func (suite *InstancePatchTestSuite) TestInstancePatch3() {
@@ -123,7 +123,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch3() {
b, err := io.ReadAll(result.Body)
suite.NoError(err)

suite.Equal(`{"uri":"http://localhost:8080","title":"localhost:8080","description":"","short_description":"\u003cp\u003eThis is some html, which is \u003cem\u003eallowed\u003c/em\u003e in short descriptions.\u003c/p\u003e","email":"","version":"","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","max_toot_chars":5000}`, string(b))
suite.Equal(`{"uri":"http://localhost:8080","title":"localhost:8080","description":"","short_description":"\u003cp\u003eThis is some html, which is \u003cem\u003eallowed\u003c/em\u003e in short descriptions.\u003c/p\u003e","email":"","version":"0.0.0-testrig","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","max_toot_chars":5000}`, string(b))
}

func (suite *InstancePatchTestSuite) TestInstancePatch4() {
@@ -214,7 +214,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch6() {
b, err := io.ReadAll(result.Body)
suite.NoError(err)

suite.Equal(`{"uri":"http://localhost:8080","title":"localhost:8080","description":"","short_description":"","email":"","version":"","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","max_toot_chars":5000}`, string(b))
suite.Equal(`{"uri":"http://localhost:8080","title":"localhost:8080","description":"","short_description":"","email":"","version":"0.0.0-testrig","registrations":true,"approval_required":true,"invites_enabled":false,"configuration":{"statuses":{"max_characters":5000,"max_media_attachments":6,"characters_reserved_per_url":999},"media_attachments":{"supported_mime_types":["image/jpeg","image/gif","image/png"],"image_size_limit":1048576,"image_matrix_limit":16777216,"video_size_limit":5242880,"video_frame_rate_limit":60,"video_matrix_limit":16777216},"polls":{"max_options":6,"max_characters_per_option":50,"min_expiration":300,"max_expiration":2629746}},"urls":{"streaming_api":"wss://localhost:8080"},"stats":{"domain_count":2,"status_count":16,"user_count":4},"thumbnail":"http://localhost:8080/assets/logo.png","max_toot_chars":5000}`, string(b))
}

func (suite *InstancePatchTestSuite) TestInstancePatch7() {
@@ -30,7 +30,6 @@
"net/http/httptest"
"testing"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/suite"
@@ -46,6 +45,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -54,7 +54,7 @@ type MediaCreateTestSuite struct {
// standard suite interfaces
suite.Suite
db db.DB
storage *kv.KVStore
storage *storage.Local
mediaManager media.Manager
federator federation.Federator
tc typeutils.TypeConverter
@@ -87,7 +87,7 @@ func (suite *MediaCreateTestSuite) SetupSuite() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.oauthServer = testrig.NewTestOauthServer(suite.db)
@@ -138,7 +138,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessful() {

// see what's in storage *before* the request
storageKeysBeforeRequest := []string{}
iter, err := suite.storage.Iterator(nil)
iter, err := suite.storage.KVStore.Iterator(nil)
if err != nil {
panic(err)
}
@@ -164,7 +164,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessful() {

// check what's in storage *after* the request
storageKeysAfterRequest := []string{}
iter, err = suite.storage.Iterator(nil)
iter, err = suite.storage.KVStore.Iterator(nil)
if err != nil {
panic(err)
}
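Note that this suite keeps iterating over storage keys via `suite.storage.KVStore.Iterator(nil)` and declares its field as `*storage.Local`, which suggests the local driver is a thin wrapper that still exposes the go-store key-value store the old code used directly. A rough sketch of that arrangement follows; the field and method names are assumptions based on this diff, not the real implementation.

```go
package storage

import (
	"context"

	"codeberg.org/gruf/go-store/kv"
)

// Local wraps the go-store KVStore that older code used directly.
// Exporting the KVStore field is what lets tests keep calling
// suite.storage.KVStore.Iterator(nil).
type Local struct {
	KVStore *kv.KVStore
}

// Get satisfies a context-aware Driver interface by delegating to the
// underlying key-value store.
func (l *Local) Get(ctx context.Context, key string) ([]byte, error) {
	return l.KVStore.Get(key)
}
```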
@@ -28,7 +28,6 @@
"net/http/httptest"
"testing"

"codeberg.org/gruf/go-store/kv"
"github.com/gin-gonic/gin"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/suite"
@@ -44,6 +43,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -52,7 +52,7 @@ type MediaUpdateTestSuite struct {
// standard suite interfaces
suite.Suite
db db.DB
storage *kv.KVStore
storage storage.Driver
federator federation.Federator
tc typeutils.TypeConverter
mediaManager media.Manager
@@ -85,7 +85,7 @@ func (suite *MediaUpdateTestSuite) SetupSuite() {
clientWorker := concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)

suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.oauthServer = testrig.NewTestOauthServer(suite.db)
@@ -19,7 +19,6 @@
package status_test

import (
"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/status"
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
@@ -30,6 +29,7 @@
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -43,7 +43,7 @@ type StatusStandardTestSuite struct {
federator federation.Federator
emailSender email.Sender
processor processing.Processor
storage *kv.KVStore
storage storage.Driver

// standard suite models
testTokens map[string]*gtsmodel.Token
@@ -76,7 +76,7 @@ func (suite *StatusStandardTestSuite) SetupTest() {

suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
testrig.StandardDBSetup(suite.db, nil)
testrig.StandardStorageSetup(suite.storage, "../../../../testrig/media")

@@ -26,7 +26,6 @@
"testing"

"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/status"
"github.com/superseriousbusiness/gotosocial/internal/api/model"
@@ -72,36 +71,101 @@ func (suite *StatusBoostTestSuite) TestPostBoost() {
result := recorder.Result()
defer result.Body.Close()
b, err := ioutil.ReadAll(result.Body)
assert.NoError(suite.T(), err)
suite.NoError(err)

statusReply := &model.Status{}
err = json.Unmarshal(b, statusReply)
assert.NoError(suite.T(), err)
suite.NoError(err)

assert.False(suite.T(), statusReply.Sensitive)
assert.Equal(suite.T(), model.VisibilityPublic, statusReply.Visibility)
suite.False(statusReply.Sensitive)
suite.Equal(model.VisibilityPublic, statusReply.Visibility)

assert.Equal(suite.T(), targetStatus.ContentWarning, statusReply.SpoilerText)
assert.Equal(suite.T(), targetStatus.Content, statusReply.Content)
assert.Equal(suite.T(), "the_mighty_zork", statusReply.Account.Username)
assert.Len(suite.T(), statusReply.MediaAttachments, 0)
assert.Len(suite.T(), statusReply.Mentions, 0)
assert.Len(suite.T(), statusReply.Emojis, 0)
assert.Len(suite.T(), statusReply.Tags, 0)
suite.Equal(targetStatus.ContentWarning, statusReply.SpoilerText)
suite.Equal(targetStatus.Content, statusReply.Content)
suite.Equal("the_mighty_zork", statusReply.Account.Username)
suite.Len(statusReply.MediaAttachments, 0)
suite.Len(statusReply.Mentions, 0)
suite.Len(statusReply.Emojis, 0)
suite.Len(statusReply.Tags, 0)

assert.NotNil(suite.T(), statusReply.Application)
assert.Equal(suite.T(), "really cool gts application", statusReply.Application.Name)
suite.NotNil(statusReply.Application)
suite.Equal("really cool gts application", statusReply.Application.Name)

assert.NotNil(suite.T(), statusReply.Reblog)
assert.Equal(suite.T(), 1, statusReply.Reblog.ReblogsCount)
assert.Equal(suite.T(), 1, statusReply.Reblog.FavouritesCount)
assert.Equal(suite.T(), targetStatus.Content, statusReply.Reblog.Content)
assert.Equal(suite.T(), targetStatus.ContentWarning, statusReply.Reblog.SpoilerText)
assert.Equal(suite.T(), targetStatus.AccountID, statusReply.Reblog.Account.ID)
assert.Len(suite.T(), statusReply.Reblog.MediaAttachments, 1)
assert.Len(suite.T(), statusReply.Reblog.Tags, 1)
assert.Len(suite.T(), statusReply.Reblog.Emojis, 1)
assert.Equal(suite.T(), "superseriousbusiness", statusReply.Reblog.Application.Name)
suite.NotNil(statusReply.Reblog)
suite.Equal(1, statusReply.Reblog.ReblogsCount)
suite.Equal(1, statusReply.Reblog.FavouritesCount)
suite.Equal(targetStatus.Content, statusReply.Reblog.Content)
suite.Equal(targetStatus.ContentWarning, statusReply.Reblog.SpoilerText)
suite.Equal(targetStatus.AccountID, statusReply.Reblog.Account.ID)
suite.Len(statusReply.Reblog.MediaAttachments, 1)
suite.Len(statusReply.Reblog.Tags, 1)
suite.Len(statusReply.Reblog.Emojis, 1)
suite.Equal("superseriousbusiness", statusReply.Reblog.Application.Name)
}

func (suite *StatusBoostTestSuite) TestPostBoostOwnFollowersOnly() {
t := suite.testTokens["local_account_1"]
oauthToken := oauth.DBTokenToToken(t)

testStatus := suite.testStatuses["local_account_1_status_5"]
testAccount := suite.testAccounts["local_account_1"]
testUser := suite.testUsers["local_account_1"]

recorder := httptest.NewRecorder()
ctx, _ := gin.CreateTestContext(recorder)
ctx.Set(oauth.SessionAuthorizedApplication, suite.testApplications["application_1"])
ctx.Set(oauth.SessionAuthorizedToken, oauthToken)
ctx.Set(oauth.SessionAuthorizedUser, testUser)
ctx.Set(oauth.SessionAuthorizedAccount, testAccount)
ctx.Request = httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://localhost:8080%s", strings.Replace(status.ReblogPath, ":id", testStatus.ID, 1)), nil)
ctx.Request.Header.Set("accept", "application/json")

ctx.Params = gin.Params{
gin.Param{
Key: status.IDKey,
Value: testStatus.ID,
},
}

suite.statusModule.StatusBoostPOSTHandler(ctx)

// check response
suite.EqualValues(http.StatusOK, recorder.Code)

result := recorder.Result()
defer result.Body.Close()
b, err := ioutil.ReadAll(result.Body)
suite.NoError(err)

responseStatus := &model.Status{}
err = json.Unmarshal(b, responseStatus)
suite.NoError(err)

suite.False(responseStatus.Sensitive)
suite.Equal(suite.tc.VisToAPIVis(context.Background(), testStatus.Visibility), responseStatus.Visibility)

suite.Equal(testStatus.ContentWarning, responseStatus.SpoilerText)
suite.Equal(testStatus.Content, responseStatus.Content)
suite.Equal("the_mighty_zork", responseStatus.Account.Username)
suite.Len(responseStatus.MediaAttachments, 0)
suite.Len(responseStatus.Mentions, 0)
suite.Len(responseStatus.Emojis, 0)
suite.Len(responseStatus.Tags, 0)

suite.NotNil(responseStatus.Application)
suite.Equal("really cool gts application", responseStatus.Application.Name)

suite.NotNil(responseStatus.Reblog)
suite.Equal(1, responseStatus.Reblog.ReblogsCount)
suite.Equal(0, responseStatus.Reblog.FavouritesCount)
suite.Equal(testStatus.Content, responseStatus.Reblog.Content)
suite.Equal(testStatus.ContentWarning, responseStatus.Reblog.SpoilerText)
suite.Equal(testStatus.AccountID, responseStatus.Reblog.Account.ID)
suite.Equal(suite.tc.VisToAPIVis(context.Background(), testStatus.Visibility), responseStatus.Reblog.Visibility)
suite.Empty(responseStatus.Reblog.MediaAttachments)
suite.Empty(responseStatus.Reblog.Tags)
suite.Empty(responseStatus.Reblog.Emojis)
suite.Equal("really cool gts application", responseStatus.Reblog.Application.Name)
}

// try to boost a status that's not boostable
@@ -139,8 +203,8 @@ func (suite *StatusBoostTestSuite) TestPostUnboostable() {
result := recorder.Result()
defer result.Body.Close()
b, err := ioutil.ReadAll(result.Body)
assert.NoError(suite.T(), err)
assert.Equal(suite.T(), `{"error":"Forbidden"}`, string(b))
suite.NoError(err)
suite.Equal(`{"error":"Forbidden"}`, string(b))
}

// try to boost a status that's not visible to the user
@@ -19,7 +19,6 @@
package user_test

import (
"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/client/user"
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
@@ -30,6 +29,7 @@
"github.com/superseriousbusiness/gotosocial/internal/media"
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -42,7 +42,7 @@ type UserStandardTestSuite struct {
federator federation.Federator
emailSender email.Sender
processor processing.Processor
storage *kv.KVStore
storage storage.Driver

testTokens map[string]*gtsmodel.Token
testClients map[string]*gtsmodel.Client
@@ -66,7 +66,7 @@ func (suite *UserStandardTestSuite) SetupTest() {
suite.testUsers = testrig.NewTestUsers()
suite.testAccounts = testrig.NewTestAccounts()
suite.db = testrig.NewTestDB()
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
@@ -18,7 +18,10 @@

package model

import "io"
import (
"io"
"net/url"
)

// Content wraps everything needed to serve a blob of content (some kind of media) through the API.
type Content struct {
@@ -28,6 +31,8 @@ type Content struct {
ContentLength int64
// Actual content
Content io.Reader
// Resource URL to forward to if the file can be fetched from the storage directly (e.g signed S3 URL)
URL *url.URL
}

// GetContentRequestForm describes a piece of content desired by the caller of the fileserver API.
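With the new field, whoever builds a `Content` can either attach a directly servable URL (the S3 case) or fall back to streaming the bytes it read from storage. A small illustrative sketch follows, using the assumed driver methods from the earlier sketches; `apimodel` stands for this package (internal/api/model), and the real processor code is not part of this diff.

```go
// buildContent is illustrative only.
func buildContent(ctx context.Context, store storage.Driver, key string) (*apimodel.Content, error) {
	c := &apimodel.Content{}

	// Backend can serve the object itself (e.g. a pre-signed S3 URL):
	// let the fileserver redirect rather than proxy the bytes.
	if u := store.URL(ctx, key); u != nil {
		c.URL = u
		return c, nil
	}

	// Otherwise read from storage and stream through the API.
	b, err := store.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	c.Content = bytes.NewReader(b)
	c.ContentLength = int64(len(b))
	return c, nil
}
```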
@@ -19,7 +19,6 @@
package user_test

import (
"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/user"
"github.com/superseriousbusiness/gotosocial/internal/api/security"
@@ -32,6 +31,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -45,7 +45,7 @@ type UserStandardTestSuite struct {
federator federation.Federator
emailSender email.Sender
processor processing.Processor
storage *kv.KVStore
storage storage.Driver
oauthServer oauth.Server
securityModule *security.Module

@@ -83,7 +83,7 @@ func (suite *UserStandardTestSuite) SetupTest() {

suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
@@ -23,7 +23,6 @@
"crypto/rsa"
"time"

"codeberg.org/gruf/go-store/kv"
"github.com/stretchr/testify/suite"
"github.com/superseriousbusiness/gotosocial/internal/ap"
"github.com/superseriousbusiness/gotosocial/internal/api/s2s/webfinger"
@@ -37,6 +36,7 @@
"github.com/superseriousbusiness/gotosocial/internal/messages"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
"github.com/superseriousbusiness/gotosocial/internal/processing"
"github.com/superseriousbusiness/gotosocial/internal/storage"
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
"github.com/superseriousbusiness/gotosocial/testrig"
)
@@ -50,7 +50,7 @@ type WebfingerStandardTestSuite struct {
federator federation.Federator
emailSender email.Sender
processor processing.Processor
storage *kv.KVStore
storage storage.Driver
oauthServer oauth.Server
securityModule *security.Module

@@ -86,7 +86,7 @@ func (suite *WebfingerStandardTestSuite) SetupTest() {

suite.db = testrig.NewTestDB()
suite.tc = testrig.NewTestTypeConverter(suite.db)
suite.storage = testrig.NewTestStorage()
suite.storage = testrig.NewInMemoryStorage()
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
suite.federator = testrig.NewTestFederator(suite.db, testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../../testrig/media"), suite.db, fedWorker), suite.storage, suite.mediaManager, fedWorker)
suite.emailSender = testrig.NewEmailSender("../../../../web/template/", nil)
@@ -82,6 +82,11 @@ type Configuration struct {

StorageBackend string `name:"storage-backend" usage:"Storage backend to use for media attachments"`
StorageLocalBasePath string `name:"storage-local-base-path" usage:"Full path to an already-created directory where gts should store/retrieve media files. Subfolders will be created within this dir."`
StorageS3Endpoint string `name:"storage-s3-endpoint" usage:"S3 Endpoint URL (e.g 'minio.example.org:9000')"`
StorageS3AccessKey string `name:"storage-s3-access-key" usage:"S3 Access Key"`
StorageS3SecretKey string `name:"storage-s3-secret-key" usage:"S3 Secret Key"`
StorageS3UseSSL bool `name:"storage-s3-use-ssl" usage:"Use SSL for S3 connections. Only set this to 'false' when testing locally"`
StorageS3BucketName string `name:"storage-s3-bucket" usage:"Place blobs in this bucket"`

StatusesMaxChars int `name:"statuses-max-chars" usage:"Max permitted characters for posted statuses"`
StatusesCWMaxChars int `name:"statuses-cw-max-chars" usage:"Max permitted characters for content/spoiler warnings on statuses"`
@@ -61,6 +61,7 @@

StorageBackend: "local",
StorageLocalBasePath: "/gotosocial/storage",
StorageS3UseSSL: true,

StatusesMaxChars: 5000,
StatusesCWMaxChars: 100,
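All the new S3 settings default to empty strings while `storage-s3-use-ssl` defaults to true, so a deployment that selects the s3 backend but forgets a credential would otherwise fail at first use. Below is a hedged sketch of the kind of startup validation one might add on top of the getters defined just after it; GoToSocial's actual validation, if any, is not shown in this diff.

```go
package config

import (
	"errors"
	"fmt"
)

// ValidateStorage is illustrative only; it checks that the selected
// storage backend has the settings it needs before the server starts.
func ValidateStorage() error {
	switch backend := GetStorageBackend(); backend {
	case "local":
		if GetStorageLocalBasePath() == "" {
			return errors.New("storage-local-base-path must be set for the local backend")
		}
	case "s3":
		if GetStorageS3Endpoint() == "" ||
			GetStorageS3AccessKey() == "" ||
			GetStorageS3SecretKey() == "" ||
			GetStorageS3BucketName() == "" {
			return errors.New("storage-s3-endpoint, -access-key, -secret-key and -bucket must all be set for the s3 backend")
		}
	default:
		return fmt.Errorf("unrecognized storage-backend %q", backend)
	}
	return nil
}
```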
@ -843,6 +843,131 @@ func GetStorageLocalBasePath() string { return global.GetStorageLocalBasePath()
|
||||
// SetStorageLocalBasePath safely sets the value for global configuration 'StorageLocalBasePath' field
|
||||
func SetStorageLocalBasePath(v string) { global.SetStorageLocalBasePath(v) }
|
||||
|
||||
// GetStorageS3Endpoint safely fetches the Configuration value for state's 'StorageS3Endpoint' field
func (st *ConfigState) GetStorageS3Endpoint() (v string) {
	st.mutex.Lock()
	v = st.config.StorageS3Endpoint
	st.mutex.Unlock()
	return
}

// SetStorageS3Endpoint safely sets the Configuration value for state's 'StorageS3Endpoint' field
func (st *ConfigState) SetStorageS3Endpoint(v string) {
	st.mutex.Lock()
	defer st.mutex.Unlock()
	st.config.StorageS3Endpoint = v
	st.reloadToViper()
}

// StorageS3EndpointFlag returns the flag name for the 'StorageS3Endpoint' field
func StorageS3EndpointFlag() string { return "storage-s3-endpoint" }

// GetStorageS3Endpoint safely fetches the value for global configuration 'StorageS3Endpoint' field
func GetStorageS3Endpoint() string { return global.GetStorageS3Endpoint() }

// SetStorageS3Endpoint safely sets the value for global configuration 'StorageS3Endpoint' field
func SetStorageS3Endpoint(v string) { global.SetStorageS3Endpoint(v) }

// GetStorageS3AccessKey safely fetches the Configuration value for state's 'StorageS3AccessKey' field
func (st *ConfigState) GetStorageS3AccessKey() (v string) {
	st.mutex.Lock()
	v = st.config.StorageS3AccessKey
	st.mutex.Unlock()
	return
}

// SetStorageS3AccessKey safely sets the Configuration value for state's 'StorageS3AccessKey' field
func (st *ConfigState) SetStorageS3AccessKey(v string) {
	st.mutex.Lock()
	defer st.mutex.Unlock()
	st.config.StorageS3AccessKey = v
	st.reloadToViper()
}

// StorageS3AccessKeyFlag returns the flag name for the 'StorageS3AccessKey' field
func StorageS3AccessKeyFlag() string { return "storage-s3-access-key" }

// GetStorageS3AccessKey safely fetches the value for global configuration 'StorageS3AccessKey' field
func GetStorageS3AccessKey() string { return global.GetStorageS3AccessKey() }

// SetStorageS3AccessKey safely sets the value for global configuration 'StorageS3AccessKey' field
func SetStorageS3AccessKey(v string) { global.SetStorageS3AccessKey(v) }

// GetStorageS3SecretKey safely fetches the Configuration value for state's 'StorageS3SecretKey' field
func (st *ConfigState) GetStorageS3SecretKey() (v string) {
	st.mutex.Lock()
	v = st.config.StorageS3SecretKey
	st.mutex.Unlock()
	return
}

// SetStorageS3SecretKey safely sets the Configuration value for state's 'StorageS3SecretKey' field
func (st *ConfigState) SetStorageS3SecretKey(v string) {
	st.mutex.Lock()
	defer st.mutex.Unlock()
	st.config.StorageS3SecretKey = v
	st.reloadToViper()
}

// StorageS3SecretKeyFlag returns the flag name for the 'StorageS3SecretKey' field
func StorageS3SecretKeyFlag() string { return "storage-s3-secret-key" }

// GetStorageS3SecretKey safely fetches the value for global configuration 'StorageS3SecretKey' field
func GetStorageS3SecretKey() string { return global.GetStorageS3SecretKey() }

// SetStorageS3SecretKey safely sets the value for global configuration 'StorageS3SecretKey' field
func SetStorageS3SecretKey(v string) { global.SetStorageS3SecretKey(v) }

// GetStorageS3UseSSL safely fetches the Configuration value for state's 'StorageS3UseSSL' field
func (st *ConfigState) GetStorageS3UseSSL() (v bool) {
	st.mutex.Lock()
	v = st.config.StorageS3UseSSL
	st.mutex.Unlock()
	return
}

// SetStorageS3UseSSL safely sets the Configuration value for state's 'StorageS3UseSSL' field
func (st *ConfigState) SetStorageS3UseSSL(v bool) {
	st.mutex.Lock()
	defer st.mutex.Unlock()
	st.config.StorageS3UseSSL = v
	st.reloadToViper()
}

// StorageS3UseSSLFlag returns the flag name for the 'StorageS3UseSSL' field
func StorageS3UseSSLFlag() string { return "storage-s3-use-ssl" }

// GetStorageS3UseSSL safely fetches the value for global configuration 'StorageS3UseSSL' field
func GetStorageS3UseSSL() bool { return global.GetStorageS3UseSSL() }

// SetStorageS3UseSSL safely sets the value for global configuration 'StorageS3UseSSL' field
func SetStorageS3UseSSL(v bool) { global.SetStorageS3UseSSL(v) }

// GetStorageS3BucketName safely fetches the Configuration value for state's 'StorageS3BucketName' field
func (st *ConfigState) GetStorageS3BucketName() (v string) {
	st.mutex.Lock()
	v = st.config.StorageS3BucketName
	st.mutex.Unlock()
	return
}

// SetStorageS3BucketName safely sets the Configuration value for state's 'StorageS3BucketName' field
func (st *ConfigState) SetStorageS3BucketName(v string) {
	st.mutex.Lock()
	defer st.mutex.Unlock()
	st.config.StorageS3BucketName = v
	st.reloadToViper()
}

// StorageS3BucketNameFlag returns the flag name for the 'StorageS3BucketName' field
func StorageS3BucketNameFlag() string { return "storage-s3-bucket" }

// GetStorageS3BucketName safely fetches the value for global configuration 'StorageS3BucketName' field
func GetStorageS3BucketName() string { return global.GetStorageS3BucketName() }

// SetStorageS3BucketName safely sets the value for global configuration 'StorageS3BucketName' field
func SetStorageS3BucketName(v string) { global.SetStorageS3BucketName(v) }

// GetStatusesMaxChars safely fetches the Configuration value for state's 'StatusesMaxChars' field
func (st *ConfigState) GetStatusesMaxChars() (v int) {
	st.mutex.Lock()
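Taken together, the flag names above ("storage-s3-endpoint", "storage-s3-access-key", "storage-s3-secret-key", "storage-s3-use-ssl", "storage-s3-bucket") double as configuration keys. A hedged sketch of what the corresponding block in a YAML config file could look like; every value below is an illustrative placeholder, not taken from this change:

storage-backend: "s3"
storage-s3-endpoint: "s3.example.org:9000"   # placeholder endpoint
storage-s3-access-key: "EXAMPLE_ACCESS_KEY"  # placeholder
storage-s3-secret-key: "EXAMPLE_SECRET_KEY"  # placeholder
storage-s3-use-ssl: true
storage-s3-bucket: "gotosocial-media"        # placeholder bucket name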
@ -59,6 +59,11 @@ func init() {
up := func(ctx context.Context, db *bun.DB) error {
l := logrus.WithField("migration", "20220612091800_duplicated_media_cleanup")

if config.GetStorageBackend() != "local" {
// this migration only affects versions which only supported local storage
return nil
}

storageBasePath := config.GetStorageLocalBasePath()
if storageBasePath == "" {
return fmt.Errorf("%s must be set to do storage migration", config.StorageLocalBasePathFlag())

@ -19,7 +19,6 @@
|
||||
package dereferencing_test
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/superseriousbusiness/activity/streams/vocab"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
@ -27,13 +26,14 @@
|
||||
"github.com/superseriousbusiness/gotosocial/internal/federation/dereferencing"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/messages"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
)
|
||||
|
||||
type DereferencerStandardTestSuite struct {
|
||||
suite.Suite
|
||||
db db.DB
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
|
||||
testRemoteStatuses map[string]vocab.ActivityStreamsNote
|
||||
testRemotePeople map[string]vocab.ActivityStreamsPerson
|
||||
@ -57,7 +57,7 @@ func (suite *DereferencerStandardTestSuite) SetupTest() {
|
||||
suite.testRemoteAttachments = testrig.NewTestFediAttachments("../../../testrig/media")
|
||||
|
||||
suite.db = testrig.NewTestDB()
|
||||
suite.storage = testrig.NewTestStorage()
|
||||
suite.storage = testrig.NewInMemoryStorage()
|
||||
suite.dereferencer = dereferencing.NewDereferencer(suite.db, testrig.NewTestTypeConverter(suite.db), testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../testrig/media"), suite.db, concurrency.NewWorkerPool[messages.FromFederator](-1, -1)), testrig.NewTestMediaManager(suite.db, suite.storage))
|
||||
testrig.StandardDBSetup(suite.db, nil)
|
||||
}
|
||||
|
@ -19,11 +19,11 @@
|
||||
package federation_test
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
)
|
||||
@ -31,7 +31,7 @@
|
||||
type FederatorStandardTestSuite struct {
|
||||
suite.Suite
|
||||
db db.DB
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
tc typeutils.TypeConverter
|
||||
testAccounts map[string]*gtsmodel.Account
|
||||
testStatuses map[string]*gtsmodel.Status
|
||||
@ -41,7 +41,7 @@ type FederatorStandardTestSuite struct {
|
||||
// SetupSuite sets some variables on the suite that we can use as consts (more or less) throughout
|
||||
func (suite *FederatorStandardTestSuite) SetupSuite() {
|
||||
// setup standard items
|
||||
suite.storage = testrig.NewTestStorage()
|
||||
suite.storage = testrig.NewInMemoryStorage()
|
||||
suite.tc = testrig.NewTestTypeConverter(suite.db)
|
||||
suite.testAccounts = testrig.NewTestAccounts()
|
||||
suite.testStatuses = testrig.NewTestStatuses()
|
||||
|
@ -23,12 +23,12 @@
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/robfig/cron/v3"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/config"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
)
|
||||
|
||||
// selectPruneLimit is the amount of media entries to select at a time from the db when pruning
|
||||
@ -98,7 +98,7 @@ type Manager interface {
|
||||
|
||||
type manager struct {
|
||||
db db.DB
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
emojiWorker *concurrency.WorkerPool[*ProcessingEmoji]
|
||||
mediaWorker *concurrency.WorkerPool[*ProcessingMedia]
|
||||
stopCronJobs func() error
|
||||
@ -110,7 +110,7 @@ type manager struct {
|
||||
// a limited number of media will be processed in parallel. The numbers of workers
|
||||
// is determined from the $GOMAXPROCS environment variable (usually no. CPU cores).
|
||||
// See internal/concurrency.NewWorkerPool() documentation for further information.
|
||||
func NewManager(database db.DB, storage *kv.KVStore) (Manager, error) {
|
||||
func NewManager(database db.DB, storage storage.Driver) (Manager, error) {
|
||||
m := &manager{
|
||||
db: database,
|
||||
storage: storage,
|
||||
|
@ -33,6 +33,7 @@
|
||||
"github.com/stretchr/testify/suite"
|
||||
gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||
gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
)
|
||||
|
||||
type ManagerTestSuite struct {
|
||||
@ -87,7 +88,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlocking() {
|
||||
suite.NotNil(dbAttachment)
|
||||
|
||||
// make sure the processed file is in storage
|
||||
processedFullBytes, err := suite.storage.Get(attachment.File.Path)
|
||||
processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedFullBytes)
|
||||
|
||||
@ -100,7 +101,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlocking() {
|
||||
suite.Equal(processedFullBytesExpected, processedFullBytes)
|
||||
|
||||
// now do the same for the thumbnail and make sure it's what we expected
|
||||
processedThumbnailBytes, err := suite.storage.Get(attachment.Thumbnail.Path)
|
||||
processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedThumbnailBytes)
|
||||
|
||||
@ -159,7 +160,7 @@ func (suite *ManagerTestSuite) TestPngNoAlphaChannelProcessBlocking() {
|
||||
suite.NotNil(dbAttachment)
|
||||
|
||||
// make sure the processed file is in storage
|
||||
processedFullBytes, err := suite.storage.Get(attachment.File.Path)
|
||||
processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedFullBytes)
|
||||
|
||||
@ -172,7 +173,7 @@ func (suite *ManagerTestSuite) TestPngNoAlphaChannelProcessBlocking() {
|
||||
suite.Equal(processedFullBytesExpected, processedFullBytes)
|
||||
|
||||
// now do the same for the thumbnail and make sure it's what we expected
|
||||
processedThumbnailBytes, err := suite.storage.Get(attachment.Thumbnail.Path)
|
||||
processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedThumbnailBytes)
|
||||
|
||||
@ -231,7 +232,7 @@ func (suite *ManagerTestSuite) TestPngAlphaChannelProcessBlocking() {
|
||||
suite.NotNil(dbAttachment)
|
||||
|
||||
// make sure the processed file is in storage
|
||||
processedFullBytes, err := suite.storage.Get(attachment.File.Path)
|
||||
processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedFullBytes)
|
||||
|
||||
@ -244,7 +245,7 @@ func (suite *ManagerTestSuite) TestPngAlphaChannelProcessBlocking() {
|
||||
suite.Equal(processedFullBytesExpected, processedFullBytes)
|
||||
|
||||
// now do the same for the thumbnail and make sure it's what we expected
|
||||
processedThumbnailBytes, err := suite.storage.Get(attachment.Thumbnail.Path)
|
||||
processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedThumbnailBytes)
|
||||
|
||||
@ -314,7 +315,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlockingWithCallback() {
|
||||
suite.NotNil(dbAttachment)
|
||||
|
||||
// make sure the processed file is in storage
|
||||
processedFullBytes, err := suite.storage.Get(attachment.File.Path)
|
||||
processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedFullBytes)
|
||||
|
||||
@ -327,7 +328,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlockingWithCallback() {
|
||||
suite.Equal(processedFullBytesExpected, processedFullBytes)
|
||||
|
||||
// now do the same for the thumbnail and make sure it's what we expected
|
||||
processedThumbnailBytes, err := suite.storage.Get(attachment.Thumbnail.Path)
|
||||
processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedThumbnailBytes)
|
||||
|
||||
@ -393,7 +394,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessAsync() {
|
||||
suite.NotNil(dbAttachment)
|
||||
|
||||
// make sure the processed file is in storage
|
||||
processedFullBytes, err := suite.storage.Get(attachment.File.Path)
|
||||
processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedFullBytes)
|
||||
|
||||
@ -406,7 +407,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessAsync() {
|
||||
suite.Equal(processedFullBytesExpected, processedFullBytes)
|
||||
|
||||
// now do the same for the thumbnail and make sure it's what we expected
|
||||
processedThumbnailBytes, err := suite.storage.Get(attachment.Thumbnail.Path)
|
||||
processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedThumbnailBytes)
|
||||
|
||||
@ -474,7 +475,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegQueueSpamming() {
|
||||
suite.NotNil(dbAttachment)
|
||||
|
||||
// make sure the processed file is in storage
|
||||
processedFullBytes, err := suite.storage.Get(attachment.File.Path)
|
||||
processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedFullBytes)
|
||||
|
||||
@ -487,7 +488,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegQueueSpamming() {
|
||||
suite.Equal(processedFullBytesExpected, processedFullBytes)
|
||||
|
||||
// now do the same for the thumbnail and make sure it's what we expected
|
||||
processedThumbnailBytes, err := suite.storage.Get(attachment.Thumbnail.Path)
|
||||
processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
suite.NotEmpty(processedThumbnailBytes)
|
||||
|
||||
@ -523,7 +524,7 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessBlockingWithDiskStorage() {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
diskManager, err := media.NewManager(suite.db, diskStorage)
|
||||
diskManager, err := media.NewManager(suite.db, >sstorage.Local{KVStore: diskStorage})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -19,11 +19,11 @@
|
||||
package media_test
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
)
|
||||
|
||||
@ -31,7 +31,7 @@ type MediaStandardTestSuite struct {
|
||||
suite.Suite
|
||||
|
||||
db db.DB
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
manager media.Manager
|
||||
testAttachments map[string]*gtsmodel.MediaAttachment
|
||||
testAccounts map[string]*gtsmodel.Account
|
||||
@ -42,7 +42,7 @@ func (suite *MediaStandardTestSuite) SetupSuite() {
|
||||
testrig.InitTestLog()
|
||||
|
||||
suite.db = testrig.NewTestDB()
|
||||
suite.storage = testrig.NewTestStorage()
|
||||
suite.storage = testrig.NewInMemoryStorage()
|
||||
}
|
||||
|
||||
func (suite *MediaStandardTestSuite) SetupTest() {
|
||||
|
@ -28,10 +28,10 @@
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/uris"
|
||||
)
|
||||
|
||||
@ -64,7 +64,7 @@ type ProcessingEmoji struct {
|
||||
*/
|
||||
|
||||
database db.DB
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
|
||||
err error // error created during processing, if any
|
||||
|
||||
@ -113,7 +113,7 @@ func (p *ProcessingEmoji) loadStatic(ctx context.Context) error {
|
||||
switch processState(staticState) {
|
||||
case received:
|
||||
// stream the original file out of storage...
|
||||
stored, err := p.storage.GetStream(p.emoji.ImagePath)
|
||||
stored, err := p.storage.GetStream(ctx, p.emoji.ImagePath)
|
||||
if err != nil {
|
||||
p.err = fmt.Errorf("loadStatic: error fetching file from storage: %s", err)
|
||||
atomic.StoreInt32(&p.staticState, int32(errored))
|
||||
@ -135,7 +135,7 @@ func (p *ProcessingEmoji) loadStatic(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// put the static in storage
|
||||
if err := p.storage.Put(p.emoji.ImageStaticPath, static.small); err != nil {
|
||||
if err := p.storage.Put(ctx, p.emoji.ImageStaticPath, static.small); err != nil {
|
||||
p.err = fmt.Errorf("loadStatic: error storing static: %s", err)
|
||||
atomic.StoreInt32(&p.staticState, int32(errored))
|
||||
return p.err
|
||||
@ -211,7 +211,7 @@ func (p *ProcessingEmoji) store(ctx context.Context) error {
|
||||
multiReader := io.MultiReader(bytes.NewBuffer(firstBytes), reader)
|
||||
|
||||
// store this for now -- other processes can pull it out of storage as they please
|
||||
if err := p.storage.PutStream(p.emoji.ImagePath, multiReader); err != nil {
|
||||
if err := p.storage.PutStream(ctx, p.emoji.ImagePath, multiReader); err != nil {
|
||||
return fmt.Errorf("store: error storing stream: %s", err)
|
||||
}
|
||||
|
||||
|
@ -28,12 +28,12 @@
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/sirupsen/logrus"
|
||||
terminator "github.com/superseriousbusiness/exif-terminator"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/id"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/uris"
|
||||
)
|
||||
|
||||
@ -61,7 +61,7 @@ type ProcessingMedia struct {
|
||||
*/
|
||||
|
||||
database db.DB
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
|
||||
err error // error created during processing, if any
|
||||
|
||||
@ -138,7 +138,7 @@ func (p *ProcessingMedia) loadThumb(ctx context.Context) error {
|
||||
|
||||
// stream the original file out of storage
|
||||
logrus.Tracef("loadThumb: fetching attachment from storage %s", p.attachment.URL)
|
||||
stored, err := p.storage.GetStream(p.attachment.File.Path)
|
||||
stored, err := p.storage.GetStream(ctx, p.attachment.File.Path)
|
||||
if err != nil {
|
||||
p.err = fmt.Errorf("loadThumb: error fetching file from storage: %s", err)
|
||||
atomic.StoreInt32(&p.thumbState, int32(errored))
|
||||
@ -164,7 +164,7 @@ func (p *ProcessingMedia) loadThumb(ctx context.Context) error {
|
||||
|
||||
// put the thumbnail in storage
|
||||
logrus.Tracef("loadThumb: storing new thumbnail %s", p.attachment.URL)
|
||||
if err := p.storage.Put(p.attachment.Thumbnail.Path, thumb.small); err != nil {
|
||||
if err := p.storage.Put(ctx, p.attachment.Thumbnail.Path, thumb.small); err != nil {
|
||||
p.err = fmt.Errorf("loadThumb: error storing thumbnail: %s", err)
|
||||
atomic.StoreInt32(&p.thumbState, int32(errored))
|
||||
return p.err
|
||||
@ -203,7 +203,7 @@ func (p *ProcessingMedia) loadFullSize(ctx context.Context) error {
|
||||
var decoded *imageMeta
|
||||
|
||||
// stream the original file out of storage...
|
||||
stored, err := p.storage.GetStream(p.attachment.File.Path)
|
||||
stored, err := p.storage.GetStream(ctx, p.attachment.File.Path)
|
||||
if err != nil {
|
||||
p.err = fmt.Errorf("loadFullSize: error fetching file from storage: %s", err)
|
||||
atomic.StoreInt32(&p.fullSizeState, int32(errored))
|
||||
@ -343,7 +343,7 @@ func (p *ProcessingMedia) store(ctx context.Context) error {
|
||||
p.attachment.File.FileSize = fileSize
|
||||
|
||||
// store this for now -- other processes can pull it out of storage as they please
|
||||
if err := p.storage.PutStream(p.attachment.File.Path, clean); err != nil {
|
||||
if err := p.storage.PutStream(ctx, p.attachment.File.Path, clean); err != nil {
|
||||
return fmt.Errorf("store: error storing stream: %s", err)
|
||||
}
|
||||
p.attachment.Cached = true
|
||||
|
@ -69,7 +69,7 @@ func (m *manager) pruneOneAvatarOrHeader(ctx context.Context, attachment *gtsmod
|
||||
if attachment.File.Path != "" {
|
||||
// delete the full size attachment from storage
|
||||
logrus.Tracef("pruneOneAvatarOrHeader: deleting %s", attachment.File.Path)
|
||||
if err := m.storage.Delete(attachment.File.Path); err != nil && err != storage.ErrNotFound {
|
||||
if err := m.storage.Delete(ctx, attachment.File.Path); err != nil && err != storage.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -77,7 +77,7 @@ func (m *manager) pruneOneAvatarOrHeader(ctx context.Context, attachment *gtsmod
|
||||
if attachment.Thumbnail.Path != "" {
|
||||
// delete the thumbnail from storage
|
||||
logrus.Tracef("pruneOneAvatarOrHeader: deleting %s", attachment.Thumbnail.Path)
|
||||
if err := m.storage.Delete(attachment.Thumbnail.Path); err != nil && err != storage.ErrNotFound {
|
||||
if err := m.storage.Delete(ctx, attachment.Thumbnail.Path); err != nil && err != storage.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -49,13 +49,13 @@ func (suite *PruneMetaTestSuite) TestPruneMeta() {
|
||||
suite.Equal(2, totalPruned)
|
||||
|
||||
// media should no longer be stored
|
||||
_, err = suite.storage.Get(zorkOldAvatar.File.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldAvatar.File.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
_, err = suite.storage.Get(zorkOldAvatar.Thumbnail.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldAvatar.Thumbnail.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
_, err = suite.storage.Get(zorkOldHeader.File.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldHeader.File.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
_, err = suite.storage.Get(zorkOldHeader.Thumbnail.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldHeader.Thumbnail.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
|
||||
// attachments should no longer be in the db
|
||||
@ -110,13 +110,13 @@ func (suite *PruneMetaTestSuite) TestPruneMetaMultipleAccounts() {
|
||||
suite.Equal(2, totalPruned)
|
||||
|
||||
// media should no longer be stored
|
||||
_, err = suite.storage.Get(zorkOldAvatar.File.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldAvatar.File.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
_, err = suite.storage.Get(zorkOldAvatar.Thumbnail.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldAvatar.Thumbnail.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
_, err = suite.storage.Get(zorkOldHeader.File.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldHeader.File.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
_, err = suite.storage.Get(zorkOldHeader.Thumbnail.Path)
|
||||
_, err = suite.storage.Get(ctx, zorkOldHeader.Thumbnail.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
|
||||
// attachments should no longer be in the db
|
||||
|
@ -67,7 +67,7 @@ func (m *manager) pruneOneRemote(ctx context.Context, attachment *gtsmodel.Media
|
||||
if attachment.File.Path != "" {
|
||||
// delete the full size attachment from storage
|
||||
logrus.Tracef("pruneOneRemote: deleting %s", attachment.File.Path)
|
||||
if err := m.storage.Delete(attachment.File.Path); err != nil && err != storage.ErrNotFound {
|
||||
if err := m.storage.Delete(ctx, attachment.File.Path); err != nil && err != storage.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
attachment.Cached = false
|
||||
@ -76,7 +76,7 @@ func (m *manager) pruneOneRemote(ctx context.Context, attachment *gtsmodel.Media
|
||||
if attachment.Thumbnail.Path != "" {
|
||||
// delete the thumbnail from storage
|
||||
logrus.Tracef("pruneOneRemote: deleting %s", attachment.Thumbnail.Path)
|
||||
if err := m.storage.Delete(attachment.Thumbnail.Path); err != nil && err != storage.ErrNotFound {
|
||||
if err := m.storage.Delete(ctx, attachment.Thumbnail.Path); err != nil && err != storage.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
attachment.Cached = false
|
||||
|
@ -68,9 +68,9 @@ func (suite *PruneRemoteTestSuite) TestPruneAndRecache() {
|
||||
suite.Equal(2, totalPruned)
|
||||
|
||||
// media should no longer be stored
|
||||
_, err = suite.storage.Get(testAttachment.File.Path)
|
||||
_, err = suite.storage.Get(ctx, testAttachment.File.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
_, err = suite.storage.Get(testAttachment.Thumbnail.Path)
|
||||
_, err = suite.storage.Get(ctx, testAttachment.Thumbnail.Path)
|
||||
suite.ErrorIs(err, storage.ErrNotFound)
|
||||
|
||||
// now recache the image....
|
||||
@ -98,9 +98,9 @@ func (suite *PruneRemoteTestSuite) TestPruneAndRecache() {
|
||||
suite.EqualValues(testAttachment.FileMeta, recachedAttachment.FileMeta) // and the filemeta should be the same
|
||||
|
||||
// recached files should be back in storage
|
||||
_, err = suite.storage.Get(recachedAttachment.File.Path)
|
||||
_, err = suite.storage.Get(ctx, recachedAttachment.File.Path)
|
||||
suite.NoError(err)
|
||||
_, err = suite.storage.Get(recachedAttachment.Thumbnail.Path)
|
||||
_, err = suite.storage.Get(ctx, recachedAttachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
}
|
||||
|
||||
@ -112,7 +112,7 @@ func (suite *PruneRemoteTestSuite) TestPruneOneNonExistent() {
|
||||
media, err := suite.db.GetAttachmentByID(ctx, testAttachment.ID)
|
||||
suite.NoError(err)
|
||||
suite.True(media.Cached)
|
||||
err = suite.storage.Delete(media.File.Path)
|
||||
err = suite.storage.Delete(ctx, media.File.Path)
|
||||
suite.NoError(err)
|
||||
|
||||
// Now attempt to prune remote for item with db entry no file
|
||||
|
@ -68,7 +68,7 @@ func (m *manager) pruneOneLocal(ctx context.Context, attachment *gtsmodel.MediaA
|
||||
if attachment.File.Path != "" {
|
||||
// delete the full size attachment from storage
|
||||
logrus.Tracef("pruneOneLocal: deleting %s", attachment.File.Path)
|
||||
if err := m.storage.Delete(attachment.File.Path); err != nil && err != storage.ErrNotFound {
|
||||
if err := m.storage.Delete(ctx, attachment.File.Path); err != nil && err != storage.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -76,7 +76,7 @@ func (m *manager) pruneOneLocal(ctx context.Context, attachment *gtsmodel.MediaA
|
||||
if attachment.Thumbnail.Path != "" {
|
||||
// delete the thumbnail from storage
|
||||
logrus.Tracef("pruneOneLocal: deleting %s", attachment.Thumbnail.Path)
|
||||
if err := m.storage.Delete(attachment.Thumbnail.Path); err != nil && err != storage.ErrNotFound {
|
||||
if err := m.storage.Delete(ctx, attachment.Thumbnail.Path); err != nil && err != storage.ErrNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ func (suite *PruneUnusedLocalTestSuite) TestPruneOneNonExistent() {
|
||||
media, err := suite.db.GetAttachmentByID(ctx, testAttachment.ID)
|
||||
suite.NoError(err)
|
||||
suite.True(media.Cached)
|
||||
err = suite.storage.Delete(media.File.Path)
|
||||
err = suite.storage.Delete(ctx, media.File.Path)
|
||||
suite.NoError(err)
|
||||
|
||||
// Now attempt to prune for item with db entry no file
|
||||
|
@ -21,7 +21,6 @@
|
||||
import (
|
||||
"context"
|
||||
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
@ -33,6 +32,7 @@
|
||||
"github.com/superseriousbusiness/gotosocial/internal/oauth"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing/account"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
@ -43,7 +43,7 @@ type AccountStandardTestSuite struct {
|
||||
suite.Suite
|
||||
db db.DB
|
||||
tc typeutils.TypeConverter
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
mediaManager media.Manager
|
||||
oauthServer oauth.Server
|
||||
fromClientAPIChan chan messages.FromClientAPI
|
||||
@ -91,7 +91,7 @@ func (suite *AccountStandardTestSuite) SetupTest() {
|
||||
|
||||
suite.db = testrig.NewTestDB()
|
||||
suite.tc = testrig.NewTestTypeConverter(suite.db)
|
||||
suite.storage = testrig.NewTestStorage()
|
||||
suite.storage = testrig.NewInMemoryStorage()
|
||||
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
|
||||
suite.oauthServer = testrig.NewTestOauthServer(suite.db)
|
||||
suite.fromClientAPIChan = make(chan messages.FromClientAPI, 100)
|
||||
|
@ -24,14 +24,14 @@ func (p *processor) Delete(ctx context.Context, mediaAttachmentID string) gtserr
|
||||
|
||||
// delete the thumbnail from storage
|
||||
if attachment.Thumbnail.Path != "" {
|
||||
if err := p.storage.Delete(attachment.Thumbnail.Path); err != nil {
|
||||
if err := p.storage.Delete(ctx, attachment.Thumbnail.Path); err != nil {
|
||||
errs = append(errs, fmt.Sprintf("remove thumbnail at path %s: %s", attachment.Thumbnail.Path, err))
|
||||
}
|
||||
}
|
||||
|
||||
// delete the file from storage
|
||||
if attachment.File.Path != "" {
|
||||
if err := p.storage.Delete(attachment.File.Path); err != nil {
|
||||
if err := p.storage.Delete(ctx, attachment.File.Path); err != nil {
|
||||
errs = append(errs, fmt.Sprintf("remove file at path %s: %s", attachment.File.Path, err))
|
||||
}
|
||||
}
|
||||
|
@ -113,7 +113,7 @@ func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount

// if we have the media cached on our server already, we can now simply return it from storage
if a.Cached {
return p.streamFromStorage(storagePath, attachmentContent)
return p.retrieveFromStorage(ctx, storagePath, attachmentContent)
}

// if we don't have it cached, then we can assume two things:
@ -221,7 +221,7 @@ func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount
return nil, gtserror.NewErrorNotFound(fmt.Errorf("error loading recached attachment: %s", err))
}
// ... so now we can safely return it
return p.streamFromStorage(storagePath, attachmentContent)
return p.retrieveFromStorage(ctx, storagePath, attachmentContent)
}

return attachmentContent, nil
@ -253,11 +253,15 @@ func (p *processor) getEmojiContent(ctx context.Context, wantedEmojiID string, e
return nil, gtserror.NewErrorNotFound(fmt.Errorf("media size %s not recognized for emoji", emojiSize))
}

return p.streamFromStorage(storagePath, emojiContent)
return p.retrieveFromStorage(ctx, storagePath, emojiContent)
}

func (p *processor) streamFromStorage(storagePath string, content *apimodel.Content) (*apimodel.Content, gtserror.WithCode) {
reader, err := p.storage.GetStream(storagePath)
func (p *processor) retrieveFromStorage(ctx context.Context, storagePath string, content *apimodel.Content) (*apimodel.Content, gtserror.WithCode) {
if url := p.storage.URL(ctx, storagePath); url != nil {
content.URL = url
return content, nil
}
reader, err := p.storage.GetStream(ctx, storagePath)
if err != nil {
return nil, gtserror.NewErrorNotFound(fmt.Errorf("error retrieving from storage: %s", err))
}

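How a caller might act on the Content returned by retrieveFromStorage is sketched below. The handler, its name, and the redirect behaviour are illustrative assumptions rather than part of this change; it relies only on the Content.URL field set above, plus the standard "net/http" and "io" packages:

// serveContent is a hypothetical helper, not part of this diff.
func serveContent(w http.ResponseWriter, r *http.Request, content *apimodel.Content, stream io.ReadCloser) {
    if content.URL != nil {
        // the storage driver produced a (presigned) URL, e.g. from S3:
        // redirect the client there instead of proxying the bytes
        http.Redirect(w, r, content.URL.String(), http.StatusFound)
        return
    }
    // no URL (local storage): stream the bytes through this server
    defer stream.Close()
    _, _ = io.Copy(w, stream)
}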
@ -70,9 +70,9 @@ func (suite *GetFileTestSuite) TestGetRemoteFileUncached() {
|
||||
testAttachment.Cached = false
|
||||
err := suite.db.UpdateByPrimaryKey(ctx, testAttachment)
|
||||
suite.NoError(err)
|
||||
err = suite.storage.Delete(testAttachment.File.Path)
|
||||
err = suite.storage.Delete(ctx, testAttachment.File.Path)
|
||||
suite.NoError(err)
|
||||
err = suite.storage.Delete(testAttachment.Thumbnail.Path)
|
||||
err = suite.storage.Delete(ctx, testAttachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
|
||||
// now fetch it
|
||||
@ -106,7 +106,7 @@ func (suite *GetFileTestSuite) TestGetRemoteFileUncached() {
|
||||
suite.True(dbAttachment.Cached)
|
||||
|
||||
// the file should be back in storage at the same path as before
|
||||
refreshedBytes, err := suite.storage.Get(testAttachment.File.Path)
|
||||
refreshedBytes, err := suite.storage.Get(ctx, testAttachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, refreshedBytes)
|
||||
}
|
||||
@ -119,9 +119,9 @@ func (suite *GetFileTestSuite) TestGetRemoteFileUncachedInterrupted() {
|
||||
testAttachment.Cached = false
|
||||
err := suite.db.UpdateByPrimaryKey(ctx, testAttachment)
|
||||
suite.NoError(err)
|
||||
err = suite.storage.Delete(testAttachment.File.Path)
|
||||
err = suite.storage.Delete(ctx, testAttachment.File.Path)
|
||||
suite.NoError(err)
|
||||
err = suite.storage.Delete(testAttachment.Thumbnail.Path)
|
||||
err = suite.storage.Delete(ctx, testAttachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
|
||||
// now fetch it
|
||||
@ -156,7 +156,7 @@ func (suite *GetFileTestSuite) TestGetRemoteFileUncachedInterrupted() {
|
||||
suite.True(dbAttachment.Cached)
|
||||
|
||||
// the file should be back in storage at the same path as before
|
||||
refreshedBytes, err := suite.storage.Get(testAttachment.File.Path)
|
||||
refreshedBytes, err := suite.storage.Get(ctx, testAttachment.File.Path)
|
||||
suite.NoError(err)
|
||||
suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, refreshedBytes)
|
||||
}
|
||||
@ -166,16 +166,16 @@ func (suite *GetFileTestSuite) TestGetRemoteFileThumbnailUncached() {
|
||||
testAttachment := suite.testAttachments["remote_account_1_status_1_attachment_1"]
|
||||
|
||||
// fetch the existing thumbnail bytes from storage first
|
||||
thumbnailBytes, err := suite.storage.Get(testAttachment.Thumbnail.Path)
|
||||
thumbnailBytes, err := suite.storage.Get(ctx, testAttachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
|
||||
// uncache the file from local
|
||||
testAttachment.Cached = false
|
||||
err = suite.db.UpdateByPrimaryKey(ctx, testAttachment)
|
||||
suite.NoError(err)
|
||||
err = suite.storage.Delete(testAttachment.File.Path)
|
||||
err = suite.storage.Delete(ctx, testAttachment.File.Path)
|
||||
suite.NoError(err)
|
||||
err = suite.storage.Delete(testAttachment.Thumbnail.Path)
|
||||
err = suite.storage.Delete(ctx, testAttachment.Thumbnail.Path)
|
||||
suite.NoError(err)
|
||||
|
||||
// now fetch the thumbnail
|
||||
|
@ -21,12 +21,12 @@
|
||||
import (
|
||||
"context"
|
||||
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
|
||||
)
|
||||
@ -51,12 +51,12 @@ type processor struct {
|
||||
tc typeutils.TypeConverter
|
||||
mediaManager media.Manager
|
||||
transportController transport.Controller
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
db db.DB
|
||||
}
|
||||
|
||||
// New returns a new media processor.
|
||||
func New(db db.DB, tc typeutils.TypeConverter, mediaManager media.Manager, transportController transport.Controller, storage *kv.KVStore) Processor {
|
||||
func New(db db.DB, tc typeutils.TypeConverter, mediaManager media.Manager, transportController transport.Controller, storage storage.Driver) Processor {
|
||||
return &processor{
|
||||
tc: tc,
|
||||
mediaManager: mediaManager,
|
||||
|
@ -19,7 +19,6 @@
|
||||
package media_test
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
@ -27,6 +26,7 @@
|
||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/messages"
|
||||
mediaprocessing "github.com/superseriousbusiness/gotosocial/internal/processing/media"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
@ -37,7 +37,7 @@ type MediaStandardTestSuite struct {
|
||||
suite.Suite
|
||||
db db.DB
|
||||
tc typeutils.TypeConverter
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
mediaManager media.Manager
|
||||
transportController transport.Controller
|
||||
|
||||
@ -72,7 +72,7 @@ func (suite *MediaStandardTestSuite) SetupTest() {
|
||||
|
||||
suite.db = testrig.NewTestDB()
|
||||
suite.tc = testrig.NewTestTypeConverter(suite.db)
|
||||
suite.storage = testrig.NewTestStorage()
|
||||
suite.storage = testrig.NewInMemoryStorage()
|
||||
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
|
||||
suite.transportController = testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../testrig/media"), suite.db, concurrency.NewWorkerPool[messages.FromFederator](-1, -1))
|
||||
suite.mediaProcessor = mediaprocessing.New(suite.db, suite.tc, suite.mediaManager, suite.transportController, suite.storage)
|
||||
|
@ -23,7 +23,6 @@
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
@ -41,6 +40,7 @@
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing/status"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing/streaming"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing/user"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/stream"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/timeline"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
|
||||
@ -251,7 +251,7 @@ type processor struct {
|
||||
tc typeutils.TypeConverter
|
||||
oauthServer oauth.Server
|
||||
mediaManager media.Manager
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
statusTimelines timeline.Manager
|
||||
db db.DB
|
||||
filter visibility.Filter
|
||||
@ -275,7 +275,7 @@ func NewProcessor(
|
||||
federator federation.Federator,
|
||||
oauthServer oauth.Server,
|
||||
mediaManager media.Manager,
|
||||
storage *kv.KVStore,
|
||||
storage storage.Driver,
|
||||
db db.DB,
|
||||
emailSender email.Sender,
|
||||
clientWorker *concurrency.WorkerPool[messages.FromClientAPI],
|
||||
|
@ -19,7 +19,6 @@
|
||||
package processing_test
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
@ -30,6 +29,7 @@
|
||||
"github.com/superseriousbusiness/gotosocial/internal/messages"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/oauth"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
@ -39,7 +39,7 @@ type ProcessingStandardTestSuite struct {
|
||||
// standard suite interfaces
|
||||
suite.Suite
|
||||
db db.DB
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
mediaManager media.Manager
|
||||
typeconverter typeutils.TypeConverter
|
||||
httpClient *testrig.MockHTTPClient
|
||||
@ -91,7 +91,7 @@ func (suite *ProcessingStandardTestSuite) SetupTest() {
|
||||
|
||||
suite.db = testrig.NewTestDB()
|
||||
suite.testActivities = testrig.NewTestActivities(suite.testAccounts)
|
||||
suite.storage = testrig.NewTestStorage()
|
||||
suite.storage = testrig.NewInMemoryStorage()
|
||||
suite.typeconverter = testrig.NewTestTypeConverter(suite.db)
|
||||
suite.httpClient = testrig.NewMockHTTPClient(nil, "../../testrig/media")
|
||||
|
||||
|
@ -19,7 +19,6 @@
|
||||
package status_test
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
@ -29,6 +28,7 @@
|
||||
"github.com/superseriousbusiness/gotosocial/internal/messages"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing/status"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/typeutils"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
@ -39,7 +39,7 @@ type StatusStandardTestSuite struct {
|
||||
db db.DB
|
||||
typeConverter typeutils.TypeConverter
|
||||
tc transport.Controller
|
||||
storage *kv.KVStore
|
||||
storage storage.Driver
|
||||
mediaManager media.Manager
|
||||
federator federation.Federator
|
||||
clientWorker *concurrency.WorkerPool[messages.FromClientAPI]
|
||||
@ -81,7 +81,7 @@ func (suite *StatusStandardTestSuite) SetupTest() {
|
||||
suite.typeConverter = testrig.NewTestTypeConverter(suite.db)
|
||||
suite.clientWorker = concurrency.NewWorkerPool[messages.FromClientAPI](-1, -1)
|
||||
suite.tc = testrig.NewTestTransportController(testrig.NewMockHTTPClient(nil, "../../../testrig/media"), suite.db, fedWorker)
|
||||
suite.storage = testrig.NewTestStorage()
|
||||
suite.storage = testrig.NewInMemoryStorage()
|
||||
suite.mediaManager = testrig.NewTestMediaManager(suite.db, suite.storage)
|
||||
suite.federator = testrig.NewTestFederator(suite.db, suite.tc, suite.storage, suite.mediaManager, fedWorker)
|
||||
suite.status = status.New(suite.db, suite.typeConverter, suite.clientWorker, processing.GetParseMentionFunc(suite.db, suite.federator))
|
||||
|
@ -71,8 +71,8 @@
// MentionFinder extracts mentions from a piece of text.
MentionFinder = regexp.MustCompile(mentionFinder)

// hashtag regex can be played with here: https://regex101.com/r/bPxeca/1
hashtagFinder = fmt.Sprintf(`(?:^|\s)(?:#*)(#[a-zA-Z0-9]{1,%d})(?:#|\b)`, maximumHashtagLength)
// hashtag regex can be played with here: https://regex101.com/r/bpyGlj/1
hashtagFinder = fmt.Sprintf(`(?:^|\s)(?:#*)(#[\p{L}\p{N}]{1,%d})(?:#|\b)`, maximumHashtagLength)
// HashtagFinder finds possible hashtags in a string.
// It returns just the string part of the hashtag, not the # symbol.
HashtagFinder = regexp.MustCompile(hashtagFinder)

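The move from [a-zA-Z0-9] to [\p{L}\p{N}] is what lets hashtags containing non-ASCII letters match. A small self-contained sketch of the new pattern's behaviour; the length limit of 30 stands in for maximumHashtagLength and is chosen only for this example:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // same pattern as the new hashtagFinder, with an illustrative length limit of 30
    finder := regexp.MustCompile(`(?:^|\s)(?:#*)(#[\p{L}\p{N}]{1,30})(?:#|\b)`)
    text := "testing #alimentación, #saúde and #testing123"
    for _, m := range finder.FindAllStringSubmatch(text, -1) {
        fmt.Println(m[1]) // prints #alimentación, #saúde, #testing123
    }
}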
internal/storage/local.go (new file, 50 lines)
@ -0,0 +1,50 @@
/*
   GoToSocial
   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package storage

import (
	"context"
	"io"
	"net/url"

	"codeberg.org/gruf/go-store/kv"
)

type Local struct {
	KVStore *kv.KVStore
}

func (l *Local) Get(ctx context.Context, key string) ([]byte, error) {
	return l.KVStore.Get(key)
}
func (l *Local) GetStream(ctx context.Context, key string) (io.ReadCloser, error) {
	return l.KVStore.GetStream(key)
}
func (l *Local) PutStream(ctx context.Context, key string, r io.Reader) error {
	return l.KVStore.PutStream(key, r)
}
func (l *Local) Put(ctx context.Context, key string, value []byte) error {
	return l.KVStore.Put(key, value)
}
func (l *Local) Delete(ctx context.Context, key string) error {
	return l.KVStore.Delete(key)
}
func (l *Local) URL(ctx context.Context, key string) *url.URL {
	return nil
}
internal/storage/s3.go (new file, 87 lines)
@ -0,0 +1,87 @@
/*
   GoToSocial
   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package storage

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"mime"
	"net/url"
	"path"
	"time"

	"github.com/minio/minio-go/v7"
)

type S3 struct {
	mc     *minio.Client
	bucket string
}

func NewS3(mc *minio.Client, bucket string) *S3 {
	return &S3{
		mc:     mc,
		bucket: bucket,
	}
}

func (s *S3) Get(ctx context.Context, key string) ([]byte, error) {
	r, err := s.GetStream(ctx, key)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	b, err := io.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("reading data from s3: %w", err)
	}
	return b, nil
}
func (s *S3) GetStream(ctx context.Context, key string) (io.ReadCloser, error) {
	o, err := s.mc.GetObject(ctx, s.bucket, key, minio.GetObjectOptions{})
	if err != nil {
		err = fmt.Errorf("retrieving object from s3: %w", err)
	}
	return o, err
}
func (s *S3) PutStream(ctx context.Context, key string, r io.Reader) error {
	if _, err := s.mc.PutObject(ctx, s.bucket, key, r, -1, minio.PutObjectOptions{}); err != nil {
		return fmt.Errorf("uploading data stream: %w", err)
	}
	return nil
}
func (s *S3) Put(ctx context.Context, key string, value []byte) error {
	if _, err := s.mc.PutObject(ctx, s.bucket, key, bytes.NewBuffer(value), -1, minio.PutObjectOptions{}); err != nil {
		return fmt.Errorf("uploading data slice: %w", err)
	}
	return nil
}
func (s *S3) Delete(ctx context.Context, key string) error {
	return s.mc.RemoveObject(ctx, s.bucket, key, minio.RemoveObjectOptions{})
}
func (s *S3) URL(ctx context.Context, key string) *url.URL {
	// it's safe to ignore the error here, as we just fall back to fetching the
	// file if the url request fails
	url, _ := s.mc.PresignedGetObject(ctx, s.bucket, key, time.Hour, url.Values{
		"response-content-type": []string{mime.TypeByExtension(path.Ext(key))},
	})
	return url
}
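One thing the S3 driver does not do is verify its configuration up front. A possible startup sanity check is sketched below; it is an assumption on top of this change, not part of it, and relies on minio-go's BucketExists call:

// sketch: verify the configured bucket is reachable before serving traffic
func checkS3(ctx context.Context, mc *minio.Client, bucket string) error {
    ok, err := mc.BucketExists(ctx, bucket)
    if err != nil {
        return fmt.Errorf("checking bucket %s: %w", bucket, err)
    }
    if !ok {
        return fmt.Errorf("bucket %s does not exist", bucket)
    }
    return nil
}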
internal/storage/storage.go (new file, 78 lines)
@ -0,0 +1,78 @@
/*
   GoToSocial
   Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package storage

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/url"
	"path"

	"codeberg.org/gruf/go-store/kv"
	"codeberg.org/gruf/go-store/storage"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/superseriousbusiness/gotosocial/internal/config"
)

var (
	ErrNotSupported = errors.New("driver does not support functionality")
)

// Driver implements the functionality to store and retrieve blobs
// (images, video, audio)
type Driver interface {
	Get(ctx context.Context, key string) ([]byte, error)
	GetStream(ctx context.Context, key string) (io.ReadCloser, error)
	PutStream(ctx context.Context, key string, r io.Reader) error
	Put(ctx context.Context, key string, value []byte) error
	Delete(ctx context.Context, key string) error
	URL(ctx context.Context, key string) *url.URL
}

func AutoConfig() (Driver, error) {
	switch config.GetStorageBackend() {
	case "s3":
		mc, err := minio.New(config.GetStorageS3Endpoint(), &minio.Options{
			Creds:  credentials.NewStaticV4(config.GetStorageS3AccessKey(), config.GetStorageS3SecretKey(), ""),
			Secure: config.GetStorageS3UseSSL(),
		})
		if err != nil {
			return nil, fmt.Errorf("creating minio client: %w", err)
		}
		return NewS3(mc, config.GetStorageS3BucketName()), nil
	case "local":
		storageBasePath := config.GetStorageLocalBasePath()
		storage, err := kv.OpenFile(storageBasePath, &storage.DiskConfig{
			// Put the store lockfile in the storage dir itself.
			// Normally this would not be safe, since we could end up
			// overwriting the lockfile if we store a file called 'store.lock'.
			// However, in this case it's OK because the keys are set by
			// GtS and not the user, so we know we're never going to overwrite it.
			LockFile: path.Join(storageBasePath, "store.lock"),
		})
		if err != nil {
			return nil, fmt.Errorf("error creating storage backend: %s", err)
		}
		return &Local{KVStore: storage}, nil
	}
	return nil, fmt.Errorf("invalid storage backend %s", config.GetStorageBackend())
}
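A minimal sketch of how calling code can use the new package: pick a backend once via AutoConfig, then talk to it only through the Driver interface. The key and payload below are placeholders, and ctx is assumed to be an existing context.Context:

// sketch: choose a backend from config, then use it through the Driver interface
driver, err := storage.AutoConfig()
if err != nil {
    return fmt.Errorf("error creating storage backend: %w", err)
}
if err := driver.Put(ctx, "example/key.txt", []byte("hello")); err != nil {
    return fmt.Errorf("writing blob: %w", err)
}
b, err := driver.Get(ctx, "example/key.txt")
if err != nil {
    return fmt.Errorf("reading blob: %w", err)
}
fmt.Printf("read %d bytes\n", len(b))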
@ -815,19 +815,23 @@ func (c *converter) BoostToAS(ctx context.Context, boostWrapperStatus *gtsmodel.
announce.SetActivityStreamsTo(toProp)

// set the cc
boostedURI, err := url.Parse(boostedAccount.URI)
ccProp := streams.NewActivityStreamsCcProperty()
boostedAccountURI, err := url.Parse(boostedAccount.URI)
if err != nil {
return nil, fmt.Errorf("BoostToAS: error parsing uri %s: %s", boostedAccount.URI, err)
}
ccProp.AppendIRI(boostedAccountURI)

publicURI, err := url.Parse(pub.PublicActivityPubIRI)
if err != nil {
return nil, fmt.Errorf("BoostToAS: error parsing uri %s: %s", pub.PublicActivityPubIRI, err)
// maybe CC it to public depending on the boosted status visibility
switch boostWrapperStatus.BoostOf.Visibility {
case gtsmodel.VisibilityPublic, gtsmodel.VisibilityUnlocked:
publicURI, err := url.Parse(pub.PublicActivityPubIRI)
if err != nil {
return nil, fmt.Errorf("BoostToAS: error parsing uri %s: %s", pub.PublicActivityPubIRI, err)
}
ccProp.AppendIRI(publicURI)
}

ccProp := streams.NewActivityStreamsCcProperty()
ccProp.AppendIRI(boostedURI)
ccProp.AppendIRI(publicURI)
announce.SetActivityStreamsCc(ccProp)

return announce, nil

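Net effect of the hunk above: the boosted account is always CC'd, while the public collection is only CC'd when the boosted status itself is public or unlocked, so a followers-only boost (as exercised by the new test below) no longer addresses as:Public.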
@ -24,9 +24,9 @@
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/superseriousbusiness/activity/streams"
|
||||
"github.com/superseriousbusiness/gotosocial/testrig"
|
||||
)
|
||||
|
||||
type InternalToASTestSuite struct {
|
||||
@ -60,7 +60,7 @@ func (suite *InternalToASTestSuite) TestOutboxToASCollection() {
|
||||
suite.NoError(err)
|
||||
|
||||
ser, err := streams.Serialize(collection)
|
||||
assert.NoError(suite.T(), err)
|
||||
suite.NoError(err)
|
||||
|
||||
bytes, err := json.Marshal(ser)
|
||||
suite.NoError(err)
|
||||
@ -86,7 +86,7 @@ func (suite *InternalToASTestSuite) TestStatusToAS() {
|
||||
suite.NoError(err)
|
||||
|
||||
ser, err := streams.Serialize(asStatus)
|
||||
assert.NoError(suite.T(), err)
|
||||
suite.NoError(err)
|
||||
|
||||
bytes, err := json.Marshal(ser)
|
||||
suite.NoError(err)
|
||||
@ -105,7 +105,7 @@ func (suite *InternalToASTestSuite) TestStatusToASWithMentions() {
|
||||
suite.NoError(err)
|
||||
|
||||
ser, err := streams.Serialize(asStatus)
|
||||
assert.NoError(suite.T(), err)
|
||||
suite.NoError(err)
|
||||
|
||||
bytes, err := json.Marshal(ser)
|
||||
suite.NoError(err)
|
||||
@ -122,7 +122,7 @@ func (suite *InternalToASTestSuite) TestStatusToASNotSensitive() {
|
||||
suite.NoError(err)
|
||||
|
||||
ser, err := streams.Serialize(asStatus)
|
||||
assert.NoError(suite.T(), err)
|
||||
suite.NoError(err)
|
||||
|
||||
bytes, err := json.Marshal(ser)
|
||||
suite.NoError(err)
|
||||
@ -142,7 +142,7 @@ func (suite *InternalToASTestSuite) TestStatusesToASOutboxPage() {
|
||||
suite.NoError(err)
|
||||
|
||||
ser, err := streams.Serialize(page)
|
||||
assert.NoError(suite.T(), err)
|
||||
suite.NoError(err)
|
||||
|
||||
bytes, err := json.Marshal(ser)
|
||||
suite.NoError(err)
|
||||
@ -184,6 +184,32 @@ func (suite *InternalToASTestSuite) TestStatusesToASOutboxPage() {
|
||||
suite.Equal(`{"@context":"https://www.w3.org/ns/activitystreams","id":"http://localhost:8080/users/admin/outbox?page=true","next":"http://localhost:8080/users/admin/outbox?page=true\u0026max_id=01F8MH75CBF9JFX4ZAD54N0W0R","orderedItems":[{"actor":"http://localhost:8080/users/admin","cc":"http://localhost:8080/users/admin/followers","id":"http://localhost:8080/users/admin/statuses/01F8MHAAY43M6RJ473VQFCVH37/activity","object":"http://localhost:8080/users/admin/statuses/01F8MHAAY43M6RJ473VQFCVH37","published":"2021-10-20T12:36:45Z","to":"https://www.w3.org/ns/activitystreams#Public","type":"Create"},{"actor":"http://localhost:8080/users/admin","cc":"http://localhost:8080/users/admin/followers","id":"http://localhost:8080/users/admin/statuses/01F8MH75CBF9JFX4ZAD54N0W0R/activity","object":"http://localhost:8080/users/admin/statuses/01F8MH75CBF9JFX4ZAD54N0W0R","published":"2021-10-20T11:36:45Z","to":"https://www.w3.org/ns/activitystreams#Public","type":"Create"}],"partOf":"http://localhost:8080/users/admin/outbox","prev":"http://localhost:8080/users/admin/outbox?page=true\u0026min_id=01F8MHAAY43M6RJ473VQFCVH37","type":"OrderedCollectionPage"}`, string(bytes))
|
||||
}
|
||||
|
||||
func (suite *InternalToASTestSuite) TestSelfBoostFollowersOnlyToAS() {
|
||||
ctx := context.Background()
|
||||
|
||||
testStatus := suite.testStatuses["local_account_1_status_5"]
|
||||
testAccount := suite.testAccounts["local_account_1"]
|
||||
|
||||
boostWrapperStatus, err := suite.typeconverter.StatusToBoost(ctx, testStatus, testAccount)
|
||||
suite.NoError(err)
|
||||
suite.NotNil(boostWrapperStatus)
|
||||
|
||||
boostWrapperStatus.ID = "01G74JJ1KS331G2JXHRMZCE0ER"
|
||||
boostWrapperStatus.URI = "http://localhost:8080/users/the_mighty_zork/statuses/01G74JJ1KS331G2JXHRMZCE0ER"
|
||||
boostWrapperStatus.CreatedAt = testrig.TimeMustParse("2022-06-09T13:12:00Z")
|
||||
|
||||
asBoost, err := suite.typeconverter.BoostToAS(ctx, boostWrapperStatus, testAccount, testAccount)
|
||||
suite.NoError(err)
|
||||
|
||||
ser, err := streams.Serialize(asBoost)
|
||||
suite.NoError(err)
|
||||
|
||||
bytes, err := json.Marshal(ser)
|
||||
suite.NoError(err)
|
||||
|
||||
suite.Equal(`{"@context":"https://www.w3.org/ns/activitystreams","actor":"http://localhost:8080/users/the_mighty_zork","cc":"http://localhost:8080/users/the_mighty_zork","id":"http://localhost:8080/users/the_mighty_zork/statuses/01G74JJ1KS331G2JXHRMZCE0ER","object":"http://localhost:8080/users/the_mighty_zork/statuses/01FCTA44PW9H1TB328S9AQXKDS","published":"2022-06-09T13:12:00Z","to":"http://localhost:8080/users/the_mighty_zork/followers","type":"Announce"}`, string(bytes))
|
||||
}
|
||||
|
||||
func TestInternalToASTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(InternalToASTestSuite))
|
||||
}
|
||||
|
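The assertions above all follow the same pattern: convert an internal model to an ActivityStreams type, run it through streams.Serialize, then json.Marshal the resulting map and compare against an expected JSON string. Below is a minimal stand-alone sketch of that serialization step only; the import path (GoToSocial's fork of go-fed/activity) and the Note construction are assumptions for illustration, not the project's own test helpers.

```go
package main

import (
	"encoding/json"
	"fmt"

	// Assumed import path: GoToSocial vendors a fork of go-fed/activity.
	"github.com/superseriousbusiness/activity/streams"
)

func main() {
	// Build a trivial ActivityStreams Note and give it some content.
	note := streams.NewActivityStreamsNote()
	content := streams.NewActivityStreamsContentProperty()
	content.AppendXMLSchemaString("hello fediverse")
	note.SetActivityStreamsContent(content)

	// streams.Serialize returns a map[string]interface{} with @context set,
	// which can then be marshalled to the JSON the tests assert against.
	ser, err := streams.Serialize(note)
	if err != nil {
		panic(err)
	}

	b, err := json.Marshal(ser)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```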
@ -83,15 +83,20 @@ func (suite *StatusTestSuite) TestDeriveHashtagsOK() {
|
||||
|
||||
#ThisShouldAlsoWork #not_this_though
|
||||
|
||||
#111111 thisalsoshouldn'twork#### ##`
|
||||
#111111 thisalsoshouldn'twork#### ##
|
||||
|
||||
#alimentación, #saúde
|
||||
`
|
||||
|
||||
tags := util.DeriveHashtagsFromText(statusText)
|
||||
assert.Len(suite.T(), tags, 5)
|
||||
assert.Len(suite.T(), tags, 7)
|
||||
assert.Equal(suite.T(), "testing123", tags[0])
|
||||
assert.Equal(suite.T(), "also", tags[1])
|
||||
assert.Equal(suite.T(), "thisshouldwork", tags[2])
|
||||
assert.Equal(suite.T(), "ThisShouldAlsoWork", tags[3])
|
||||
assert.Equal(suite.T(), "111111", tags[4])
|
||||
assert.Equal(suite.T(), "alimentación", tags[5])
|
||||
assert.Equal(suite.T(), "saúde", tags[6])
|
||||
}
|
||||
|
||||
func (suite *StatusTestSuite) TestDeriveEmojiOK() {
|
||||
|
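The updated test asserts that hashtags containing non-ASCII letters, such as #alimentación and #saúde, are now derived. As a rough illustration of Unicode-aware extraction, here is a simplified sketch; it is not DeriveHashtagsFromText itself and does not reproduce every rule the test above exercises (underscore handling, length limits, de-duplication).

```go
package main

import (
	"fmt"
	"regexp"
)

// hashtagPattern matches '#' followed by Unicode letters, combining marks,
// or digits, so accented characters are kept as part of the tag.
var hashtagPattern = regexp.MustCompile(`#([\p{L}\p{M}\p{N}]+)`)

// deriveHashtags returns the tag text (without '#') for every match.
func deriveHashtags(text string) []string {
	var tags []string
	for _, m := range hashtagPattern.FindAllStringSubmatch(text, -1) {
		tags = append(tags, m[1])
	}
	return tags
}

func main() {
	fmt.Println(deriveHashtags("#testing123 #alimentación, #saúde"))
	// Prints: [testing123 alimentación saúde]
}
```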
@ -5,7 +5,7 @@ set -e
|
||||
echo "STARTING CLI TESTS"
|
||||
|
||||
echo "TEST_1 Make sure defaults are set correctly."
|
||||
TEST_1_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_1_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_1="$(go run ./cmd/gotosocial/... debug config)"
|
||||
if [ "${TEST_1}" != "${TEST_1_EXPECTED}" ]; then
|
||||
echo "TEST_1 not equal TEST_1_EXPECTED"
|
||||
@ -15,7 +15,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_2 Override db-address from default using cli flag."
|
||||
TEST_2_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.db.address","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_2_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.db.address","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_2="$(go run ./cmd/gotosocial/... --db-address some.db.address debug config)"
|
||||
if [ "${TEST_2}" != "${TEST_2_EXPECTED}" ]; then
|
||||
echo "TEST_2 not equal TEST_2_EXPECTED"
|
||||
@ -25,7 +25,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_3 Override db-address from default using env var."
|
||||
TEST_3_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.db.address","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_3_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.db.address","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_3="$(GTS_DB_ADDRESS=some.db.address go run ./cmd/gotosocial/... debug config)"
|
||||
if [ "${TEST_3}" != "${TEST_3_EXPECTED}" ]; then
|
||||
echo "TEST_3 not equal TEST_3_EXPECTED"
|
||||
@ -35,7 +35,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_4 Override db-address from default using both env var and cli flag. The cli flag should take priority."
|
||||
TEST_4_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.other.db.address","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_4_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"","db-address":"some.other.db.address","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_4="$(GTS_DB_ADDRESS=some.db.address go run ./cmd/gotosocial/... --db-address some.other.db.address debug config)"
|
||||
if [ "${TEST_4}" != "${TEST_4_EXPECTED}" ]; then
|
||||
echo "TEST_4 not equal TEST_4_EXPECTED"
|
||||
@ -45,7 +45,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_5 Test loading a config file by passing an env var."
|
||||
TEST_5_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_5_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_5="$(GTS_CONFIG_PATH=./test/test.yaml go run ./cmd/gotosocial/... debug config)"
|
||||
if [ "${TEST_5}" != "${TEST_5_EXPECTED}" ]; then
|
||||
echo "TEST_5 not equal TEST_5_EXPECTED"
|
||||
@ -55,7 +55,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_6 Test loading a config file by passing cli flag."
|
||||
TEST_6_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_6_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_6="$(go run ./cmd/gotosocial/... --config-path ./test/test.yaml debug config)"
|
||||
if [ "${TEST_6}" != "${TEST_6_EXPECTED}" ]; then
|
||||
echo "TEST_6 not equal TEST_6_EXPECTED"
|
||||
@ -65,7 +65,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_7 Test loading a config file and overriding one of the variables with a cli flag."
|
||||
TEST_7_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_7_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_7="$(go run ./cmd/gotosocial/... --config-path ./test/test.yaml --account-domain '' debug config)"
|
||||
if [ "${TEST_7}" != "${TEST_7_EXPECTED}" ]; then
|
||||
echo "TEST_7 not equal TEST_7_EXPECTED"
|
||||
@ -75,7 +75,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_8 Test loading a config file and overriding one of the variables with an env var."
|
||||
TEST_8_EXPECTED='{"account-domain":"peepee","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_8_EXPECTED='{"account-domain":"peepee","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_8="$(GTS_ACCOUNT_DOMAIN='peepee' go run ./cmd/gotosocial/... --config-path ./test/test.yaml debug config)"
|
||||
if [ "${TEST_8}" != "${TEST_8_EXPECTED}" ]; then
|
||||
echo "TEST_8 not equal TEST_8_EXPECTED"
|
||||
@ -85,7 +85,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_9 Test loading a config file and overriding one of the variables with both an env var and a cli flag. The cli flag should have priority."
|
||||
TEST_9_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_9_EXPECTED='{"account-domain":"","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.yaml","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_9="$(GTS_ACCOUNT_DOMAIN='peepee' go run ./cmd/gotosocial/... --config-path ./test/test.yaml --account-domain '' debug config)"
|
||||
if [ "${TEST_9}" != "${TEST_9_EXPECTED}" ]; then
|
||||
echo "TEST_9 not equal TEST_9_EXPECTED"
|
||||
@ -95,7 +95,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_10 Test loading a config file from json."
|
||||
TEST_10_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.json","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_10_EXPECTED='{"account-domain":"example.org","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test.json","db-address":"127.0.0.1","db-database":"postgres","db-password":"postgres","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"postgres","email":"","host":"gts.example.org","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":false,"log-level":"info","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","email","profile","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"someone@example.org","smtp-host":"verycoolemailhost.mail","smtp-password":"smtp-password","smtp-port":8888,"smtp-username":"smtp-username","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_10="$(go run ./cmd/gotosocial/... --config-path ./test/test.json debug config)"
|
||||
if [ "${TEST_10}" != "${TEST_10_EXPECTED}" ]; then
|
||||
echo "TEST_10 not equal TEST_10_EXPECTED"
|
||||
@ -105,7 +105,7 @@ else
|
||||
fi
|
||||
|
||||
echo "TEST_11 Test loading a partial config file. Default values should be used apart from those set in the config file."
|
||||
TEST_11_EXPECTED='{"account-domain":"peepee.poopoo","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test2.yaml","db-address":"","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"trace","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_11_EXPECTED='{"account-domain":"peepee.poopoo","accounts-approval-required":true,"accounts-reason-required":true,"accounts-registration-open":true,"advanced-cookies-samesite":"lax","application-name":"gotosocial","bind-address":"0.0.0.0","config-path":"./test/test2.yaml","db-address":"","db-database":"gotosocial","db-password":"","db-port":5432,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"postgres","db-user":"","email":"","host":"","instance-expose-peers":false,"instance-expose-suspended":false,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":false,"letsencrypt-port":80,"log-db-queries":false,"log-level":"trace","media-description-max-chars":500,"media-description-min-chars":0,"media-image-max-size":2097152,"media-remote-cache-days":30,"media-video-max-size":10485760,"oidc-client-id":"","oidc-client-secret":"","oidc-enabled":false,"oidc-idp-name":"","oidc-issuer":"","oidc-scopes":["openid","profile","email","groups"],"oidc-skip-verification":false,"password":"","path":"","port":8080,"protocol":"https","smtp-from":"GoToSocial","smtp-host":"","smtp-password":"","smtp-port":0,"smtp-username":"","software-version":"","statuses-cw-max-chars":100,"statuses-max-chars":5000,"statuses-media-max-files":6,"statuses-poll-max-options":6,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/gotosocial/storage","storage-s3-access-key":"","storage-s3-bucket":"","storage-s3-endpoint":"","storage-s3-secret-key":"","storage-s3-use-ssl":true,"syslog-address":"localhost:514","syslog-enabled":false,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32"],"username":"","web-asset-base-dir":"./web/assets/","web-template-base-dir":"./web/template/"}'
|
||||
TEST_11="$(go run ./cmd/gotosocial/... --config-path ./test/test2.yaml debug config)"
|
||||
if [ "${TEST_11}" != "${TEST_11_EXPECTED}" ]; then
|
||||
echo "TEST_11 not equal TEST_11_EXPECTED"
|
||||
|
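Taken together, TEST_1 through TEST_11 pin down the precedence order the binary is expected to honour: a CLI flag beats an environment variable, which beats a value from the config file, which beats the built-in default. A minimal stdlib-only sketch of that resolution order follows; the names are illustrative and this is not GoToSocial's actual config code.

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// resolve mirrors the precedence exercised by the tests above:
// CLI flag > environment variable > config-file value > built-in default.
func resolve(flagVal string, flagSet bool, envKey, fileVal, def string) string {
	if flagSet {
		return flagVal
	}
	if env, ok := os.LookupEnv(envKey); ok {
		return env
	}
	if fileVal != "" {
		return fileVal
	}
	return def
}

func main() {
	dbAddr := flag.String("db-address", "", "database address")
	flag.Parse()

	// flag.Visit only visits flags that were explicitly set on the command line.
	flagSet := false
	flag.Visit(func(f *flag.Flag) {
		if f.Name == "db-address" {
			flagSet = true
		}
	})

	fmt.Println(resolve(*dbAddr, flagSet, "GTS_DB_ADDRESS", "" /* from config file */, "localhost"))
}
```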
@ -2,7 +2,7 @@
|
||||
|
||||
set -eu
|
||||
|
||||
EXPECTED='{"account-domain":"peepee","accounts-approval-required":false,"accounts-reason-required":false,"accounts-registration-open":true,"advanced-cookies-samesite":"strict","application-name":"gts","bind-address":"127.0.0.1","config-path":"./test/test.yaml","db-address":":memory:","db-database":"gotosocial_prod","db-password":"hunter2","db-port":6969,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"sqlite","db-user":"sex-haver","email":"","host":"example.com","instance-expose-peers":true,"instance-expose-suspended":true,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":true,"log-level":"info","media-description-max-chars":5000,"media-description-min-chars":69,"media-image-max-size":420,"media-remote-cache-days":30,"media-video-max-size":420,"oidc-client-id":"1234","oidc-client-secret":"shhhh its a secret","oidc-enabled":true,"oidc-idp-name":"sex-haver","oidc-issuer":"whoknows","oidc-scopes":["read","write"],"oidc-skip-verification":true,"password":"","path":"","port":6969,"protocol":"http","smtp-from":"queen@terfisland.org","smtp-host":"example.com","smtp-password":"hunter2","smtp-port":4269,"smtp-username":"sex-haver","software-version":"","statuses-cw-max-chars":420,"statuses-max-chars":69,"statuses-media-max-files":1,"statuses-poll-max-options":1,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/root/store","syslog-address":"127.0.0.1:6969","syslog-enabled":true,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"/root","web-template-base-dir":"/root"}'
|
||||
EXPECTED='{"account-domain":"peepee","accounts-approval-required":false,"accounts-reason-required":false,"accounts-registration-open":true,"advanced-cookies-samesite":"strict","application-name":"gts","bind-address":"127.0.0.1","config-path":"./test/test.yaml","db-address":":memory:","db-database":"gotosocial_prod","db-password":"hunter2","db-port":6969,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"sqlite","db-user":"sex-haver","email":"","host":"example.com","instance-expose-peers":true,"instance-expose-suspended":true,"letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":true,"log-level":"info","media-description-max-chars":5000,"media-description-min-chars":69,"media-image-max-size":420,"media-remote-cache-days":30,"media-video-max-size":420,"oidc-client-id":"1234","oidc-client-secret":"shhhh its a secret","oidc-enabled":true,"oidc-idp-name":"sex-haver","oidc-issuer":"whoknows","oidc-scopes":["read","write"],"oidc-skip-verification":true,"password":"","path":"","port":6969,"protocol":"http","smtp-from":"queen@terfisland.org","smtp-host":"example.com","smtp-password":"hunter2","smtp-port":4269,"smtp-username":"sex-haver","software-version":"","statuses-cw-max-chars":420,"statuses-max-chars":69,"statuses-media-max-files":1,"statuses-poll-max-options":1,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/root/store","storage-s3-access-key":"minio","storage-s3-bucket":"gts","storage-s3-endpoint":"localhost:9000","storage-s3-secret-key":"miniostorage","storage-s3-use-ssl":false,"syslog-address":"127.0.0.1:6969","syslog-enabled":true,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","0.0.0.0/0"],"username":"","web-asset-base-dir":"/root","web-template-base-dir":"/root"}'
|
||||
|
||||
# Set all the environment variables to
|
||||
# ensure that these are parsed without panic
|
||||
@ -37,6 +37,11 @@ GTS_MEDIA_DESCRIPTION_MAX_CHARS=5000 \
|
||||
GTS_MEDIA_REMOTE_CACHE_DAYS=30 \
|
||||
GTS_STORAGE_BACKEND='local' \
|
||||
GTS_STORAGE_LOCAL_BASE_PATH='/root/store' \
|
||||
GTS_STORAGE_S3_ACCESS_KEY='minio' \
|
||||
GTS_STORAGE_S3_SECRET_KEY='miniostorage' \
|
||||
GTS_STORAGE_S3_ENDPOINT='localhost:9000' \
|
||||
GTS_STORAGE_S3_USE_SSL='false' \
|
||||
GTS_STORAGE_S3_BUCKET='gts' \
|
||||
GTS_STATUSES_MAX_CHARS=69 \
|
||||
GTS_STATUSES_CW_MAX_CHARS=420 \
|
||||
GTS_STATUSES_POLL_MAX_OPTIONS=1 \
|
||||
|
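The script above now also exports the five storage-s3-* variables, including the boolean GTS_STORAGE_S3_USE_SSL='false', to check that they are parsed without panicking. A small sketch of how such values might be read and validated from the environment; the struct and defaults below are assumptions for illustration, not GtS's own config types.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// s3Settings is an illustrative holder for the new storage-s3-* values.
type s3Settings struct {
	Endpoint  string
	AccessKey string
	SecretKey string
	Bucket    string
	UseSSL    bool
}

// s3FromEnv reads the GTS_STORAGE_S3_* variables, parsing the boolean strictly.
func s3FromEnv() (s3Settings, error) {
	useSSL := true // mirrors the expected default "storage-s3-use-ssl":true
	if v, ok := os.LookupEnv("GTS_STORAGE_S3_USE_SSL"); ok {
		parsed, err := strconv.ParseBool(v)
		if err != nil {
			return s3Settings{}, fmt.Errorf("invalid GTS_STORAGE_S3_USE_SSL: %w", err)
		}
		useSSL = parsed
	}
	return s3Settings{
		Endpoint:  os.Getenv("GTS_STORAGE_S3_ENDPOINT"),
		AccessKey: os.Getenv("GTS_STORAGE_S3_ACCESS_KEY"),
		SecretKey: os.Getenv("GTS_STORAGE_S3_SECRET_KEY"),
		Bucket:    os.Getenv("GTS_STORAGE_S3_BUCKET"),
		UseSSL:    useSSL,
	}, nil
}

func main() {
	cfg, err := s3FromEnv()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```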
@ -102,4 +102,6 @@ func InitTestConfig() {
|
||||
SyslogAddress: "localhost:514",
|
||||
|
||||
AdvancedCookiesSamesite: "lax",
|
||||
|
||||
SoftwareVersion: "0.0.0-testrig",
|
||||
}
|
||||
|
@ -19,16 +19,16 @@
|
||||
package testrig
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/federation"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/messages"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
)
|
||||
|
||||
// NewTestFederator returns a federator with the given database and (mock!!) transport controller.
|
||||
func NewTestFederator(db db.DB, tc transport.Controller, storage *kv.KVStore, mediaManager media.Manager, fedWorker *concurrency.WorkerPool[messages.FromFederator]) federation.Federator {
|
||||
func NewTestFederator(db db.DB, tc transport.Controller, storage storage.Driver, mediaManager media.Manager, fedWorker *concurrency.WorkerPool[messages.FromFederator]) federation.Federator {
|
||||
return federation.NewFederator(db, NewTestFederatingDB(db, fedWorker), tc, NewTestTypeConverter(db), mediaManager)
|
||||
}
|
||||
|
@ -19,13 +19,13 @@
|
||||
package testrig
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
)
|
||||
|
||||
// NewTestMediaManager returns a media handler with the default test config, and the given db and storage.
|
||||
func NewTestMediaManager(db db.DB, storage *kv.KVStore) media.Manager {
|
||||
func NewTestMediaManager(db db.DB, storage storage.Driver) media.Manager {
|
||||
m, err := media.NewManager(db, storage)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -19,7 +19,6 @@
|
||||
package testrig
|
||||
|
||||
import (
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/email"
|
||||
@ -27,9 +26,10 @@
|
||||
"github.com/superseriousbusiness/gotosocial/internal/media"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/messages"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/processing"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
)
|
||||
|
||||
// NewTestProcessor returns a Processor suitable for testing purposes
|
||||
func NewTestProcessor(db db.DB, storage *kv.KVStore, federator federation.Federator, emailSender email.Sender, mediaManager media.Manager, clientWorker *concurrency.WorkerPool[messages.FromClientAPI], fedWorker *concurrency.WorkerPool[messages.FromFederator]) processing.Processor {
|
||||
func NewTestProcessor(db db.DB, storage storage.Driver, federator federation.Federator, emailSender email.Sender, mediaManager media.Manager, clientWorker *concurrency.WorkerPool[messages.FromClientAPI], fedWorker *concurrency.WorkerPool[messages.FromFederator]) processing.Processor {
|
||||
return processing.NewProcessor(NewTestTypeConverter(db), federator, NewTestOauthServer(db), mediaManager, storage, db, emailSender, clientWorker, fedWorker)
|
||||
}
|
||||
|
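The testrig constructors above all swap *kv.KVStore for storage.Driver, so the helpers no longer care which backend (local disk, in-memory, or S3) sits behind them. Below is a compile-checked sketch of roughly what such an interface could look like, inferred only from the context-aware Put/Delete calls visible in this diff; the method set is an assumption, not the real interface.

```go
package main

import "context"

// Driver is an assumed shape for the storage abstraction: every operation
// takes a context and addresses content by string key.
type Driver interface {
	Get(ctx context.Context, key string) ([]byte, error)
	Put(ctx context.Context, key string, value []byte) error
	Delete(ctx context.Context, key string) error
}

// nopDriver is a do-nothing implementation used only to prove at compile
// time that a concrete type can satisfy the assumed interface.
type nopDriver struct{}

func (nopDriver) Get(context.Context, string) ([]byte, error) { return nil, nil }
func (nopDriver) Put(context.Context, string, []byte) error   { return nil }
func (nopDriver) Delete(context.Context, string) error        { return nil }

var _ Driver = nopDriver{}

func main() {}
```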
@ -19,25 +19,40 @@
|
||||
package testrig
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"codeberg.org/gruf/go-store/kv"
|
||||
"codeberg.org/gruf/go-store/storage"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
|
||||
)
|
||||
|
||||
// NewTestStorage returns a new in memory storage with the default test config
|
||||
func NewTestStorage() *kv.KVStore {
|
||||
// NewInMemoryStorage returns a new in memory storage with the default test config
|
||||
func NewInMemoryStorage() *gtsstorage.Local {
|
||||
storage, err := kv.OpenStorage(storage.OpenMemory(200, false))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return storage
|
||||
return &gtsstorage.Local{KVStore: storage}
|
||||
}
|
||||
|
||||
func NewS3Storage() gtsstorage.Driver {
|
||||
mc, err := minio.New(os.Getenv("GTS_STORAGE_S3_ENDPOINT"), &minio.Options{
|
||||
Creds: credentials.NewStaticV4(os.Getenv("GTS_STORAGE_S3_ACCESS_KEY"), os.Getenv("GTS_STORAGE_S3_SECRET_KEY"), ""),
|
||||
Secure: false,
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return gtsstorage.NewS3(mc, os.Getenv("GTS_STORAGE_S3_BUCKET"))
|
||||
}
|
||||
|
||||
// StandardStorageSetup populates the storage with standard test entries from the given directory.
|
||||
func StandardStorageSetup(s *kv.KVStore, relativePath string) {
|
||||
func StandardStorageSetup(s gtsstorage.Driver, relativePath string) {
|
||||
storedA := newTestStoredAttachments()
|
||||
a := NewTestAttachments()
|
||||
for k, paths := range storedA {
|
||||
@ -53,14 +68,14 @@ func StandardStorageSetup(s *kv.KVStore, relativePath string) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := s.Put(pathOriginal, bOriginal); err != nil {
|
||||
if err := s.Put(context.TODO(), pathOriginal, bOriginal); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bSmall, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameSmall))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := s.Put(pathSmall, bSmall); err != nil {
|
||||
if err := s.Put(context.TODO(), pathSmall, bSmall); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@ -80,35 +95,40 @@ func StandardStorageSetup(s *kv.KVStore, relativePath string) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := s.Put(pathOriginal, bOriginal); err != nil {
|
||||
if err := s.Put(context.TODO(), pathOriginal, bOriginal); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bStatic, err := os.ReadFile(fmt.Sprintf("%s/%s", relativePath, filenameStatic))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := s.Put(pathStatic, bStatic); err != nil {
|
||||
if err := s.Put(context.TODO(), pathStatic, bStatic); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// StandardStorageTeardown deletes everything in storage so that it's clean for the next test
|
||||
func StandardStorageTeardown(s *kv.KVStore) {
|
||||
// StandardStorageTeardown deletes everything in storage so that it's clean for
|
||||
// the next test
|
||||
// nolint:gocritic // complains about the type switch, but it's the cleanest solution
|
||||
func StandardStorageTeardown(s gtsstorage.Driver) {
|
||||
defer os.RemoveAll(path.Join(os.TempDir(), "gotosocial"))
|
||||
|
||||
iter, err := s.Iterator(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
keys := []string{}
|
||||
for iter.Next() {
|
||||
keys = append(keys, iter.Key())
|
||||
}
|
||||
iter.Release()
|
||||
for _, k := range keys {
|
||||
if err := s.Delete(k); err != nil {
|
||||
switch st := s.(type) {
|
||||
case *gtsstorage.Local:
|
||||
iter, err := st.KVStore.Iterator(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
keys := []string{}
|
||||
for iter.Next() {
|
||||
keys = append(keys, iter.Key())
|
||||
}
|
||||
iter.Release()
|
||||
for _, k := range keys {
|
||||
if err := s.Delete(context.TODO(), k); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
21 vendor/github.com/dustin/go-humanize/.travis.yml generated vendored Normal file
@ -0,0 +1,21 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.3.x
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- master
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
fast_finish: true
|
||||
install:
|
||||
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||||
script:
|
||||
- go get -t -v ./...
|
||||
- diff -u <(echo -n) <(gofmt -d -s .)
|
||||
- go tool vet .
|
||||
- go test -v -race ./...
|
21 vendor/github.com/dustin/go-humanize/LICENSE generated vendored Normal file
@ -0,0 +1,21 @@
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

<http://www.opensource.org/licenses/mit-license.php>
124 vendor/github.com/dustin/go-humanize/README.markdown generated vendored Normal file
@ -0,0 +1,124 @@
# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)

Just a few functions for helping humanize times and sizes.

`go get` it as `github.com/dustin/go-humanize`, import it as
`"github.com/dustin/go-humanize"`, use it as `humanize`.

See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
complete documentation.

## Sizes

This lets you take numbers like `82854982` and convert them to useful
strings like, `83 MB` or `79 MiB` (whichever you prefer).

Example:

```go
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
```

## Times

This lets you take a `time.Time` and spit it out in relative terms.
For example, `12 seconds ago` or `3 days from now`.

Example:

```go
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
```

Thanks to Kyle Lemons for the time implementation from an IRC
conversation one day. It's pretty neat.

## Ordinals

From a [mailing list discussion][odisc] where a user wanted to be able
to label ordinals.

0 -> 0th
1 -> 1st
2 -> 2nd
3 -> 3rd
4 -> 4th
[...]

Example:

```go
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
```

## Commas

Want to shove commas into numbers? Be my guest.

0 -> 0
100 -> 100
1000 -> 1,000
1000000000 -> 1,000,000,000
-100000 -> -100,000

Example:

```go
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
```

## Ftoa

Nicer float64 formatter that removes trailing zeros.

```go
fmt.Printf("%f", 2.24) // 2.240000
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
fmt.Printf("%f", 2.0) // 2.000000
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
```

## SI notation

Format numbers with [SI notation][sinotation].

Example:

```go
humanize.SI(0.00000000223, "M") // 2.23 nM
```

## English-specific functions

The following functions are in the `humanize/english` subpackage.

### Plurals

Simple English pluralization

```go
english.PluralWord(1, "object", "") // object
english.PluralWord(42, "object", "") // objects
english.PluralWord(2, "bus", "") // buses
english.PluralWord(99, "locus", "loci") // loci

english.Plural(1, "object", "") // 1 object
english.Plural(42, "object", "") // 42 objects
english.Plural(2, "bus", "") // 2 buses
english.Plural(99, "locus", "loci") // 99 loci
```

### Word series

Format comma-separated words lists with conjuctions:

```go
english.WordSeries([]string{"foo"}, "and") // foo
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz

english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
```

[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
31 vendor/github.com/dustin/go-humanize/big.go generated vendored Normal file
@ -0,0 +1,31 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// order of magnitude (to a max order)
|
||||
func oomm(n, b *big.Int, maxmag int) (float64, int) {
|
||||
mag := 0
|
||||
m := &big.Int{}
|
||||
for n.Cmp(b) >= 0 {
|
||||
n.DivMod(n, b, m)
|
||||
mag++
|
||||
if mag == maxmag && maxmag >= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||
}
|
||||
|
||||
// total order of magnitude
|
||||
// (same as above, but with no upper limit)
|
||||
func oom(n, b *big.Int) (float64, int) {
|
||||
mag := 0
|
||||
m := &big.Int{}
|
||||
for n.Cmp(b) >= 0 {
|
||||
n.DivMod(n, b, m)
|
||||
mag++
|
||||
}
|
||||
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||
}
|
173 vendor/github.com/dustin/go-humanize/bigbytes.go generated vendored Normal file
@ -0,0 +1,173 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var (
|
||||
bigIECExp = big.NewInt(1024)
|
||||
|
||||
// BigByte is one byte in bit.Ints
|
||||
BigByte = big.NewInt(1)
|
||||
// BigKiByte is 1,024 bytes in bit.Ints
|
||||
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
|
||||
// BigMiByte is 1,024 k bytes in bit.Ints
|
||||
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
|
||||
// BigGiByte is 1,024 m bytes in bit.Ints
|
||||
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
|
||||
// BigTiByte is 1,024 g bytes in bit.Ints
|
||||
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
|
||||
// BigPiByte is 1,024 t bytes in bit.Ints
|
||||
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
|
||||
// BigEiByte is 1,024 p bytes in bit.Ints
|
||||
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
|
||||
// BigZiByte is 1,024 e bytes in bit.Ints
|
||||
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
|
||||
// BigYiByte is 1,024 z bytes in bit.Ints
|
||||
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
|
||||
)
|
||||
|
||||
var (
|
||||
bigSIExp = big.NewInt(1000)
|
||||
|
||||
// BigSIByte is one SI byte in big.Ints
|
||||
BigSIByte = big.NewInt(1)
|
||||
// BigKByte is 1,000 SI bytes in big.Ints
|
||||
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
|
||||
// BigMByte is 1,000 SI k bytes in big.Ints
|
||||
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
|
||||
// BigGByte is 1,000 SI m bytes in big.Ints
|
||||
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
|
||||
// BigTByte is 1,000 SI g bytes in big.Ints
|
||||
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
|
||||
// BigPByte is 1,000 SI t bytes in big.Ints
|
||||
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
|
||||
// BigEByte is 1,000 SI p bytes in big.Ints
|
||||
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
|
||||
// BigZByte is 1,000 SI e bytes in big.Ints
|
||||
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
|
||||
// BigYByte is 1,000 SI z bytes in big.Ints
|
||||
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
|
||||
)
|
||||
|
||||
var bigBytesSizeTable = map[string]*big.Int{
|
||||
"b": BigByte,
|
||||
"kib": BigKiByte,
|
||||
"kb": BigKByte,
|
||||
"mib": BigMiByte,
|
||||
"mb": BigMByte,
|
||||
"gib": BigGiByte,
|
||||
"gb": BigGByte,
|
||||
"tib": BigTiByte,
|
||||
"tb": BigTByte,
|
||||
"pib": BigPiByte,
|
||||
"pb": BigPByte,
|
||||
"eib": BigEiByte,
|
||||
"eb": BigEByte,
|
||||
"zib": BigZiByte,
|
||||
"zb": BigZByte,
|
||||
"yib": BigYiByte,
|
||||
"yb": BigYByte,
|
||||
// Without suffix
|
||||
"": BigByte,
|
||||
"ki": BigKiByte,
|
||||
"k": BigKByte,
|
||||
"mi": BigMiByte,
|
||||
"m": BigMByte,
|
||||
"gi": BigGiByte,
|
||||
"g": BigGByte,
|
||||
"ti": BigTiByte,
|
||||
"t": BigTByte,
|
||||
"pi": BigPiByte,
|
||||
"p": BigPByte,
|
||||
"ei": BigEiByte,
|
||||
"e": BigEByte,
|
||||
"z": BigZByte,
|
||||
"zi": BigZiByte,
|
||||
"y": BigYByte,
|
||||
"yi": BigYiByte,
|
||||
}
|
||||
|
||||
var ten = big.NewInt(10)
|
||||
|
||||
func humanateBigBytes(s, base *big.Int, sizes []string) string {
|
||||
if s.Cmp(ten) < 0 {
|
||||
return fmt.Sprintf("%d B", s)
|
||||
}
|
||||
c := (&big.Int{}).Set(s)
|
||||
val, mag := oomm(c, base, len(sizes)-1)
|
||||
suffix := sizes[mag]
|
||||
f := "%.0f %s"
|
||||
if val < 10 {
|
||||
f = "%.1f %s"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(f, val, suffix)
|
||||
|
||||
}
|
||||
|
||||
// BigBytes produces a human readable representation of an SI size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigBytes(82854982) -> 83 MB
|
||||
func BigBytes(s *big.Int) string {
|
||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
|
||||
return humanateBigBytes(s, bigSIExp, sizes)
|
||||
}
|
||||
|
||||
// BigIBytes produces a human readable representation of an IEC size.
|
||||
//
|
||||
// See also: ParseBigBytes.
|
||||
//
|
||||
// BigIBytes(82854982) -> 79 MiB
|
||||
func BigIBytes(s *big.Int) string {
|
||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
|
||||
return humanateBigBytes(s, bigIECExp, sizes)
|
||||
}
|
||||
|
||||
// ParseBigBytes parses a string representation of bytes into the number
|
||||
// of bytes it represents.
|
||||
//
|
||||
// See also: BigBytes, BigIBytes.
|
||||
//
|
||||
// ParseBigBytes("42 MB") -> 42000000, nil
|
||||
// ParseBigBytes("42 mib") -> 44040192, nil
|
||||
func ParseBigBytes(s string) (*big.Int, error) {
|
||||
lastDigit := 0
|
||||
hasComma := false
|
||||
for _, r := range s {
|
||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||
break
|
||||
}
|
||||
if r == ',' {
|
||||
hasComma = true
|
||||
}
|
||||
lastDigit++
|
||||
}
|
||||
|
||||
num := s[:lastDigit]
|
||||
if hasComma {
|
||||
num = strings.Replace(num, ",", "", -1)
|
||||
}
|
||||
|
||||
val := &big.Rat{}
|
||||
_, err := fmt.Sscanf(num, "%f", val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||
if m, ok := bigBytesSizeTable[extra]; ok {
|
||||
mv := (&big.Rat{}).SetInt(m)
|
||||
val.Mul(val, mv)
|
||||
rv := &big.Int{}
|
||||
rv.Div(val.Num(), val.Denom())
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unhandled size name: %v", extra)
|
||||
}
|
143 vendor/github.com/dustin/go-humanize/bytes.go generated vendored Normal file
@ -0,0 +1,143 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// IEC Sizes.
|
||||
// kibis of bits
|
||||
const (
|
||||
Byte = 1 << (iota * 10)
|
||||
KiByte
|
||||
MiByte
|
||||
GiByte
|
||||
TiByte
|
||||
PiByte
|
||||
EiByte
|
||||
)
|
||||
|
||||
// SI Sizes.
|
||||
const (
|
||||
IByte = 1
|
||||
KByte = IByte * 1000
|
||||
MByte = KByte * 1000
|
||||
GByte = MByte * 1000
|
||||
TByte = GByte * 1000
|
||||
PByte = TByte * 1000
|
||||
EByte = PByte * 1000
|
||||
)
|
||||
|
||||
var bytesSizeTable = map[string]uint64{
|
||||
"b": Byte,
|
||||
"kib": KiByte,
|
||||
"kb": KByte,
|
||||
"mib": MiByte,
|
||||
"mb": MByte,
|
||||
"gib": GiByte,
|
||||
"gb": GByte,
|
||||
"tib": TiByte,
|
||||
"tb": TByte,
|
||||
"pib": PiByte,
|
||||
"pb": PByte,
|
||||
"eib": EiByte,
|
||||
"eb": EByte,
|
||||
// Without suffix
|
||||
"": Byte,
|
||||
"ki": KiByte,
|
||||
"k": KByte,
|
||||
"mi": MiByte,
|
||||
"m": MByte,
|
||||
"gi": GiByte,
|
||||
"g": GByte,
|
||||
"ti": TiByte,
|
||||
"t": TByte,
|
||||
"pi": PiByte,
|
||||
"p": PByte,
|
||||
"ei": EiByte,
|
||||
"e": EByte,
|
||||
}
|
||||
|
||||
func logn(n, b float64) float64 {
|
||||
return math.Log(n) / math.Log(b)
|
||||
}
|
||||
|
||||
func humanateBytes(s uint64, base float64, sizes []string) string {
|
||||
if s < 10 {
|
||||
return fmt.Sprintf("%d B", s)
|
||||
}
|
||||
e := math.Floor(logn(float64(s), base))
|
||||
suffix := sizes[int(e)]
|
||||
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
|
||||
f := "%.0f %s"
|
||||
if val < 10 {
|
||||
f = "%.1f %s"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(f, val, suffix)
|
||||
}
|
||||
|
||||
// Bytes produces a human readable representation of an SI size.
|
||||
//
|
||||
// See also: ParseBytes.
|
||||
//
|
||||
// Bytes(82854982) -> 83 MB
|
||||
func Bytes(s uint64) string {
|
||||
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
|
||||
return humanateBytes(s, 1000, sizes)
|
||||
}
|
||||
|
||||
// IBytes produces a human readable representation of an IEC size.
|
||||
//
|
||||
// See also: ParseBytes.
|
||||
//
|
||||
// IBytes(82854982) -> 79 MiB
|
||||
func IBytes(s uint64) string {
|
||||
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
|
||||
return humanateBytes(s, 1024, sizes)
|
||||
}
|
||||
|
||||
// ParseBytes parses a string representation of bytes into the number
|
||||
// of bytes it represents.
|
||||
//
|
||||
// See Also: Bytes, IBytes.
|
||||
//
|
||||
// ParseBytes("42 MB") -> 42000000, nil
|
||||
// ParseBytes("42 mib") -> 44040192, nil
|
||||
func ParseBytes(s string) (uint64, error) {
|
||||
lastDigit := 0
|
||||
hasComma := false
|
||||
for _, r := range s {
|
||||
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||
break
|
||||
}
|
||||
if r == ',' {
|
||||
hasComma = true
|
||||
}
|
||||
lastDigit++
|
||||
}
|
||||
|
||||
num := s[:lastDigit]
|
||||
if hasComma {
|
||||
num = strings.Replace(num, ",", "", -1)
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(num, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||
if m, ok := bytesSizeTable[extra]; ok {
|
||||
f *= float64(m)
|
||||
if f >= math.MaxUint64 {
|
||||
return 0, fmt.Errorf("too large: %v", s)
|
||||
}
|
||||
return uint64(f), nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("unhandled size name: %v", extra)
|
||||
}
|
116 vendor/github.com/dustin/go-humanize/comma.go generated vendored Normal file
@ -0,0 +1,116 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Comma produces a string form of the given number in base 10 with
|
||||
// commas after every three orders of magnitude.
|
||||
//
|
||||
// e.g. Comma(834142) -> 834,142
|
||||
func Comma(v int64) string {
|
||||
sign := ""
|
||||
|
||||
// Min int64 can't be negated to a usable value, so it has to be special cased.
|
||||
if v == math.MinInt64 {
|
||||
return "-9,223,372,036,854,775,808"
|
||||
}
|
||||
|
||||
if v < 0 {
|
||||
sign = "-"
|
||||
v = 0 - v
|
||||
}
|
||||
|
||||
parts := []string{"", "", "", "", "", "", ""}
|
||||
j := len(parts) - 1
|
||||
|
||||
for v > 999 {
|
||||
parts[j] = strconv.FormatInt(v%1000, 10)
|
||||
switch len(parts[j]) {
|
||||
case 2:
|
||||
parts[j] = "0" + parts[j]
|
||||
case 1:
|
||||
parts[j] = "00" + parts[j]
|
||||
}
|
||||
v = v / 1000
|
||||
j--
|
||||
}
|
||||
parts[j] = strconv.Itoa(int(v))
|
||||
return sign + strings.Join(parts[j:], ",")
|
||||
}
|
||||
|
||||
// Commaf produces a string form of the given number in base 10 with
|
||||
// commas after every three orders of magnitude.
|
||||
//
|
||||
// e.g. Commaf(834142.32) -> 834,142.32
|
||||
func Commaf(v float64) string {
|
||||
buf := &bytes.Buffer{}
|
||||
if v < 0 {
|
||||
buf.Write([]byte{'-'})
|
||||
v = 0 - v
|
||||
}
|
||||
|
||||
comma := []byte{','}
|
||||
|
||||
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
|
||||
pos := 0
|
||||
if len(parts[0])%3 != 0 {
|
||||
pos += len(parts[0]) % 3
|
||||
buf.WriteString(parts[0][:pos])
|
||||
buf.Write(comma)
|
||||
}
|
||||
for ; pos < len(parts[0]); pos += 3 {
|
||||
buf.WriteString(parts[0][pos : pos+3])
|
||||
buf.Write(comma)
|
||||
}
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
|
||||
if len(parts) > 1 {
|
||||
buf.Write([]byte{'.'})
|
||||
buf.WriteString(parts[1])
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// CommafWithDigits works like the Commaf but limits the resulting
|
||||
// string to the given number of decimal places.
|
||||
//
|
||||
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
|
||||
func CommafWithDigits(f float64, decimals int) string {
|
||||
return stripTrailingDigits(Commaf(f), decimals)
|
||||
}
|
||||
|
||||
// BigComma produces a string form of the given big.Int in base 10
|
||||
// with commas after every three orders of magnitude.
|
||||
func BigComma(b *big.Int) string {
|
||||
sign := ""
|
||||
if b.Sign() < 0 {
|
||||
sign = "-"
|
||||
b.Abs(b)
|
||||
}
|
||||
|
||||
athousand := big.NewInt(1000)
|
||||
c := (&big.Int{}).Set(b)
|
||||
_, m := oom(c, athousand)
|
||||
parts := make([]string, m+1)
|
||||
j := len(parts) - 1
|
||||
|
||||
mod := &big.Int{}
|
||||
for b.Cmp(athousand) >= 0 {
|
||||
b.DivMod(b, athousand, mod)
|
||||
parts[j] = strconv.FormatInt(mod.Int64(), 10)
|
||||
switch len(parts[j]) {
|
||||
case 2:
|
||||
parts[j] = "0" + parts[j]
|
||||
case 1:
|
||||
parts[j] = "00" + parts[j]
|
||||
}
|
||||
j--
|
||||
}
|
||||
parts[j] = strconv.Itoa(int(b.Int64()))
|
||||
return sign + strings.Join(parts[j:], ",")
|
||||
}
|
40 vendor/github.com/dustin/go-humanize/commaf.go generated vendored Normal file
@ -0,0 +1,40 @@
|
||||
// +build go1.6
|
||||
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/big"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BigCommaf produces a string form of the given big.Float in base 10
|
||||
// with commas after every three orders of magnitude.
|
||||
func BigCommaf(v *big.Float) string {
|
||||
buf := &bytes.Buffer{}
|
||||
if v.Sign() < 0 {
|
||||
buf.Write([]byte{'-'})
|
||||
v.Abs(v)
|
||||
}
|
||||
|
||||
comma := []byte{','}
|
||||
|
||||
parts := strings.Split(v.Text('f', -1), ".")
|
||||
pos := 0
|
||||
if len(parts[0])%3 != 0 {
|
||||
pos += len(parts[0]) % 3
|
||||
buf.WriteString(parts[0][:pos])
|
||||
buf.Write(comma)
|
||||
}
|
||||
for ; pos < len(parts[0]); pos += 3 {
|
||||
buf.WriteString(parts[0][pos : pos+3])
|
||||
buf.Write(comma)
|
||||
}
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
|
||||
if len(parts) > 1 {
|
||||
buf.Write([]byte{'.'})
|
||||
buf.WriteString(parts[1])
|
||||
}
|
||||
return buf.String()
|
||||
}
|
46 vendor/github.com/dustin/go-humanize/ftoa.go generated vendored Normal file
@ -0,0 +1,46 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func stripTrailingZeros(s string) string {
|
||||
offset := len(s) - 1
|
||||
for offset > 0 {
|
||||
if s[offset] == '.' {
|
||||
offset--
|
||||
break
|
||||
}
|
||||
if s[offset] != '0' {
|
||||
break
|
||||
}
|
||||
offset--
|
||||
}
|
||||
return s[:offset+1]
|
||||
}
|
||||
|
||||
func stripTrailingDigits(s string, digits int) string {
|
||||
if i := strings.Index(s, "."); i >= 0 {
|
||||
if digits <= 0 {
|
||||
return s[:i]
|
||||
}
|
||||
i++
|
||||
if i+digits >= len(s) {
|
||||
return s
|
||||
}
|
||||
return s[:i+digits]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Ftoa converts a float to a string with no trailing zeros.
|
||||
func Ftoa(num float64) string {
|
||||
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
|
||||
}
|
||||
|
||||
// FtoaWithDigits converts a float to a string but limits the resulting string
|
||||
// to the given number of decimal places, and no trailing zeros.
|
||||
func FtoaWithDigits(num float64, digits int) string {
|
||||
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
|
||||
}
|
8 vendor/github.com/dustin/go-humanize/humanize.go generated vendored Normal file
@ -0,0 +1,8 @@
/*
Package humanize converts boring ugly numbers to human-friendly strings and back.

Durations can be turned into strings such as "3 days ago", numbers
representing sizes like 82854982 into useful strings like, "83 MB" or
"79 MiB" (whichever you prefer).
*/
package humanize
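The hunks above add the whole go-humanize package to the vendor tree; as a quick orientation, here is a minimal sketch of the size and time helpers it exposes. The expected outputs in the comments are the ones documented in the vendored README and doc comments, and the program itself is illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"github.com/dustin/go-humanize"
)

func main() {
	// SI and IEC size formatting (README: 82854982 -> "83 MB" / "79 MiB").
	fmt.Println(humanize.Bytes(82854982))  // 83 MB
	fmt.Println(humanize.IBytes(82854982)) // 79 MiB

	// Relative time formatting.
	fmt.Println(humanize.Time(time.Now().Add(-7 * time.Hour))) // 7 hours ago
}
```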
192 vendor/github.com/dustin/go-humanize/number.go generated vendored Normal file
@ -0,0 +1,192 @@
|
||||
package humanize
|
||||
|
||||
/*
|
||||
Slightly adapted from the source to fit go-humanize.
|
||||
|
||||
Author: https://github.com/gorhill
|
||||
Source: https://gist.github.com/gorhill/5285193
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
|
||||
renderFloatPrecisionMultipliers = [...]float64{
|
||||
1,
|
||||
10,
|
||||
100,
|
||||
1000,
|
||||
10000,
|
||||
100000,
|
||||
1000000,
|
||||
10000000,
|
||||
100000000,
|
||||
1000000000,
|
||||
}
|
||||
|
||||
renderFloatPrecisionRounders = [...]float64{
|
||||
0.5,
|
||||
0.05,
|
||||
0.005,
|
||||
0.0005,
|
||||
0.00005,
|
||||
0.000005,
|
||||
0.0000005,
|
||||
0.00000005,
|
||||
0.000000005,
|
||||
0.0000000005,
|
||||
}
|
||||
)
|
||||
|
||||
// FormatFloat produces a formatted number as string based on the following user-specified criteria:
|
||||
// * thousands separator
|
||||
// * decimal separator
|
||||
// * decimal precision
|
||||
//
|
||||
// Usage: s := RenderFloat(format, n)
|
||||
// The format parameter tells how to render the number n.
|
||||
//
|
||||
// See examples: http://play.golang.org/p/LXc1Ddm1lJ
|
||||
//
|
||||
// Examples of format strings, given n = 12345.6789:
|
||||
// "#,###.##" => "12,345.67"
|
||||
// "#,###." => "12,345"
|
||||
// "#,###" => "12345,678"
|
||||
// "#\u202F###,##" => "12 345,68"
|
||||
// "#.###,###### => 12.345,678900
|
||||
// "" (aka default format) => 12,345.67
|
||||
//
|
||||
// The highest precision allowed is 9 digits after the decimal symbol.
|
||||
// There is also a version for integer number, FormatInteger(),
|
||||
// which is convenient for calls within template.
|
||||
func FormatFloat(format string, n float64) string {
|
||||
// Special cases:
|
||||
// NaN = "NaN"
|
||||
// +Inf = "+Infinity"
|
||||
// -Inf = "-Infinity"
|
||||
if math.IsNaN(n) {
|
||||
return "NaN"
|
||||
}
|
||||
if n > math.MaxFloat64 {
|
||||
return "Infinity"
|
||||
}
|
||||
if n < -math.MaxFloat64 {
|
||||
return "-Infinity"
|
||||
}
|
||||
|
||||
// default format
|
||||
precision := 2
|
||||
decimalStr := "."
|
||||
thousandStr := ","
|
||||
positiveStr := ""
|
||||
negativeStr := "-"
|
||||
|
||||
if len(format) > 0 {
|
||||
format := []rune(format)
|
||||
|
||||
// If there is an explicit format directive,
|
||||
// then default values are these:
|
||||
precision = 9
|
||||
thousandStr = ""
|
||||
|
||||
// collect indices of meaningful formatting directives
|
||||
formatIndx := []int{}
|
||||
for i, char := range format {
|
||||
if char != '#' && char != '0' {
|
||||
formatIndx = append(formatIndx, i)
|
||||
}
|
||||
}
|
||||
|
||||
if len(formatIndx) > 0 {
|
||||
// Directive at index 0:
|
||||
// Must be a '+'
|
||||
// Raise an error if not the case
|
||||
// index: 0123456789
|
||||
// +0.000,000
|
||||
// +000,000.0
|
||||
// +0000.00
|
||||
// +0000
|
||||
if formatIndx[0] == 0 {
|
||||
if format[formatIndx[0]] != '+' {
|
||||
panic("RenderFloat(): invalid positive sign directive")
|
||||
}
|
||||
positiveStr = "+"
|
||||
formatIndx = formatIndx[1:]
|
||||
}
|
||||
|
||||
// Two directives:
|
||||
// First is thousands separator
|
||||
// Raise an error if not followed by 3-digit
|
||||
// 0123456789
|
||||
// 0.000,000
|
||||
// 000,000.00
|
||||
if len(formatIndx) == 2 {
|
||||
if (formatIndx[1] - formatIndx[0]) != 4 {
|
||||
panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
|
||||
}
|
||||
thousandStr = string(format[formatIndx[0]])
|
||||
formatIndx = formatIndx[1:]
|
||||
}
|
||||
|
||||
// One directive:
|
||||
// Directive is decimal separator
|
||||
// The number of digit-specifier following the separator indicates wanted precision
|
||||
// 0123456789
|
||||
// 0.00
|
||||
// 000,0000
|
||||
if len(formatIndx) == 1 {
|
||||
decimalStr = string(format[formatIndx[0]])
|
||||
precision = len(format) - formatIndx[0] - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// generate sign part
|
||||
var signStr string
|
||||
if n >= 0.000000001 {
|
||||
signStr = positiveStr
|
||||
} else if n <= -0.000000001 {
|
||||
signStr = negativeStr
|
||||
n = -n
|
||||
} else {
|
||||
signStr = ""
|
||||
n = 0.0
|
||||
}
|
||||
|
||||
// split number into integer and fractional parts
|
||||
intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
|
||||
|
||||
// generate integer part string
|
||||
intStr := strconv.FormatInt(int64(intf), 10)
|
||||
|
||||
// add thousand separator if required
|
||||
if len(thousandStr) > 0 {
|
||||
for i := len(intStr); i > 3; {
|
||||
i -= 3
|
||||
intStr = intStr[:i] + thousandStr + intStr[i:]
|
||||
}
|
||||
}
|
||||
|
||||
// no fractional part, we can leave now
|
||||
if precision == 0 {
|
||||
return signStr + intStr
|
||||
}
|
||||
|
||||
// generate fractional part
|
||||
fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
|
||||
// may need padding
|
||||
if len(fracStr) < precision {
|
||||
fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
|
||||
}
|
||||
|
||||
return signStr + intStr + decimalStr + fracStr
|
||||
}
|
||||
|
||||
// FormatInteger produces a formatted number as string.
|
||||
// See FormatFloat.
|
||||
func FormatInteger(format string, n int) string {
|
||||
return FormatFloat(format, float64(n))
|
||||
}
|
25 vendor/github.com/dustin/go-humanize/ordinals.go generated vendored Normal file
@ -0,0 +1,25 @@
|
||||
package humanize
|
||||
|
||||
import "strconv"
|
||||
|
||||
// Ordinal gives you the input number in a rank/ordinal format.
|
||||
//
|
||||
// Ordinal(3) -> 3rd
|
||||
func Ordinal(x int) string {
|
||||
suffix := "th"
|
||||
switch x % 10 {
|
||||
case 1:
|
||||
if x%100 != 11 {
|
||||
suffix = "st"
|
||||
}
|
||||
case 2:
|
||||
if x%100 != 12 {
|
||||
suffix = "nd"
|
||||
}
|
||||
case 3:
|
||||
if x%100 != 13 {
|
||||
suffix = "rd"
|
||||
}
|
||||
}
|
||||
return strconv.Itoa(x) + suffix
|
||||
}
|
123 vendor/github.com/dustin/go-humanize/si.go generated vendored Normal file
@ -0,0 +1,123 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var siPrefixTable = map[float64]string{
|
||||
-24: "y", // yocto
|
||||
-21: "z", // zepto
|
||||
-18: "a", // atto
|
||||
-15: "f", // femto
|
||||
-12: "p", // pico
|
||||
-9: "n", // nano
|
||||
-6: "µ", // micro
|
||||
-3: "m", // milli
|
||||
0: "",
|
||||
3: "k", // kilo
|
||||
6: "M", // mega
|
||||
9: "G", // giga
|
||||
12: "T", // tera
|
||||
15: "P", // peta
|
||||
18: "E", // exa
|
||||
21: "Z", // zetta
|
||||
24: "Y", // yotta
|
||||
}
|
||||
|
||||
var revSIPrefixTable = revfmap(siPrefixTable)
|
||||
|
||||
// revfmap reverses the map and precomputes the power multiplier
|
||||
func revfmap(in map[float64]string) map[string]float64 {
|
||||
rv := map[string]float64{}
|
||||
for k, v := range in {
|
||||
rv[v] = math.Pow(10, k)
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
var riParseRegex *regexp.Regexp
|
||||
|
||||
func init() {
|
||||
ri := `^([\-0-9.]+)\s?([`
|
||||
for _, v := range siPrefixTable {
|
||||
ri += v
|
||||
}
|
||||
ri += `]?)(.*)`
|
||||
|
||||
riParseRegex = regexp.MustCompile(ri)
|
||||
}
|
||||
|
||||
// ComputeSI finds the most appropriate SI prefix for the given number
|
||||
// and returns the prefix along with the value adjusted to be within
|
||||
// that prefix.
|
||||
//
|
||||
// See also: SI, ParseSI.
|
||||
//
|
||||
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
|
||||
func ComputeSI(input float64) (float64, string) {
|
||||
if input == 0 {
|
||||
return 0, ""
|
||||
}
|
||||
mag := math.Abs(input)
|
||||
exponent := math.Floor(logn(mag, 10))
|
||||
exponent = math.Floor(exponent/3) * 3
|
||||
|
||||
value := mag / math.Pow(10, exponent)
|
||||
|
||||
// Handle special case where value is exactly 1000.0
|
||||
// Should return 1 M instead of 1000 k
|
||||
if value == 1000.0 {
|
||||
exponent += 3
|
||||
value = mag / math.Pow(10, exponent)
|
||||
}
|
||||
|
||||
value = math.Copysign(value, input)
|
||||
|
||||
prefix := siPrefixTable[exponent]
|
||||
return value, prefix
|
||||
}
|
||||
|
||||
// SI returns a string with default formatting.
|
||||
//
|
||||
// SI uses Ftoa to format float value, removing trailing zeros.
|
||||
//
|
||||
// See also: ComputeSI, ParseSI.
|
||||
//
|
||||
// e.g. SI(1000000, "B") -> 1 MB
|
||||
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
|
||||
func SI(input float64, unit string) string {
|
||||
value, prefix := ComputeSI(input)
|
||||
return Ftoa(value) + " " + prefix + unit
|
||||
}
|
||||
|
||||
// SIWithDigits works like SI but limits the resulting string to the
|
||||
// given number of decimal places.
|
||||
//
|
||||
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
|
||||
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
|
||||
func SIWithDigits(input float64, decimals int, unit string) string {
|
||||
value, prefix := ComputeSI(input)
|
||||
return FtoaWithDigits(value, decimals) + " " + prefix + unit
|
||||
}
|
||||
|
||||
var errInvalid = errors.New("invalid input")
|
||||
|
||||
// ParseSI parses an SI string back into the number and unit.
|
||||
//
|
||||
// See also: SI, ComputeSI.
|
||||
//
|
||||
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
|
||||
func ParseSI(input string) (float64, string, error) {
|
||||
found := riParseRegex.FindStringSubmatch(input)
|
||||
if len(found) != 4 {
|
||||
return 0, "", errInvalid
|
||||
}
|
||||
mag := revSIPrefixTable[found[2]]
|
||||
unit := found[3]
|
||||
|
||||
base, err := strconv.ParseFloat(found[1], 64)
|
||||
return base * mag, unit, err
|
||||
}
|
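The si.go hunk just above documents SI, SIWithDigits and ParseSI; a short usage sketch based purely on those doc comments (expected outputs taken from them, program illustrative only) might look like this.

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// Automatic SI prefix selection.
	fmt.Println(humanize.SI(1000000, "B"))                 // 1 MB
	fmt.Println(humanize.SIWithDigits(2.2345e-12, 2, "F")) // 2.23 pF

	// Parse an SI string back into a float64 plus the remaining unit.
	value, unit, err := humanize.ParseSI("2.2345 pF")
	if err != nil {
		panic(err)
	}
	fmt.Println(value, unit) // 2.2345e-12 F
}
```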
117 vendor/github.com/dustin/go-humanize/times.go generated vendored Normal file
@ -0,0 +1,117 @@
|
||||
package humanize
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Seconds-based time units
|
||||
const (
|
||||
Day = 24 * time.Hour
|
||||
Week = 7 * Day
|
||||
Month = 30 * Day
|
||||
Year = 12 * Month
|
||||
LongTime = 37 * Year
|
||||
)
|
||||
|
||||
// Time formats a time into a relative string.
|
||||
//
|
||||
// Time(someT) -> "3 weeks ago"
|
||||
func Time(then time.Time) string {
|
||||
return RelTime(then, time.Now(), "ago", "from now")
|
||||
}
|
||||
|
||||
// A RelTimeMagnitude struct contains a relative time point at which
|
||||
// the relative format of time will switch to a new format string. A
|
||||
// slice of these in ascending order by their "D" field is passed to
|
||||
// CustomRelTime to format durations.
|
||||
//
|
||||
// The Format field is a string that may contain a "%s" which will be
|
||||
// replaced with the appropriate signed label (e.g. "ago" or "from
|
||||
// now") and a "%d" that will be replaced by the quantity.
|
||||
//
|
||||
// The DivBy field is the amount of time the time difference must be
|
||||
// divided by in order to display correctly.
|
||||
//
|
||||
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
|
||||
// DivBy should be time.Minute so whatever the duration is will be
|
||||
// expressed in minutes.
|
||||
type RelTimeMagnitude struct {
|
||||
D time.Duration
|
||||
Format string
|
||||
DivBy time.Duration
|
||||
}
|
||||
|
||||
var defaultMagnitudes = []RelTimeMagnitude{
|
||||
{time.Second, "now", time.Second},
|
||||
{2 * time.Second, "1 second %s", 1},
|
||||
{time.Minute, "%d seconds %s", time.Second},
|
||||
{2 * time.Minute, "1 minute %s", 1},
|
||||
{time.Hour, "%d minutes %s", time.Minute},
|
||||
{2 * time.Hour, "1 hour %s", 1},
|
||||
{Day, "%d hours %s", time.Hour},
|
||||
{2 * Day, "1 day %s", 1},
|
||||
{Week, "%d days %s", Day},
|
||||
{2 * Week, "1 week %s", 1},
|
||||
{Month, "%d weeks %s", Week},
|
||||
{2 * Month, "1 month %s", 1},
|
||||
{Year, "%d months %s", Month},
|
||||
{18 * Month, "1 year %s", 1},
|
||||
{2 * Year, "2 years %s", 1},
|
||||
{LongTime, "%d years %s", Year},
|
||||
{math.MaxInt64, "a long while %s", 1},
|
||||
}
|
||||
|
||||
// RelTime formats a time into a relative string.
|
||||
//
|
||||
// It takes two times and two labels. In addition to the generic time
|
||||
// delta string (e.g. 5 minutes), the labels are used applied so that
|
||||
// the label corresponding to the smaller time is applied.
|
||||
//
|
||||
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
|
||||
func RelTime(a, b time.Time, albl, blbl string) string {
|
||||
return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
|
||||
}
|
||||
|
||||
// CustomRelTime formats a time into a relative string.
|
||||
//
|
||||
// It takes two times two labels and a table of relative time formats.
|
||||
// In addition to the generic time delta string (e.g. 5 minutes), the
|
||||
// labels are used applied so that the label corresponding to the
|
||||
// smaller time is applied.
|
||||
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
|
||||
lbl := albl
|
||||
diff := b.Sub(a)
|
||||
|
||||
if a.After(b) {
|
||||
lbl = blbl
|
||||
diff = a.Sub(b)
|
||||
}
|
||||
|
||||
n := sort.Search(len(magnitudes), func(i int) bool {
|
||||
return magnitudes[i].D > diff
|
||||
})
|
||||
|
||||
if n >= len(magnitudes) {
|
||||
n = len(magnitudes) - 1
|
||||
}
|
||||
mag := magnitudes[n]
|
||||
args := []interface{}{}
|
||||
escaped := false
|
||||
for _, ch := range mag.Format {
|
||||
if escaped {
|
||||
switch ch {
|
||||
case 's':
|
||||
args = append(args, lbl)
|
||||
case 'd':
|
||||
args = append(args, diff/mag.DivBy)
|
||||
}
|
||||
escaped = false
|
||||
} else {
|
||||
escaped = ch == '%'
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf(mag.Format, args...)
|
||||
}
|
304 vendor/github.com/klauspost/compress/LICENSE generated vendored Normal file
@ -0,0 +1,304 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
------------------
|
||||
|
||||
Files: gzhttp/*
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016-2017 The New York Times Company
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
------------------
|
||||
|
||||
Files: s2/cmd/internal/readahead/*
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Klaus Post
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
---------------------
|
||||
Files: snappy/*
|
||||
Files: internal/snapref/*
|
||||
|
||||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
-----------------
|
||||
|
||||
Files: s2/cmd/internal/filepathx/*
|
||||
|
||||
Copyright 2016 The filepathx Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
15 vendor/github.com/klauspost/compress/s2/.gitignore generated vendored Normal file
@ -0,0 +1,15 @@
testdata/bench

# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K
28 vendor/github.com/klauspost/compress/s2/LICENSE generated vendored Normal file
@ -0,0 +1,28 @@
|
||||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
717 vendor/github.com/klauspost/compress/s2/README.md generated vendored Normal file
@ -0,0 +1,717 @@
|
||||
# S2 Compression
|
||||
|
||||
S2 is an extension of [Snappy](https://github.com/google/snappy).
|
||||
|
||||
S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads.
|
||||
|
||||
Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy.
|
||||
This means that S2 can seamlessly replace Snappy without converting compressed content.
|
||||
|
||||
S2 can produce Snappy compatible output, faster and better than Snappy.
|
||||
If you want full benefit of the changes you should use s2 without Snappy compatibility.
|
||||
|
||||
S2 is designed to have high throughput on content that cannot be compressed.
|
||||
This is important, so you don't have to worry about spending CPU cycles on already compressed data.
|
||||
|
||||
## Benefits over Snappy
|
||||
|
||||
* Better compression
|
||||
* Adjustable compression (3 levels)
|
||||
* Concurrent stream compression
|
||||
* Faster decompression, even for Snappy compatible content
|
||||
* Ability to quickly skip forward in compressed stream
|
||||
* Compatible with reading Snappy compressed content
|
||||
* Smaller block size overhead on incompressible blocks
|
||||
* Block concatenation
|
||||
* Uncompressed stream mode
|
||||
* Automatic stream size padding
|
||||
* Snappy compatible block compression
|
||||
|
||||
## Drawbacks over Snappy
|
||||
|
||||
* Not optimized for 32 bit systems.
|
||||
* Streams use slightly more memory due to larger blocks and concurrency (configurable).
|
||||
|
||||
# Usage
|
||||
|
||||
Installation: `go get -u github.com/klauspost/compress/s2`
|
||||
|
||||
Full package documentation:
|
||||
|
||||
[![godoc][1]][2]
|
||||
|
||||
[1]: https://godoc.org/github.com/klauspost/compress?status.svg
|
||||
[2]: https://godoc.org/github.com/klauspost/compress/s2
|
||||
|
||||
## Compression
|
||||
|
||||
```Go
|
||||
func EncodeStream(src io.Reader, dst io.Writer) error {
|
||||
enc := s2.NewWriter(dst)
|
||||
_, err := io.Copy(enc, src)
|
||||
if err != nil {
|
||||
enc.Close()
|
||||
return err
|
||||
}
|
||||
// Blocks until compression is done.
|
||||
return enc.Close()
|
||||
}
|
||||
```
|
||||
|
||||
You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete.
|
||||
|
||||
For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
|
||||
|
||||
The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
|
||||
It is possible to flush any buffered data using the `Flush()` method.
|
||||
This will block until all data sent to the encoder has been written to the output.
|
||||
|
||||
S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
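
For illustration, a minimal sketch of feeding input into the encoder through `io.ReaderFrom`; error handling is kept short and the reader/writer are placeholders:

```Go
// Sketch: compress a stream by letting the S2 writer pull from the source itself.
func encodeReaderFrom(src io.Reader, dst io.Writer) error {
	enc := s2.NewWriter(dst)
	// ReadFrom consumes src until EOF, compressing as it goes.
	if _, err := enc.ReadFrom(src); err != nil {
		enc.Close()
		return err
	}
	// Close flushes remaining blocks and finishes the stream.
	return enc.Close()
}
```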
|
||||
|
||||
As a final method to compress data, if you have a single block of data you would like to have encoded as a stream,
|
||||
a slightly more efficient method is to use the `EncodeBuffer` method.
|
||||
This will take ownership of the buffer until the stream is closed.
|
||||
|
||||
```Go
|
||||
func EncodeStream(src []byte, dst io.Writer) error {
|
||||
enc := s2.NewWriter(dst)
|
||||
// The encoder owns the buffer until Flush or Close is called.
|
||||
err := enc.EncodeBuffer(buf)
|
||||
if err != nil {
|
||||
enc.Close()
|
||||
return err
|
||||
}
|
||||
// Blocks until compression is done.
|
||||
return enc.Close()
|
||||
}
|
||||
```
|
||||
|
||||
Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
|
||||
so it should only be used a single time per stream.
|
||||
If you need to write several blocks, you should use the regular io.Writer interface.
|
||||
|
||||
|
||||
## Decompression
|
||||
|
||||
```Go
|
||||
func DecodeStream(src io.Reader, dst io.Writer) error {
|
||||
dec := s2.NewReader(src)
|
||||
_, err := io.Copy(dst, dec)
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
Similar to the Writer, a Reader can be reused using the `Reset` method.
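
As a small sketch, reusing one `Reader` across several independent streams could look like this (the input slice is a placeholder):

```Go
// Sketch: decode many independent streams while reusing a single Reader.
func decodeAll(streams []io.Reader, dst io.Writer) error {
	dec := s2.NewReader(nil)
	for _, src := range streams {
		dec.Reset(src) // discards buffered state, avoids reallocating buffers
		if _, err := io.Copy(dst, dec); err != nil {
			return err
		}
	}
	return nil
}
```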
|
||||
|
||||
For the best possible throughput, there is an `EncodeBuffer(buf []byte)` function available.
|
||||
However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
|
||||
|
||||
For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`.
|
||||
Do however note that these functions (similar to Snappy) do not provide validation of data,
|
||||
so data corruption may be undetected. Stream encoding provides CRC checks of data.
|
||||
|
||||
It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
|
||||
For big skips the decompressor is able to skip blocks without decompressing them.
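
A short sketch of skipping ahead before reading; the offset here is made up for illustration:

```Go
// Sketch: skip the first 1 GiB of decompressed output, then read what follows.
func readTail(src io.Reader, dst io.Writer) error {
	dec := s2.NewReader(src)
	// Skip operates on decompressed offsets; whole blocks are skipped
	// without being decompressed when possible.
	if err := dec.Skip(1 << 30); err != nil {
		return err
	}
	_, err := io.Copy(dst, dec)
	return err
}
```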
|
||||
|
||||
## Single Blocks
|
||||
|
||||
Similar to Snappy, S2 offers single block compression.
|
||||
Blocks do not offer the same flexibility and safety as streams,
|
||||
but may be preferable for very small payloads, less than 100K.
|
||||
|
||||
Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
|
||||
It is possible to provide a destination buffer.
|
||||
If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
|
||||
If not, a new one will be allocated.
|
||||
|
||||
Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression.
|
||||
|
||||
Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
|
||||
Again an optional destination buffer can be supplied.
|
||||
`s2.DecodedLen(src)` can be used to get the minimum capacity needed.
|
||||
If that is not satisfied a new buffer will be allocated.
|
||||
|
||||
Block functions always operate on a single goroutine since they should only be used for small payloads.
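
A small round-trip sketch of the block interface described above (error handling kept minimal):

```Go
// Sketch: compress and decompress a single small payload as a block.
func roundTrip(src []byte) ([]byte, error) {
	// Providing a destination with MaxEncodedLen capacity lets Encode reuse it.
	dst := make([]byte, 0, s2.MaxEncodedLen(len(src)))
	compressed := s2.Encode(dst, src)

	// Decode validates the length header but carries no checksum;
	// use streams if CRC protection is required.
	return s2.Decode(nil, compressed)
}
```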
|
||||
|
||||
# Commandline tools
|
||||
|
||||
Some very simple command-line tools are provided: `s2c` for compression and `s2d` for decompression.
|
||||
|
||||
Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases).
|
||||
|
||||
Installing them from source requires Go. To install the tools, use:
|
||||
|
||||
`go install github.com/klauspost/compress/s2/cmd/s2c && go install github.com/klauspost/compress/s2/cmd/s2d`
|
||||
|
||||
To build binaries to the current folder use:
|
||||
|
||||
`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
|
||||
|
||||
|
||||
## s2c
|
||||
|
||||
```
|
||||
Usage: s2c [options] file1 file2
|
||||
|
||||
Compresses all files supplied as input separately.
|
||||
Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
|
||||
By default output files will be overwritten.
|
||||
Use - as the only file name to read from stdin and write to stdout.
|
||||
|
||||
Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
|
||||
Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
|
||||
|
||||
File names beginning with 'http://' and 'https://' will be downloaded and compressed.
|
||||
Only http response code 200 is accepted.
|
||||
|
||||
Options:
|
||||
-bench int
|
||||
Run benchmark n times. No output will be written
|
||||
-blocksize string
|
||||
Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
|
||||
-c Write all output to stdout. Multiple input files will be concatenated
|
||||
-cpu int
|
||||
Compress using this amount of threads (default 32)
|
||||
-faster
|
||||
Compress faster, but with a minor compression loss
|
||||
-help
|
||||
Display help
|
||||
-o string
|
||||
Write output to another file. Single input file only
|
||||
-pad string
|
||||
Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
|
||||
-q Don't write any output to terminal, except errors
|
||||
-rm
|
||||
Delete source file(s) after successful compression
|
||||
-safe
|
||||
Do not overwrite output files
|
||||
-slower
|
||||
Compress more, but a lot slower
|
||||
-snappy
|
||||
Generate Snappy compatible output stream
|
||||
-verify
|
||||
Verify written files
|
||||
|
||||
```
|
||||
|
||||
## s2d
|
||||
|
||||
```
|
||||
Usage: s2d [options] file1 file2
|
||||
|
||||
Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
|
||||
Output file names have the extension removed. By default output files will be overwritten.
|
||||
Use - as the only file name to read from stdin and write to stdout.
|
||||
|
||||
Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
|
||||
Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
|
||||
|
||||
File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
|
||||
Extensions on downloaded files are ignored. Only http response code 200 is accepted.
|
||||
|
||||
Options:
|
||||
-bench int
|
||||
Run benchmark n times. No output will be written
|
||||
-c Write all output to stdout. Multiple input files will be concatenated
|
||||
-help
|
||||
Display help
|
||||
-o string
|
||||
Write output to another file. Single input file only
|
||||
-q Don't write any output to terminal, except errors
|
||||
-rm
|
||||
Delete source file(s) after successful decompression
|
||||
-safe
|
||||
Do not overwrite output files
|
||||
-verify
|
||||
Verify files, but do not write output
|
||||
```
|
||||
|
||||
## s2sx: self-extracting archives
|
||||
|
||||
s2sx allows creating self-extracting archives with no dependencies.
|
||||
|
||||
By default, executables are created for the same platforms as the host os,
|
||||
but this can be overridden with `-os` and `-arch` parameters.
|
||||
|
||||
Extracted files have 0666 permissions, except when the untar option is used.
|
||||
|
||||
```
|
||||
Usage: s2sx [options] file1 file2
|
||||
|
||||
Compresses all files supplied as input separately.
|
||||
If files have '.s2' extension they are assumed to be compressed already.
|
||||
Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
|
||||
If output is big, an additional file with ".more" is written. This must be included as well.
|
||||
By default output files will be overwritten.
|
||||
|
||||
Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
|
||||
Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
|
||||
|
||||
Options:
|
||||
-arch string
|
||||
Destination architecture (default "amd64")
|
||||
-c Write all output to stdout. Multiple input files will be concatenated
|
||||
-cpu int
|
||||
Compress using this amount of threads (default 32)
|
||||
-help
|
||||
Display help
|
||||
-max string
|
||||
Maximum executable size. Rest will be written to another file. (default "1G")
|
||||
-os string
|
||||
Destination operating system (default "windows")
|
||||
-q Don't write any output to terminal, except errors
|
||||
-rm
|
||||
Delete source file(s) after successful compression
|
||||
-safe
|
||||
Do not overwrite output files
|
||||
-untar
|
||||
Untar on destination
|
||||
```
|
||||
|
||||
Available platforms are:
|
||||
|
||||
* darwin-amd64
|
||||
* darwin-arm64
|
||||
* linux-amd64
|
||||
* linux-arm
|
||||
* linux-arm64
|
||||
* linux-mips64
|
||||
* linux-ppc64le
|
||||
* windows-386
|
||||
* windows-amd64
|
||||
|
||||
By default, there is a size limit of 1GB for the output executable.
|
||||
|
||||
When this is exceeded, the remaining file content is written to a file called
output+`.more`. This file must be included and placed alongside the executable
for a successful extraction.
|
||||
|
||||
This file *must* have the same name as the executable, so if the executable is renamed,
|
||||
so must the `.more` file.
|
||||
|
||||
This functionality is disabled with stdin/stdout.
|
||||
|
||||
### Self-extracting TAR files
|
||||
|
||||
If you wrap a TAR file you can specify `-untar` to make it untar on the destination host.
|
||||
|
||||
Files are extracted to the current folder with the path specified in the tar file.
|
||||
|
||||
Note that tar files are not validated before they are wrapped.
|
||||
|
||||
For security reasons files that move below the root folder are not allowed.
|
||||
|
||||
# Performance
|
||||
|
||||
This section will focus on comparisons to Snappy.
|
||||
This package is solely aimed at replacing Snappy as a high speed compression package.
|
||||
If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
|
||||
gives better compression, but typically at speeds slightly below "better" mode in this package.
|
||||
|
||||
Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation.
|
||||
|
||||
Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
|
||||
|
||||
A "better" compression mode is also available. This allows to trade a bit of speed for a minor compression gain.
|
||||
The content compressed in this mode is fully compatible with the standard decoder.
|
||||
|
||||
Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
|
||||
|
||||
| File | S2 speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
|
||||
|-----------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------|
|
||||
| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 12.70x | 10556 MB/s | 7.35% | 4.15x | 3455 MB/s | 12.79% |
|
||||
| (1 CPU) | 1.14x | 948 MB/s | - | 0.42x | 349 MB/s | - |
|
||||
| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 17.13x | 14484 MB/s | 31.60% | 10.09x | 8533 MB/s | 37.71% |
|
||||
| (1 CPU) | 1.33x | 1127 MB/s | - | 0.70x | 589 MB/s | - |
|
||||
| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12000 MB/s | -5.79% | 6.59x | 5223 MB/s | 5.80% |
|
||||
| (1 CPU) | 1.11x | 877 MB/s | - | 0.47x | 370 MB/s | - |
|
||||
| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 14.62x | 12116 MB/s | 15.90% | 5.35x | 4430 MB/s | 16.08% |
|
||||
| (1 CPU) | 1.38x | 1146 MB/s | - | 0.38x | 312 MB/s | - |
|
||||
| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 8.83x | 17579 MB/s | 43.86% | 6.54x | 13011 MB/s | 47.23% |
|
||||
| (1 CPU) | 1.14x | 2259 MB/s | - | 0.74x | 1475 MB/s | - |
|
||||
| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 16.72x | 14019 MB/s | 24.02% | 10.11x | 8477 MB/s | 30.48% |
|
||||
| (1 CPU) | 1.24x | 1043 MB/s | - | 0.70x | 586 MB/s | - |
|
||||
| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9254 MB/s | 1.84% | 6.75x | 4686 MB/s | 6.72% |
|
||||
| (1 CPU) | 0.97x | 672 MB/s | - | 0.53x | 366 MB/s | - |
|
||||
| sharnd.out.2gb | 2.11x | 12639 MB/s | 0.01% | 1.98x | 11833 MB/s | 0.01% |
|
||||
| (1 CPU) | 0.93x | 5594 MB/s | - | 1.34x | 8030 MB/s | - |
|
||||
| [enwik9](http://mattmahoney.net/dc/textdata.html) | 19.34x | 8220 MB/s | 3.98% | 7.87x | 3345 MB/s | 15.82% |
|
||||
| (1 CPU) | 1.06x | 452 MB/s | - | 0.50x | 213 MB/s | - |
|
||||
| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 10.48x | 6124 MB/s | 5.67% | 3.76x | 2197 MB/s | 12.60% |
|
||||
| (1 CPU) | 0.97x | 568 MB/s | - | 0.46x | 271 MB/s | - |
|
||||
| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 21.07x | 9020 MB/s | 6.36% | 6.91x | 2959 MB/s | 16.95% |
|
||||
| (1 CPU) | 1.07x | 460 MB/s | - | 0.51x | 220 MB/s | - |
|
||||
|
||||
### Legend
|
||||
|
||||
* `S2 speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
|
||||
* `S2 throughput`: Throughput of S2 in MB/s.
|
||||
* `S2 % smaller`: How many percent of the Snappy output size is S2 better.
|
||||
* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
|
||||
* `"better" throughput`: Speed when enabling "better" compression mode in S2 compared to Snappy.
|
||||
* `"better" % smaller`: How many percent of the Snappy output size is S2 better when using "better" compression.
|
||||
|
||||
There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
|
||||
|
||||
Machine generated data gets by far the biggest compression boost, with size being reduced by up to 45% of the Snappy size.
|
||||
|
||||
The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
|
||||
|
||||
Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
|
||||
This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above).
|
||||
|
||||
## Decompression
|
||||
|
||||
S2 attempts to create content that is also fast to decompress, except in "better" mode where the smallest representation is used.
|
||||
|
||||
S2 vs Snappy **decompression** speed. Both operating on single core:
|
||||
|
||||
| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
|
||||
|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
|
||||
| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
|
||||
| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
|
||||
| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
|
||||
| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
|
||||
| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
|
||||
| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
|
||||
| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
|
||||
| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
|
||||
| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
|
||||
| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
|
||||
| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
|
||||
|
||||
### Legend
|
||||
|
||||
* `S2 Throughput`: Decompression speed of S2 encoded content.
|
||||
* `Better Throughput`: Decompression speed of S2 "better" encoded content.
|
||||
* `vs Snappy`: Decompression speed relative to Snappy for the same input.
|
||||
|
||||
|
||||
While the decompression code hasn't changed, there is a significant speedup in decompression speed.
|
||||
S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
|
||||
While this reduces compression a bit, it improves decompression speed.
|
||||
|
||||
The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
|
||||
|
||||
Decompression is also very fast without assembly. Single goroutine decompression speed, no assembly:
|
||||
|
||||
| File                           | Speedup vs. Snappy | S2 throughput |
|
||||
|--------------------------------|--------------|---------------|
|
||||
| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
|
||||
| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
|
||||
| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
|
||||
| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
|
||||
| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
|
||||
| enwik9.s2 | 1.67x | 681.53 MB/s |
|
||||
| adresser.json.s2 | 3.41x | 4230.53 MB/s |
|
||||
| silesia.tar.s2                 | 1.52x        | 811.58 MB/s   |
|
||||
|
||||
Even though S2 typically compresses better than Snappy, decompression speed is always better.
|
||||
|
||||
## Block compression
|
||||
|
||||
|
||||
When compressing blocks, no concurrent compression is performed, just as with Snappy.
|
||||
This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
|
||||
|
||||
An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
In rare, worst case scenarios Snappy blocks could be significantly bigger than the input.
|
||||
|
||||
### Mixed content blocks
|
||||
|
||||
The most reliable benchmark is a wide dataset.
|
||||
For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
|
||||
53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
|
||||
|
||||
| * | Input | Output | Reduction | MB/s |
|
||||
|-------------------|------------|------------|-----------|--------|
|
||||
| S2 | 4014735833 | 1059723369 | 73.60% | **934.34** |
|
||||
| S2 Better | 4014735833 | 969670507 | 75.85% | 532.70 |
|
||||
| S2 Best | 4014735833 | 906625668 | **77.85%** | 46.84 |
|
||||
| Snappy | 4014735833 | 1128706759 | 71.89% | 762.59 |
|
||||
| S2, Snappy Output | 4014735833 | 1093821420 | 72.75% | 908.60 |
|
||||
| LZ4 | 4014735833 | 1079259294 | 73.12% | 526.94 |
|
||||
|
||||
S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
|
||||
"Better" mode provides the same compression speed as LZ4 with better compression ratio.
|
||||
|
||||
When outputting Snappy compatible output it still delivers better throughput (150MB/s more) and better compression.
|
||||
|
||||
As can be seen from the other benchmarks decompression should also be easier on the S2 generated output.
|
||||
|
||||
Though they cannot be compared directly due to different decompression speeds, here are the speed/size comparisons for
|
||||
other Go compressors:
|
||||
|
||||
| * | Input | Output | Reduction | MB/s |
|
||||
|-------------------|------------|------------|-----------|--------|
|
||||
| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
|
||||
| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
|
||||
| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
|
||||
| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
|
||||
|
||||
### Standard block compression
|
||||
|
||||
Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
|
||||
So individual benchmarks should only be seen as a guideline and the overall picture is more important.
|
||||
|
||||
These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
|
||||
|
||||
Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
|
||||
|
||||
AMD64 assembly is used for both S2 and Snappy.
|
||||
|
||||
| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
|
||||
|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
|
||||
| html | 22843 | 21111 | 16246 MB/s | 17438 MB/s | 40972 MB/s | 49263 MB/s |
|
||||
| urls.10K | 335492 | 287326 | 7943 MB/s | 9693 MB/s | 22523 MB/s | 26484 MB/s |
|
||||
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 273889 MB/s | 718321 MB/s | 827552 MB/s |
|
||||
| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 17773 MB/s | 33691 MB/s | 52421 MB/s |
|
||||
| paper-100k.pdf | 85304 | 84459 | 167546 MB/s | 101263 MB/s | 326905 MB/s | 291944 MB/s |
|
||||
| html_x_4 | 92234 | 21113 | 15194 MB/s | 50670 MB/s | 30843 MB/s | 32217 MB/s |
|
||||
| alice29.txt | 88034 | 85975 | 5936 MB/s | 6139 MB/s | 12882 MB/s | 20044 MB/s |
|
||||
| asyoulik.txt | 77503 | 79650 | 5517 MB/s | 6366 MB/s | 12735 MB/s | 22806 MB/s |
|
||||
| lcet10.txt | 234661 | 220670 | 6235 MB/s | 6067 MB/s | 14519 MB/s | 18697 MB/s |
|
||||
| plrabn12.txt | 319267 | 317985 | 5159 MB/s | 5726 MB/s | 11923 MB/s | 19901 MB/s |
|
||||
| geo.protodata | 23335 | 18690 | 21220 MB/s | 26529 MB/s | 56271 MB/s | 62540 MB/s |
|
||||
| kppkn.gtb | 69526 | 65312 | 9732 MB/s | 8559 MB/s | 18491 MB/s | 18969 MB/s |
|
||||
| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 15489 MB/s | 31883 MB/s | 38874 MB/s |
|
||||
| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13000 MB/s | 48056 MB/s | 52341 MB/s |
|
||||
| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12806 MB/s | 32378 MB/s | 46322 MB/s |
|
||||
| alice29.txt (20000B) | 12686 | 13574 | 7733 MB/s | 11210 MB/s | 30566 MB/s | 58969 MB/s |
|
||||
|
||||
|
||||
| Relative Perf | Snappy size | S2 size improved | S2 Speed | S2 Dec Speed |
|
||||
|-----------------------|-------------|------------------|----------|--------------|
|
||||
| html | 22.31% | 7.58% | 1.07x | 1.20x |
|
||||
| urls.10K | 47.78% | 14.36% | 1.22x | 1.18x |
|
||||
| fireworks.jpeg | 99.95% | -0.05% | 0.78x | 1.15x |
|
||||
| fireworks.jpeg (200B) | 73.00% | -6.16% | 2.00x | 1.56x |
|
||||
| paper-100k.pdf | 83.30% | 0.99% | 0.60x | 0.89x |
|
||||
| html_x_4 | 22.52% | 77.11% | 3.33x | 1.04x |
|
||||
| alice29.txt | 57.88% | 2.34% | 1.03x | 1.56x |
|
||||
| asyoulik.txt | 61.91% | -2.77% | 1.15x | 1.79x |
|
||||
| lcet10.txt | 54.99% | 5.96% | 0.97x | 1.29x |
|
||||
| plrabn12.txt | 66.26% | 0.40% | 1.11x | 1.67x |
|
||||
| geo.protodata | 19.68% | 19.91% | 1.25x | 1.11x |
|
||||
| kppkn.gtb | 37.72% | 6.06% | 0.88x | 1.03x |
|
||||
| alice29.txt (128B) | 62.50% | -2.50% | 2.31x | 1.22x |
|
||||
| alice29.txt (1000B) | 77.40% | 0.00% | 1.07x | 1.09x |
|
||||
| alice29.txt (10000B) | 66.48% | -4.29% | 1.27x | 1.43x |
|
||||
| alice29.txt (20000B) | 63.43% | -7.00% | 1.45x | 1.93x |
|
||||
|
||||
Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
|
||||
|
||||
Decompression speed is better than Snappy, except in one case.
|
||||
|
||||
Since payloads are very small the variance in terms of size is rather big, so they should only be seen as a general guideline.
|
||||
|
||||
Size is on average around Snappy, but varies on content type.
|
||||
In cases where compression is worse, it usually is compensated by a speed boost.
|
||||
|
||||
|
||||
### Better compression
|
||||
|
||||
Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
|
||||
So individual benchmarks should only be seen as a guideline and the overall picture is more important.
|
||||
|
||||
| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
|
||||
|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
|
||||
| html | 22843 | 19833 | 16246 MB/s | 7731 MB/s | 40972 MB/s | 40292 MB/s |
|
||||
| urls.10K | 335492 | 253529 | 7943 MB/s | 3980 MB/s | 22523 MB/s | 20981 MB/s |
|
||||
| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 9760 MB/s | 718321 MB/s | 823698 MB/s |
|
||||
| fireworks.jpeg (200B) | 146 | 142 | 8869 MB/s | 594 MB/s | 33691 MB/s | 30101 MB/s |
|
||||
| paper-100k.pdf | 85304 | 82915 | 167546 MB/s | 7470 MB/s | 326905 MB/s | 198869 MB/s |
|
||||
| html_x_4 | 92234 | 19841 | 15194 MB/s | 23403 MB/s | 30843 MB/s | 30937 MB/s |
|
||||
| alice29.txt | 88034 | 73218 | 5936 MB/s | 2945 MB/s | 12882 MB/s | 16611 MB/s |
|
||||
| asyoulik.txt | 77503 | 66844 | 5517 MB/s | 2739 MB/s | 12735 MB/s | 14975 MB/s |
|
||||
| lcet10.txt | 234661 | 190589 | 6235 MB/s | 3099 MB/s | 14519 MB/s | 16634 MB/s |
|
||||
| plrabn12.txt | 319267 | 270828 | 5159 MB/s | 2600 MB/s | 11923 MB/s | 13382 MB/s |
|
||||
| geo.protodata | 23335 | 18278 | 21220 MB/s | 11208 MB/s | 56271 MB/s | 57961 MB/s |
|
||||
| kppkn.gtb | 69526 | 61851 | 9732 MB/s | 4556 MB/s | 18491 MB/s | 16524 MB/s |
|
||||
| alice29.txt (128B) | 80 | 81 | 6691 MB/s | 529 MB/s | 31883 MB/s | 34225 MB/s |
|
||||
| alice29.txt (1000B) | 774 | 748 | 12204 MB/s | 1943 MB/s | 48056 MB/s | 42068 MB/s |
|
||||
| alice29.txt (10000B) | 6648 | 6234 | 10044 MB/s | 2949 MB/s | 32378 MB/s | 28813 MB/s |
|
||||
| alice29.txt (20000B) | 12686 | 11584 | 7733 MB/s | 2822 MB/s | 30566 MB/s | 27315 MB/s |
|
||||
|
||||
|
||||
| Relative Perf | Snappy size | Better size | Better Speed | Better dec |
|
||||
|-----------------------|-------------|-------------|--------------|------------|
|
||||
| html | 22.31% | 13.18% | 0.48x | 0.98x |
|
||||
| urls.10K | 47.78% | 24.43% | 0.50x | 0.93x |
|
||||
| fireworks.jpeg | 99.95% | -0.05% | 0.03x | 1.15x |
|
||||
| fireworks.jpeg (200B) | 73.00% | 2.74% | 0.07x | 0.89x |
|
||||
| paper-100k.pdf | 83.30% | 2.80% | 0.07x | 0.61x |
|
||||
| html_x_4 | 22.52% | 78.49% | 0.04x | 1.00x |
|
||||
| alice29.txt | 57.88% | 16.83% | 1.54x | 1.29x |
|
||||
| asyoulik.txt | 61.91% | 13.75% | 0.50x | 1.18x |
|
||||
| lcet10.txt | 54.99% | 18.78% | 0.50x | 1.15x |
|
||||
| plrabn12.txt | 66.26% | 15.17% | 0.50x | 1.12x |
|
||||
| geo.protodata | 19.68% | 21.67% | 0.50x | 1.03x |
|
||||
| kppkn.gtb | 37.72% | 11.04% | 0.53x | 0.89x |
|
||||
| alice29.txt (128B) | 62.50% | -1.25% | 0.47x | 1.07x |
|
||||
| alice29.txt (1000B) | 77.40% | 3.36% | 0.08x | 0.88x |
|
||||
| alice29.txt (10000B) | 66.48% | 6.23% | 0.16x | 0.89x |
|
||||
| alice29.txt (20000B) | 63.43% | 8.69% | 0.29x | 0.89x |
|
||||
|
||||
Except for the mostly incompressible JPEG image, compression is better and usually in the
|
||||
double digits in terms of percentage reduction over Snappy.
|
||||
|
||||
The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
|
||||
to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
|
||||
|
||||
This mode aims to provide better compression at the expense of performance and achieves that
|
||||
without a huge performance penalty, except on very small blocks.
|
||||
|
||||
Decompression speed suffers a little compared to the regular S2 mode,
|
||||
but still manages to be close to Snappy in spite of increased compression.
|
||||
|
||||
# Best compression mode
|
||||
|
||||
S2 offers a "best" compression mode.
|
||||
|
||||
This will compress as much as possible with little regard to CPU usage.
|
||||
|
||||
Mainly for offline compression, but where decompression speed should still
|
||||
be high and compatible with other S2 compressed data.
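
As a sketch, "best" mode is selected per call for blocks and per writer for streams; the `WriterBestCompression` option name is assumed from the package's option pattern and should be checked against the installed version:

```Go
// Sketch: "best" mode for a block and for a stream.
func compressBest(block []byte, src io.Reader, dst io.Writer) ([]byte, error) {
	// Block form: slowest encode, smallest output.
	best := s2.EncodeBest(nil, block)

	// Stream form: option name assumed, see lead-in above.
	enc := s2.NewWriter(dst, s2.WriterBestCompression())
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return nil, err
	}
	return best, enc.Close()
}
```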
|
||||
|
||||
Some examples compared on 16 core CPU, amd64 assembly used:
|
||||
|
||||
```
|
||||
* enwik10
|
||||
Default... 10000000000 -> 4761467548 [47.61%]; 1.098s, 8685.6MB/s
|
||||
Better... 10000000000 -> 4219438251 [42.19%]; 1.925s, 4954.2MB/s
|
||||
Best... 10000000000 -> 3627364337 [36.27%]; 43.051s, 221.5MB/s
|
||||
|
||||
* github-june-2days-2019.json
|
||||
Default... 6273951764 -> 1043196283 [16.63%]; 431ms, 13882.3MB/s
|
||||
Better... 6273951764 -> 949146808 [15.13%]; 547ms, 10938.4MB/s
|
||||
Best... 6273951764 -> 832855506 [13.27%]; 9.455s, 632.8MB/s
|
||||
|
||||
* nyc-taxi-data-10M.csv
|
||||
Default... 3325605752 -> 1095998837 [32.96%]; 324ms, 9788.7MB/s
|
||||
Better... 3325605752 -> 954776589 [28.71%]; 491ms, 6459.4MB/s
|
||||
Best... 3325605752 -> 779098746 [23.43%]; 8.29s, 382.6MB/s
|
||||
|
||||
* 10gb.tar
|
||||
Default... 10065157632 -> 5916578242 [58.78%]; 1.028s, 9337.4MB/s
|
||||
Better... 10065157632 -> 5649207485 [56.13%]; 1.597s, 6010.6MB/s
|
||||
Best... 10065157632 -> 5208719802 [51.75%]; 32.78s, 292.8MB/s
|
||||
|
||||
* consensus.db.10gb
|
||||
Default... 10737418240 -> 4562648848 [42.49%]; 882ms, 11610.0MB/s
|
||||
Better... 10737418240 -> 4542428129 [42.30%]; 1.533s, 6679.7MB/s
|
||||
Best... 10737418240 -> 4244773384 [39.53%]; 42.96s, 238.4MB/s
|
||||
```
|
||||
|
||||
Decompression speed should be around the same as using the 'better' compression mode.
|
||||
|
||||
# Snappy Compatibility
|
||||
|
||||
S2 now offers full compatibility with Snappy.
|
||||
|
||||
This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
|
||||
|
||||
There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
|
||||
simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
|
||||
This uses "better" mode for all operations.
|
||||
If you would like more control, you can use the s2 package as described below:
|
||||
|
||||
## Blocks
|
||||
|
||||
Snappy compatible blocks can be generated with the S2 encoder.
|
||||
Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, giving lower memory usage. Replace:
|
||||
|
||||
| Snappy | S2 replacement |
|
||||
|----------------------------|-------------------------|
|
||||
| snappy.Encode(...) | s2.EncodeSnappy(...) |
|
||||
| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
|
||||
|
||||
`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
|
||||
|
||||
`s2.ConcatBlocks` is compatible with snappy blocks.
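
A hedged sketch of producing a Snappy-compatible block with the S2 encoder; `snappy` here stands for any Snappy block decoder, e.g. the `github.com/klauspost/compress/snappy` package mentioned above:

```Go
// Sketch: S2 encodes, a plain Snappy decoder reads the result back.
func snappyCompatBlock(src []byte) ([]byte, error) {
	block := s2.EncodeSnappy(nil, src) // or EncodeSnappyBetter / EncodeSnappyBest
	return snappy.Decode(nil, block)   // decodable by any Snappy implementation
}
```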
|
||||
|
||||
Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
|
||||
53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
|
||||
|
||||
| Encoder | Size | MB/s | Reduction |
|
||||
|-----------------------|------------|--------|------------
|
||||
| snappy.Encode | 1128706759 | 725.59 | 71.89% |
|
||||
| s2.EncodeSnappy | 1093823291 | 899.16 | 72.75% |
|
||||
| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
|
||||
| s2.EncodeSnappyBest | 944507998 | 66.00 | 76.47% |
|
||||
|
||||
## Streams
|
||||
|
||||
For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
|
||||
All other options are available, but note that block size limit is different for snappy.
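
A minimal sketch of writing a Snappy-compatible stream, following the replacement above:

```Go
// Sketch: stream output readable by ordinary Snappy stream readers.
func writeSnappyStream(src io.Reader, dst io.Writer) error {
	enc := s2.NewWriter(dst, s2.WriterSnappyCompat())
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}
```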
|
||||
|
||||
Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
|
||||
|
||||
| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
|
||||
|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
|
||||
| nyc-taxi-data-10M.csv | 1316042016 - 517.54MB/s | 1307003093 - 8406.29MB/s | 1174534014 - 4984.35MB/s | 1115904679 - 177.81MB/s |
|
||||
| enwik10 | 5088294643 - 433.45MB/s | 5175840939 - 8454.52MB/s | 4560784526 - 4403.10MB/s | 4340299103 - 159.71MB/s |
|
||||
| 10gb.tar | 6056946612 - 703.25MB/s | 6208571995 - 9035.75MB/s | 5741646126 - 2402.08MB/s | 5548973895 - 171.17MB/s |
|
||||
| github-june-2days-2019.json | 1525176492 - 908.11MB/s | 1476519054 - 12625.93MB/s | 1400547532 - 6163.61MB/s | 1321887137 - 200.71MB/s |
|
||||
| consensus.db.10gb | 5412897703 - 1054.38MB/s | 5354073487 - 12634.82MB/s | 5335069899 - 2472.23MB/s | 5201000954 - 166.32MB/s |
|
||||
|
||||
# Decompression
|
||||
|
||||
All decompression functions map directly to equivalent s2 functions.
|
||||
|
||||
| Snappy | S2 replacement |
|
||||
|------------------------|--------------------|
|
||||
| snappy.Decode(...) | s2.Decode(...) |
|
||||
| snappy.DecodedLen(...) | s2.DecodedLen(...) |
|
||||
| snappy.NewReader(...) | s2.NewReader(...) |
|
||||
|
||||
Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
|
||||
are also available for Snappy streams.
|
||||
|
||||
If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
|
||||
on your Reader will reduce memory consumption.
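
For example, a decoder restricted to Snappy-sized blocks might be set up like this (a sketch; the limit value comes from the paragraph above):

```Go
// Sketch: lower memory use when only Snappy streams are expected.
func newSnappyOnlyReader(src io.Reader) *s2.Reader {
	// 64 KiB is the maximum Snappy frame block size, so larger S2 blocks
	// will be rejected instead of allocating buffers for them.
	return s2.NewReader(src, s2.ReaderMaxBlockSize(64<<10))
}
```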
|
||||
|
||||
# Concatenating blocks and streams.
|
||||
|
||||
Concatenating streams will concatenate the output of both without recompressing them.
|
||||
While this is inefficient in terms of compression it might be usable in certain scenarios.
|
||||
The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
|
||||
|
||||
Blocks can be concatenated using the `ConcatBlocks` function.
|
||||
|
||||
Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
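
A short sketch of block concatenation; the exact `ConcatBlocks` signature is assumed from the package documentation:

```Go
// Sketch: join two independently encoded blocks into one decodable block.
func concat(a, b []byte) ([]byte, error) {
	joined, err := s2.ConcatBlocks(nil, a, b)
	if err != nil {
		return nil, err
	}
	// Decoding the result yields the decoded contents of a followed by b.
	return s2.Decode(nil, joined)
}
```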
|
||||
|
||||
# Format Extensions
|
||||
|
||||
* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
|
||||
* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
|
||||
* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
|
||||
|
||||
Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
|
||||
|
||||
The length is specified by reading the 3-bit length in the tag and decoding it using this table:
|
||||
|
||||
| Length | Actual Length |
|
||||
|--------|----------------------|
|
||||
| 0 | 4 |
|
||||
| 1 | 5 |
|
||||
| 2 | 6 |
|
||||
| 3 | 7 |
|
||||
| 4 | 8 |
|
||||
| 5 | 8 + read 1 byte |
|
||||
| 6 | 260 + read 2 bytes |
|
||||
| 7 | 65540 + read 3 bytes |
|
||||
|
||||
This allows any repeat offset + length to be represented by 2 to 5 bytes.
|
||||
|
||||
Lengths are stored as little endian values.
|
||||
|
||||
The first copy of a block cannot be a repeat offset and the offset is not carried across blocks in streams.
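
To make the table concrete, a hypothetical helper (not part of the s2 package) that turns a 3-bit repeat length code and its little-endian extra bytes into the actual length could look like this:

```Go
// decodeRepeatLength is an illustrative sketch of the table above.
// code is the 3-bit length from the tag; src holds the bytes that follow it.
// It returns the actual length and how many extra bytes were consumed.
func decodeRepeatLength(code uint8, src []byte) (length, extra int) {
	switch {
	case code <= 4:
		return int(code) + 4, 0 // codes 0-4 encode lengths 4-8 directly
	case code == 5:
		return 8 + int(src[0]), 1
	case code == 6:
		return 260 + int(src[0]) + int(src[1])<<8, 2 // little endian
	default: // code == 7
		return 65540 + int(src[0]) + int(src[1])<<8 + int(src[2])<<16, 3
	}
}
```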
|
||||
|
||||
Default streaming block size is 1MB.
|
||||
|
||||
# LICENSE
|
||||
|
||||
This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
|
||||
|
||||
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
|
565  vendor/github.com/klauspost/compress/s2/decode.go (generated, vendored, new file)
@ -0,0 +1,565 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package s2
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt reports that the input is invalid.
|
||||
ErrCorrupt = errors.New("s2: corrupt input")
|
||||
// ErrCRC reports that the input failed CRC validation (streams only)
|
||||
ErrCRC = errors.New("s2: corrupt input, crc mismatch")
|
||||
// ErrTooLarge reports that the uncompressed length is too large.
|
||||
ErrTooLarge = errors.New("s2: decoded block is too large")
|
||||
// ErrUnsupported reports that the input isn't supported.
|
||||
ErrUnsupported = errors.New("s2: unsupported input")
|
||||
)
|
||||
|
||||
// DecodedLen returns the length of the decoded block.
|
||||
func DecodedLen(src []byte) (int, error) {
|
||||
v, _, err := decodedLen(src)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
// that the length header occupied.
|
||||
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||
v, n := binary.Uvarint(src)
|
||||
if n <= 0 || v > 0xffffffff {
|
||||
return 0, 0, ErrCorrupt
|
||||
}
|
||||
|
||||
const wordSize = 32 << (^uint(0) >> 32 & 1)
|
||||
if wordSize == 32 && v > 0x7fffffff {
|
||||
return 0, 0, ErrTooLarge
|
||||
}
|
||||
return int(v), n, nil
|
||||
}
|
||||
|
||||
const (
|
||||
decodeErrCodeCorrupt = 1
|
||||
)
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
dLen, s, err := decodedLen(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dLen <= cap(dst) {
|
||||
dst = dst[:dLen]
|
||||
} else {
|
||||
dst = make([]byte, dLen)
|
||||
}
|
||||
if s2Decode(dst, src[s:]) != 0 {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||
// format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
|
||||
func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
|
||||
nr := Reader{
|
||||
r: r,
|
||||
maxBlock: maxBlockSize,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(&nr); err != nil {
|
||||
nr.err = err
|
||||
return &nr
|
||||
}
|
||||
}
|
||||
nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize
|
||||
if nr.lazyBuf > 0 {
|
||||
nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize)
|
||||
} else {
|
||||
nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
|
||||
}
|
||||
nr.paramsOK = true
|
||||
return &nr
|
||||
}
|
||||
|
||||
// ReaderOption is an option for creating a decoder.
|
||||
type ReaderOption func(*Reader) error
|
||||
|
||||
// ReaderMaxBlockSize allows to control allocations if the stream
|
||||
// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
|
||||
// Blocks must be this size or smaller to decompress,
|
||||
// otherwise the decoder will return ErrUnsupported.
|
||||
//
|
||||
// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
|
||||
//
|
||||
// Default is the maximum limit of 4MB.
|
||||
func ReaderMaxBlockSize(blockSize int) ReaderOption {
|
||||
return func(r *Reader) error {
|
||||
if blockSize > maxBlockSize || blockSize <= 0 {
|
||||
return errors.New("s2: block size too large. Must be <= 4MB and > 0")
|
||||
}
|
||||
if r.lazyBuf == 0 && blockSize < defaultBlockSize {
|
||||
r.lazyBuf = blockSize
|
||||
}
|
||||
r.maxBlock = blockSize
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ReaderAllocBlock allows to control upfront stream allocations
|
||||
// and not allocate for frames bigger than this initially.
|
||||
// If frames bigger than this is seen a bigger buffer will be allocated.
|
||||
//
|
||||
// Default is 1MB, which is default output size.
|
||||
func ReaderAllocBlock(blockSize int) ReaderOption {
|
||||
return func(r *Reader) error {
|
||||
if blockSize > maxBlockSize || blockSize < 1024 {
|
||||
return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
|
||||
}
|
||||
r.lazyBuf = blockSize
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Reader is an io.Reader that can read Snappy-compressed bytes.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
decoded []byte
|
||||
buf []byte
|
||||
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||
i, j int
|
||||
// maximum block size allowed.
|
||||
maxBlock int
|
||||
// maximum expected buffer size.
|
||||
maxBufSize int
|
||||
// alloc a buffer this size if > 0.
|
||||
lazyBuf int
|
||||
readHeader bool
|
||||
paramsOK bool
|
||||
snappyFrame bool
|
||||
}
|
||||
|
||||
// ensureBufferSize will ensure that the buffer can take at least n bytes.
|
||||
// If false is returned the buffer exceeds maximum allowed size.
|
||||
func (r *Reader) ensureBufferSize(n int) bool {
|
||||
if len(r.buf) >= n {
|
||||
return true
|
||||
}
|
||||
if n > r.maxBufSize {
|
||||
r.err = ErrCorrupt
|
||||
return false
|
||||
}
|
||||
// Realloc buffer.
|
||||
r.buf = make([]byte, n)
|
||||
return true
|
||||
}
|
||||
|
||||
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||
// a new one.
|
||||
func (r *Reader) Reset(reader io.Reader) {
|
||||
if !r.paramsOK {
|
||||
return
|
||||
}
|
||||
r.r = reader
|
||||
r.err = nil
|
||||
r.i = 0
|
||||
r.j = 0
|
||||
r.readHeader = false
|
||||
}
|
||||
|
||||
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
|
||||
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
|
||||
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||
r.err = ErrCorrupt
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// skipN will skip n bytes.
|
||||
// If the supplied reader supports seeking that is used.
|
||||
// tmp is used as a temporary buffer for reading.
|
||||
// The supplied slice does not need to be the size of the read.
|
||||
func (r *Reader) skipN(tmp []byte, n int, allowEOF bool) (ok bool) {
|
||||
if rs, ok := r.r.(io.ReadSeeker); ok {
|
||||
_, err := rs.Seek(int64(n), io.SeekCurrent)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||
r.err = ErrCorrupt
|
||||
return false
|
||||
}
|
||||
}
|
||||
for n > 0 {
|
||||
if n < len(tmp) {
|
||||
tmp = tmp[:n]
|
||||
}
|
||||
if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
|
||||
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||
r.err = ErrCorrupt
|
||||
}
|
||||
return false
|
||||
}
|
||||
n -= len(tmp)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Read satisfies the io.Reader interface.
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
for {
|
||||
if r.i < r.j {
|
||||
n := copy(p, r.decoded[r.i:r.j])
|
||||
r.i += n
|
||||
return n, nil
|
||||
}
|
||||
if !r.readFull(r.buf[:4], true) {
|
||||
return 0, r.err
|
||||
}
|
||||
chunkType := r.buf[0]
|
||||
if !r.readHeader {
|
||||
if chunkType != chunkTypeStreamIdentifier {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.readHeader = true
|
||||
}
|
||||
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||
|
||||
// The chunk types are specified at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
switch chunkType {
|
||||
case chunkTypeCompressedData:
|
||||
// Section 4.2. Compressed data (chunk type 0x00).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.ensureBufferSize(chunkLen) {
|
||||
if r.err == nil {
|
||||
r.err = ErrUnsupported
|
||||
}
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:chunkLen]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
buf = buf[checksumSize:]
|
||||
|
||||
n, err := DecodedLen(buf)
|
||||
if err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if r.snappyFrame && n > maxSnappyBlockSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
if n > len(r.decoded) {
|
||||
if n > r.maxBlock {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.decoded = make([]byte, n)
|
||||
}
|
||||
if _, err := Decode(r.decoded, buf); err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCRC
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeUncompressedData:
|
||||
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.ensureBufferSize(chunkLen) {
|
||||
if r.err == nil {
|
||||
r.err = ErrUnsupported
|
||||
}
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:checksumSize]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
// Read directly into r.decoded instead of via r.buf.
|
||||
n := chunkLen - checksumSize
|
||||
if r.snappyFrame && n > maxSnappyBlockSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if n > len(r.decoded) {
|
||||
if n > r.maxBlock {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.decoded = make([]byte, n)
|
||||
}
|
||||
if !r.readFull(r.decoded[:n], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCRC
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeStreamIdentifier:
|
||||
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||
if chunkLen != len(magicBody) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
if string(r.buf[:len(magicBody)]) != magicBody {
|
||||
if string(r.buf[:len(magicBody)]) != magicBodySnappy {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
} else {
|
||||
r.snappyFrame = true
|
||||
}
|
||||
} else {
|
||||
r.snappyFrame = false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if chunkType <= 0x7f {
|
||||
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
// Section 4.4 Padding (chunk type 0xfe).
|
||||
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||
if chunkLen > maxBlockSize {
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
if !r.skipN(r.buf, chunkLen, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skip will skip n bytes forward in the decompressed output.
|
||||
// For larger skips this consumes less CPU and is faster than reading output and discarding it.
|
||||
// CRC is not checked on skipped blocks.
|
||||
// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped.
|
||||
// If a decoding error is encountered subsequent calls to Read will also fail.
|
||||
func (r *Reader) Skip(n int64) error {
|
||||
if n < 0 {
|
||||
return errors.New("attempted negative skip")
|
||||
}
|
||||
if r.err != nil {
|
||||
return r.err
|
||||
}
|
||||
|
||||
for n > 0 {
|
||||
if r.i < r.j {
|
||||
// Skip in buffer.
|
||||
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||
left := int64(r.j - r.i)
|
||||
if left >= n {
|
||||
r.i += int(n)
|
||||
return nil
|
||||
}
|
||||
n -= int64(r.j - r.i)
|
||||
r.i, r.j = 0, 0
|
||||
}
|
||||
|
||||
// Buffer empty; read blocks until we have content.
|
||||
if !r.readFull(r.buf[:4], true) {
|
||||
if r.err == io.EOF {
|
||||
r.err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return r.err
|
||||
}
|
||||
chunkType := r.buf[0]
|
||||
if !r.readHeader {
|
||||
if chunkType != chunkTypeStreamIdentifier {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
r.readHeader = true
|
||||
}
|
||||
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||
|
||||
// The chunk types are specified at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
switch chunkType {
|
||||
case chunkTypeCompressedData:
|
||||
// Section 4.2. Compressed data (chunk type 0x00).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
if !r.ensureBufferSize(chunkLen) {
|
||||
if r.err == nil {
|
||||
r.err = ErrUnsupported
|
||||
}
|
||||
return r.err
|
||||
}
|
||||
buf := r.buf[:chunkLen]
|
||||
if !r.readFull(buf, false) {
|
||||
return r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
buf = buf[checksumSize:]
|
||||
|
||||
dLen, err := DecodedLen(buf)
|
||||
if err != nil {
|
||||
r.err = err
|
||||
return r.err
|
||||
}
|
||||
if dLen > r.maxBlock {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
// Check if destination is within this block
|
||||
if int64(dLen) > n {
|
||||
if len(r.decoded) < dLen {
|
||||
r.decoded = make([]byte, dLen)
|
||||
}
|
||||
if _, err := Decode(r.decoded, buf); err != nil {
|
||||
r.err = err
|
||||
return r.err
|
||||
}
|
||||
if crc(r.decoded[:dLen]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
} else {
|
||||
// Skip block completely
|
||||
n -= int64(dLen)
|
||||
dLen = 0
|
||||
}
|
||||
r.i, r.j = 0, dLen
|
||||
continue
|
||||
case chunkTypeUncompressedData:
|
||||
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
if !r.ensureBufferSize(chunkLen) {
|
||||
if r.err != nil {
|
||||
r.err = ErrUnsupported
|
||||
}
|
||||
return r.err
|
||||
}
|
||||
buf := r.buf[:checksumSize]
|
||||
if !r.readFull(buf, false) {
|
||||
return r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
// Read directly into r.decoded instead of via r.buf.
|
||||
n2 := chunkLen - checksumSize
|
||||
if n2 > len(r.decoded) {
|
||||
if n2 > r.maxBlock {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
r.decoded = make([]byte, n2)
|
||||
}
|
||||
if !r.readFull(r.decoded[:n2], false) {
|
||||
return r.err
|
||||
}
|
||||
if int64(n2) < n {
|
||||
if crc(r.decoded[:n2]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
}
|
||||
r.i, r.j = 0, n2
|
||||
continue
|
||||
case chunkTypeStreamIdentifier:
|
||||
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||
if chunkLen != len(magicBody) {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||
return r.err
|
||||
}
|
||||
if string(r.buf[:len(magicBody)]) != magicBody {
|
||||
if string(r.buf[:len(magicBody)]) != magicBodySnappy {
|
||||
r.err = ErrCorrupt
|
||||
return r.err
|
||||
}
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if chunkType <= 0x7f {
|
||||
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||
r.err = ErrUnsupported
|
||||
return r.err
|
||||
}
|
||||
if chunkLen > maxBlockSize {
|
||||
r.err = ErrUnsupported
|
||||
return r.err
|
||||
}
|
||||
// Section 4.4 Padding (chunk type 0xfe).
|
||||
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||
if !r.skipN(r.buf, chunkLen, false) {
|
||||
return r.err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadByte satisfies the io.ByteReader interface.
|
||||
func (r *Reader) ReadByte() (byte, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
if r.i < r.j {
|
||||
c := r.decoded[r.i]
|
||||
r.i++
|
||||
return c, nil
|
||||
}
|
||||
var tmp [1]byte
|
||||
for i := 0; i < 10; i++ {
|
||||
n, err := r.Read(tmp[:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if n == 1 {
|
||||
return tmp[0], nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrNoProgress
|
||||
}
|
568  vendor/github.com/klauspost/compress/s2/decode_amd64.s (generated, vendored, new file)
@ -0,0 +1,568 @@
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define R_TMP0 AX
|
||||
#define R_TMP1 BX
|
||||
#define R_LEN CX
|
||||
#define R_OFF DX
|
||||
#define R_SRC SI
|
||||
#define R_DST DI
|
||||
#define R_DBASE R8
|
||||
#define R_DLEN R9
|
||||
#define R_DEND R10
|
||||
#define R_SBASE R11
|
||||
#define R_SLEN R12
|
||||
#define R_SEND R13
|
||||
#define R_TMP2 R14
|
||||
#define R_TMP3 R15
|
||||
|
||||
// The asm code generally follows the pure Go code in decode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// func decode(dst, src []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The non-zero stack size is only to
|
||||
// spill registers and push args when issuing a CALL. The register allocation:
|
||||
// - R_TMP0 scratch
|
||||
// - R_TMP1 scratch
|
||||
// - R_LEN length or x (shared)
|
||||
// - R_OFF offset
|
||||
// - R_SRC &src[s]
|
||||
// - R_DST &dst[d]
|
||||
// + R_DBASE dst_base
|
||||
// + R_DLEN dst_len
|
||||
// + R_DEND dst_base + dst_len
|
||||
// + R_SBASE src_base
|
||||
// + R_SLEN src_len
|
||||
// + R_SEND src_base + src_len
|
||||
// - R_TMP2 used by doCopy
|
||||
// - R_TMP3 used by doCopy
|
||||
//
|
||||
// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
|
||||
// function, and after a CALL returns, and are not otherwise modified.
|
||||
//
|
||||
// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
|
||||
// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
|
||||
TEXT ·s2Decode(SB), NOSPLIT, $48-56
|
||||
// Initialize R_SRC, R_DST and R_DBASE-R_SEND.
|
||||
MOVQ dst_base+0(FP), R_DBASE
|
||||
MOVQ dst_len+8(FP), R_DLEN
|
||||
MOVQ R_DBASE, R_DST
|
||||
MOVQ R_DBASE, R_DEND
|
||||
ADDQ R_DLEN, R_DEND
|
||||
MOVQ src_base+24(FP), R_SBASE
|
||||
MOVQ src_len+32(FP), R_SLEN
|
||||
MOVQ R_SBASE, R_SRC
|
||||
MOVQ R_SBASE, R_SEND
|
||||
ADDQ R_SLEN, R_SEND
|
||||
XORQ R_OFF, R_OFF
|
||||
|
||||
loop:
|
||||
// for s < len(src)
|
||||
CMPQ R_SRC, R_SEND
|
||||
JEQ end
|
||||
|
||||
// R_LEN = uint32(src[s])
|
||||
//
|
||||
// switch src[s] & 0x03
|
||||
MOVBLZX (R_SRC), R_LEN
|
||||
MOVL R_LEN, R_TMP1
|
||||
ANDL $3, R_TMP1
|
||||
CMPL R_TMP1, $1
|
||||
JAE tagCopy
|
||||
|
||||
// ----------------------------------------
|
||||
// The code below handles literal tags.
|
||||
|
||||
// case tagLiteral:
|
||||
// x := uint32(src[s] >> 2)
|
||||
// switch
|
||||
SHRL $2, R_LEN
|
||||
CMPL R_LEN, $60
|
||||
JAE tagLit60Plus
|
||||
|
||||
// case x < 60:
|
||||
// s++
|
||||
INCQ R_SRC
|
||||
|
||||
doLit:
|
||||
// This is the end of the inner "switch", when we have a literal tag.
|
||||
//
|
||||
// We assume that R_LEN == x and x fits in a uint32, where x is the variable
|
||||
// used in the pure Go decode_other.go code.
|
||||
|
||||
// length = int(x) + 1
|
||||
//
|
||||
// Unlike the pure Go code, we don't need to check if length <= 0 because
|
||||
// R_LEN can hold 64 bits, so the increment cannot overflow.
|
||||
INCQ R_LEN
|
||||
|
||||
// Prepare to check if copying length bytes will run past the end of dst or
|
||||
// src.
|
||||
//
|
||||
// R_TMP0 = len(dst) - d
|
||||
// R_TMP1 = len(src) - s
|
||||
MOVQ R_DEND, R_TMP0
|
||||
SUBQ R_DST, R_TMP0
|
||||
MOVQ R_SEND, R_TMP1
|
||||
SUBQ R_SRC, R_TMP1
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) copies.
|
||||
//
|
||||
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
|
||||
// goto callMemmove // Fall back on calling runtime·memmove.
|
||||
// }
|
||||
//
|
||||
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
|
||||
// against 21 instead of 16, because it cannot assume that all of its input
|
||||
// is contiguous in memory and so it needs to leave enough source bytes to
|
||||
// read the next tag without refilling buffers, but Go's Decode assumes
|
||||
// contiguousness (the src argument is a []byte).
|
||||
CMPQ R_LEN, $16
|
||||
JGT callMemmove
|
||||
CMPQ R_TMP0, $16
|
||||
JLT callMemmove
|
||||
CMPQ R_TMP1, $16
|
||||
JLT callMemmove
|
||||
|
||||
// !!! Implement the copy from src to dst as a 16-byte load and store.
|
||||
// (Decode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only length bytes, but that's
|
||||
// OK. If the input is a valid Snappy encoding then subsequent iterations
|
||||
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
|
||||
// non-nil error), so the overrun will be ignored.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(R_SRC), X0
|
||||
MOVOU X0, 0(R_DST)
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ R_LEN, R_DST
|
||||
ADDQ R_LEN, R_SRC
|
||||
JMP loop
|
||||
|
||||
callMemmove:
|
||||
// if length > len(dst)-d || length > len(src)-s { etc }
|
||||
CMPQ R_LEN, R_TMP0
|
||||
JGT errCorrupt
|
||||
CMPQ R_LEN, R_TMP1
|
||||
JGT errCorrupt
|
||||
|
||||
// copy(dst[d:], src[s:s+length])
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
|
||||
// R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
|
||||
// three registers to the stack, to save local variables across the CALL.
|
||||
MOVQ R_DST, 0(SP)
|
||||
MOVQ R_SRC, 8(SP)
|
||||
MOVQ R_LEN, 16(SP)
|
||||
MOVQ R_DST, 24(SP)
|
||||
MOVQ R_SRC, 32(SP)
|
||||
MOVQ R_LEN, 40(SP)
|
||||
MOVQ R_OFF, 48(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// Restore local variables: unspill registers from the stack and
|
||||
// re-calculate R_DBASE-R_SEND.
|
||||
MOVQ 24(SP), R_DST
|
||||
MOVQ 32(SP), R_SRC
|
||||
MOVQ 40(SP), R_LEN
|
||||
MOVQ 48(SP), R_OFF
|
||||
MOVQ dst_base+0(FP), R_DBASE
|
||||
MOVQ dst_len+8(FP), R_DLEN
|
||||
MOVQ R_DBASE, R_DEND
|
||||
ADDQ R_DLEN, R_DEND
|
||||
MOVQ src_base+24(FP), R_SBASE
|
||||
MOVQ src_len+32(FP), R_SLEN
|
||||
MOVQ R_SBASE, R_SEND
|
||||
ADDQ R_SLEN, R_SEND
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ R_LEN, R_DST
|
||||
ADDQ R_LEN, R_SRC
|
||||
JMP loop
|
||||
|
||||
tagLit60Plus:
|
||||
// !!! This fragment does the
|
||||
//
|
||||
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||
//
|
||||
// checks. In the asm version, we code it once instead of once per switch case.
|
||||
ADDQ R_LEN, R_SRC
|
||||
SUBQ $58, R_SRC
|
||||
CMPQ R_SRC, R_SEND
|
||||
JA errCorrupt
|
||||
|
||||
// case x == 60:
|
||||
CMPL R_LEN, $61
|
||||
JEQ tagLit61
|
||||
JA tagLit62Plus
|
||||
|
||||
// x = uint32(src[s-1])
|
||||
MOVBLZX -1(R_SRC), R_LEN
|
||||
JMP doLit
|
||||
|
||||
tagLit61:
|
||||
// case x == 61:
|
||||
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
MOVWLZX -2(R_SRC), R_LEN
|
||||
JMP doLit
|
||||
|
||||
tagLit62Plus:
|
||||
CMPL R_LEN, $62
|
||||
JA tagLit63
|
||||
|
||||
// case x == 62:
|
||||
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
// We read one byte, safe to read one back, since we are just reading tag.
|
||||
// x = binary.LittleEndian.Uint32(src[s-1:]) >> 8
|
||||
MOVL -4(R_SRC), R_LEN
|
||||
SHRL $8, R_LEN
|
||||
JMP doLit
|
||||
|
||||
tagLit63:
|
||||
// case x == 63:
|
||||
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
MOVL -4(R_SRC), R_LEN
|
||||
JMP doLit
|
||||
|
||||
// The code above handles literal tags.
|
||||
// ----------------------------------------
|
||||
// The code below handles copy tags.
|
||||
|
||||
tagCopy4:
|
||||
// case tagCopy4:
|
||||
// s += 5
|
||||
ADDQ $5, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
CMPQ R_SRC, R_SEND
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-5])>>2
|
||||
SHRQ $2, R_LEN
|
||||
INCQ R_LEN
|
||||
|
||||
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
MOVLQZX -4(R_SRC), R_OFF
|
||||
JMP doCopy
|
||||
|
||||
tagCopy2:
|
||||
// case tagCopy2:
|
||||
// s += 3
|
||||
ADDQ $3, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
CMPQ R_SRC, R_SEND
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-3])>>2
|
||||
SHRQ $2, R_LEN
|
||||
INCQ R_LEN
|
||||
|
||||
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
MOVWQZX -2(R_SRC), R_OFF
|
||||
JMP doCopy
|
||||
|
||||
tagCopy:
|
||||
// We have a copy tag. We assume that:
|
||||
// - R_TMP1 == src[s] & 0x03
|
||||
// - R_LEN == src[s]
|
||||
CMPQ R_TMP1, $2
|
||||
JEQ tagCopy2
|
||||
JA tagCopy4
|
||||
|
||||
// case tagCopy1:
|
||||
// s += 2
|
||||
ADDQ $2, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
CMPQ R_SRC, R_SEND
|
||||
JA errCorrupt
|
||||
|
||||
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
// length = 4 + int(src[s-2])>>2&0x7
|
||||
MOVBQZX -1(R_SRC), R_TMP1
|
||||
MOVQ R_LEN, R_TMP0
|
||||
SHRQ $2, R_LEN
|
||||
ANDQ $0xe0, R_TMP0
|
||||
ANDQ $7, R_LEN
|
||||
SHLQ $3, R_TMP0
|
||||
ADDQ $4, R_LEN
|
||||
ORQ R_TMP1, R_TMP0
|
||||
|
||||
// check if repeat code, ZF set by ORQ.
|
||||
JZ repeatCode
|
||||
|
||||
	// This is a regular copy, transfer our temporary value to R_OFF (offset)
|
||||
MOVQ R_TMP0, R_OFF
|
||||
JMP doCopy
|
||||
|
||||
// This is a repeat code.
|
||||
repeatCode:
|
||||
// If length < 9, reuse last offset, with the length already calculated.
|
||||
CMPQ R_LEN, $9
|
||||
JL doCopyRepeat
|
||||
|
||||
// Read additional bytes for length.
|
||||
JE repeatLen1
|
||||
|
||||
// Rare, so the extra branch shouldn't hurt too much.
|
||||
CMPQ R_LEN, $10
|
||||
JE repeatLen2
|
||||
JMP repeatLen3
|
||||
|
||||
// Read repeat lengths.
|
||||
repeatLen1:
|
||||
// s ++
|
||||
ADDQ $1, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
CMPQ R_SRC, R_SEND
|
||||
JA errCorrupt
|
||||
|
||||
// length = src[s-1] + 8
|
||||
MOVBQZX -1(R_SRC), R_LEN
|
||||
ADDL $8, R_LEN
|
||||
JMP doCopyRepeat
|
||||
|
||||
repeatLen2:
|
||||
// s +=2
|
||||
ADDQ $2, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
CMPQ R_SRC, R_SEND
|
||||
JA errCorrupt
|
||||
|
||||
// length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8)
|
||||
MOVWQZX -2(R_SRC), R_LEN
|
||||
ADDL $260, R_LEN
|
||||
JMP doCopyRepeat
|
||||
|
||||
repeatLen3:
|
||||
// s +=3
|
||||
ADDQ $3, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
CMPQ R_SRC, R_SEND
|
||||
JA errCorrupt
|
||||
|
||||
// length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16)
|
||||
// Read one byte further back (just part of the tag, shifted out)
|
||||
MOVL -4(R_SRC), R_LEN
|
||||
SHRL $8, R_LEN
|
||||
ADDL $65540, R_LEN
|
||||
JMP doCopyRepeat
|
||||
|
||||
doCopy:
|
||||
// This is the end of the outer "switch", when we have a copy tag.
|
||||
//
|
||||
// We assume that:
|
||||
// - R_LEN == length && R_LEN > 0
|
||||
// - R_OFF == offset
|
||||
|
||||
// if d < offset { etc }
|
||||
MOVQ R_DST, R_TMP1
|
||||
SUBQ R_DBASE, R_TMP1
|
||||
CMPQ R_TMP1, R_OFF
|
||||
JLT errCorrupt
|
||||
|
||||
// Repeat values can skip the test above, since any offset > 0 will be in dst.
|
||||
doCopyRepeat:
|
||||
// if offset <= 0 { etc }
|
||||
CMPQ R_OFF, $0
|
||||
JLE errCorrupt
|
||||
|
||||
// if length > len(dst)-d { etc }
|
||||
MOVQ R_DEND, R_TMP1
|
||||
SUBQ R_DST, R_TMP1
|
||||
CMPQ R_LEN, R_TMP1
|
||||
JGT errCorrupt
|
||||
|
||||
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||
//
|
||||
// Set:
|
||||
// - R_TMP2 = len(dst)-d
|
||||
// - R_TMP3 = &dst[d-offset]
|
||||
MOVQ R_DEND, R_TMP2
|
||||
SUBQ R_DST, R_TMP2
|
||||
MOVQ R_DST, R_TMP3
|
||||
SUBQ R_OFF, R_TMP3
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
|
||||
//
|
||||
// First, try using two 8-byte load/stores, similar to the doLit technique
|
||||
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
|
||||
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
|
||||
// and not one 16-byte load/store, and the first store has to be before the
|
||||
// second load, due to the overlap if offset is in the range [8, 16).
|
||||
//
|
||||
// if length > 16 || offset < 8 || len(dst)-d < 16 {
|
||||
// goto slowForwardCopy
|
||||
// }
|
||||
// copy 16 bytes
|
||||
// d += length
|
||||
CMPQ R_LEN, $16
|
||||
JGT slowForwardCopy
|
||||
CMPQ R_OFF, $8
|
||||
JLT slowForwardCopy
|
||||
CMPQ R_TMP2, $16
|
||||
JLT slowForwardCopy
|
||||
MOVQ 0(R_TMP3), R_TMP0
|
||||
MOVQ R_TMP0, 0(R_DST)
|
||||
MOVQ 8(R_TMP3), R_TMP1
|
||||
MOVQ R_TMP1, 8(R_DST)
|
||||
ADDQ R_LEN, R_DST
|
||||
JMP loop
|
||||
|
||||
slowForwardCopy:
|
||||
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
|
||||
// can still try 8-byte load stores, provided we can overrun up to 10 extra
|
||||
// bytes. As above, the overrun will be fixed up by subsequent iterations
|
||||
// of the outermost loop.
|
||||
//
|
||||
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
|
||||
// commentary says:
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// The main part of this loop is a simple copy of eight bytes at a time
|
||||
// until we've copied (at least) the requested amount of bytes. However,
|
||||
// if d and d-offset are less than eight bytes apart (indicating a
|
||||
// repeating pattern of length < 8), we first need to expand the pattern in
|
||||
// order to get the correct results. For instance, if the buffer looks like
|
||||
// this, with the eight-byte <d-offset> and <d> patterns marked as
|
||||
// intervals:
|
||||
//
|
||||
// abxxxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
|
||||
// once, after which we can move <d> two bytes without moving <d-offset>:
|
||||
//
|
||||
// ababxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// and repeat the exercise until the two no longer overlap.
|
||||
//
|
||||
// This allows us to do very well in the special case of one single byte
|
||||
// repeated many times, without taking a big hit for more general cases.
|
||||
//
|
||||
// The worst case of extra writing past the end of the match occurs when
|
||||
// offset == 1 and length == 1; the last copy will read from byte positions
|
||||
// [0..7] and write to [4..11], whereas it was only supposed to write to
|
||||
// position 1. Thus, ten excess bytes.
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// That "10 byte overrun" worst case is confirmed by Go's
|
||||
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
|
||||
// and finishSlowForwardCopy algorithm.
|
||||
//
|
||||
// if length > len(dst)-d-10 {
|
||||
// goto verySlowForwardCopy
|
||||
// }
|
||||
SUBQ $10, R_TMP2
|
||||
CMPQ R_LEN, R_TMP2
|
||||
JGT verySlowForwardCopy
|
||||
|
||||
// We want to keep the offset, so we use R_TMP2 from here.
|
||||
MOVQ R_OFF, R_TMP2
|
||||
|
||||
makeOffsetAtLeast8:
|
||||
// !!! As above, expand the pattern so that offset >= 8 and we can use
|
||||
// 8-byte load/stores.
|
||||
//
|
||||
// for offset < 8 {
|
||||
// copy 8 bytes from dst[d-offset:] to dst[d:]
|
||||
// length -= offset
|
||||
// d += offset
|
||||
// offset += offset
|
||||
// // The two previous lines together means that d-offset, and therefore
|
||||
// // R_TMP3, is unchanged.
|
||||
// }
|
||||
CMPQ R_TMP2, $8
|
||||
JGE fixUpSlowForwardCopy
|
||||
MOVQ (R_TMP3), R_TMP1
|
||||
MOVQ R_TMP1, (R_DST)
|
||||
SUBQ R_TMP2, R_LEN
|
||||
ADDQ R_TMP2, R_DST
|
||||
ADDQ R_TMP2, R_TMP2
|
||||
JMP makeOffsetAtLeast8
|
||||
|
||||
fixUpSlowForwardCopy:
|
||||
// !!! Add length (which might be negative now) to d (implied by R_DST being
|
||||
// &dst[d]) so that d ends up at the right place when we jump back to the
|
||||
// top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
|
||||
// length is positive, copying the remaining length bytes will write to the
|
||||
// right place.
|
||||
MOVQ R_DST, R_TMP0
|
||||
ADDQ R_LEN, R_DST
|
||||
|
||||
finishSlowForwardCopy:
|
||||
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
|
||||
// length means that we overrun, but as above, that will be fixed up by
|
||||
// subsequent iterations of the outermost loop.
|
||||
CMPQ R_LEN, $0
|
||||
JLE loop
|
||||
MOVQ (R_TMP3), R_TMP1
|
||||
MOVQ R_TMP1, (R_TMP0)
|
||||
ADDQ $8, R_TMP3
|
||||
ADDQ $8, R_TMP0
|
||||
SUBQ $8, R_LEN
|
||||
JMP finishSlowForwardCopy
|
||||
|
||||
verySlowForwardCopy:
|
||||
// verySlowForwardCopy is a simple implementation of forward copy. In C
|
||||
// parlance, this is a do/while loop instead of a while loop, since we know
|
||||
// that length > 0. In Go syntax:
|
||||
//
|
||||
// for {
|
||||
// dst[d] = dst[d - offset]
|
||||
// d++
|
||||
// length--
|
||||
// if length == 0 {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
MOVB (R_TMP3), R_TMP1
|
||||
MOVB R_TMP1, (R_DST)
|
||||
INCQ R_TMP3
|
||||
INCQ R_DST
|
||||
DECQ R_LEN
|
||||
JNZ verySlowForwardCopy
|
||||
JMP loop
|
||||
|
||||
// The code above handles copy tags.
|
||||
// ----------------------------------------
|
||||
|
||||
end:
|
||||
// This is the end of the "for s < len(src)".
|
||||
//
|
||||
// if d != len(dst) { etc }
|
||||
CMPQ R_DST, R_DEND
|
||||
JNE errCorrupt
|
||||
|
||||
// return 0
|
||||
MOVQ $0, ret+48(FP)
|
||||
RET
|
||||
|
||||
errCorrupt:
|
||||
// return decodeErrCodeCorrupt
|
||||
MOVQ $1, ret+48(FP)
|
||||
RET
|
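To make the bit-twiddling in the tagCopy1 branch above easier to follow, here is a small, hypothetical Go sketch (not part of the vendored code) of the same extraction the assembly performs with SHRQ/ANDQ/SHLQ/ORQ; decodeCopy1 and the sample bytes are illustrative only.

package main

import "fmt"

// decodeCopy1 mirrors the tagCopy1 handling in the assembly above: the low
// two bits of the tag select the copy type, bits 2-4 carry length-4, and the
// 11-bit offset is split across the top 3 bits of the tag and the next byte.
func decodeCopy1(tag, next byte) (length, offset int) {
	length = 4 + int(tag)>>2&0x7
	offset = int(uint32(tag)&0xe0<<3 | uint32(next))
	return length, offset
}

func main() {
	length, offset := decodeCopy1(0xE9, 0x10)
	fmt.Println(length, offset) // 6 1808
}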
574
vendor/github.com/klauspost/compress/s2/decode_arm64.s
generated
vendored
Normal file
@ -0,0 +1,574 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define R_TMP0 R2
|
||||
#define R_TMP1 R3
|
||||
#define R_LEN R4
|
||||
#define R_OFF R5
|
||||
#define R_SRC R6
|
||||
#define R_DST R7
|
||||
#define R_DBASE R8
|
||||
#define R_DLEN R9
|
||||
#define R_DEND R10
|
||||
#define R_SBASE R11
|
||||
#define R_SLEN R12
|
||||
#define R_SEND R13
|
||||
#define R_TMP2 R14
|
||||
#define R_TMP3 R15
|
||||
|
||||
// TEST_SRC will check if R_SRC is <= SRC_END
|
||||
#define TEST_SRC() \
|
||||
CMP R_SEND, R_SRC \
|
||||
BGT errCorrupt
|
||||
|
||||
// MOVD R_SRC, R_TMP1
|
||||
// SUB R_SBASE, R_TMP1, R_TMP1
|
||||
// CMP R_SLEN, R_TMP1
|
||||
// BGT errCorrupt
|
||||
|
||||
// The asm code generally follows the pure Go code in decode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// func decode(dst, src []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The non-zero stack size is only to
|
||||
// spill registers and push args when issuing a CALL. The register allocation:
|
||||
// - R_TMP0 scratch
|
||||
// - R_TMP1 scratch
|
||||
// - R_LEN length or x
|
||||
// - R_OFF offset
|
||||
// - R_SRC &src[s]
|
||||
// - R_DST &dst[d]
|
||||
// + R_DBASE dst_base
|
||||
// + R_DLEN dst_len
|
||||
// + R_DEND dst_base + dst_len
|
||||
// + R_SBASE src_base
|
||||
// + R_SLEN src_len
|
||||
// + R_SEND src_base + src_len
|
||||
// - R_TMP2 used by doCopy
|
||||
// - R_TMP3 used by doCopy
|
||||
//
|
||||
// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the
|
||||
// function, and after a CALL returns, and are not otherwise modified.
|
||||
//
|
||||
// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST.
|
||||
// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC.
|
||||
TEXT ·s2Decode(SB), NOSPLIT, $56-64
|
||||
// Initialize R_SRC, R_DST and R_DBASE-R_SEND.
|
||||
MOVD dst_base+0(FP), R_DBASE
|
||||
MOVD dst_len+8(FP), R_DLEN
|
||||
MOVD R_DBASE, R_DST
|
||||
MOVD R_DBASE, R_DEND
|
||||
ADD R_DLEN, R_DEND, R_DEND
|
||||
MOVD src_base+24(FP), R_SBASE
|
||||
MOVD src_len+32(FP), R_SLEN
|
||||
MOVD R_SBASE, R_SRC
|
||||
MOVD R_SBASE, R_SEND
|
||||
ADD R_SLEN, R_SEND, R_SEND
|
||||
MOVD $0, R_OFF
|
||||
|
||||
loop:
|
||||
// for s < len(src)
|
||||
CMP R_SEND, R_SRC
|
||||
BEQ end
|
||||
|
||||
// R_LEN = uint32(src[s])
|
||||
//
|
||||
// switch src[s] & 0x03
|
||||
MOVBU (R_SRC), R_LEN
|
||||
MOVW R_LEN, R_TMP1
|
||||
ANDW $3, R_TMP1
|
||||
MOVW $1, R1
|
||||
CMPW R1, R_TMP1
|
||||
BGE tagCopy
|
||||
|
||||
// ----------------------------------------
|
||||
// The code below handles literal tags.
|
||||
|
||||
// case tagLiteral:
|
||||
// x := uint32(src[s] >> 2)
|
||||
// switch
|
||||
MOVW $60, R1
|
||||
LSRW $2, R_LEN, R_LEN
|
||||
CMPW R_LEN, R1
|
||||
BLS tagLit60Plus
|
||||
|
||||
// case x < 60:
|
||||
// s++
|
||||
ADD $1, R_SRC, R_SRC
|
||||
|
||||
doLit:
|
||||
// This is the end of the inner "switch", when we have a literal tag.
|
||||
//
|
||||
// We assume that R_LEN == x and x fits in a uint32, where x is the variable
|
||||
// used in the pure Go decode_other.go code.
|
||||
|
||||
// length = int(x) + 1
|
||||
//
|
||||
// Unlike the pure Go code, we don't need to check if length <= 0 because
|
||||
// R_LEN can hold 64 bits, so the increment cannot overflow.
|
||||
ADD $1, R_LEN, R_LEN
|
||||
|
||||
// Prepare to check if copying length bytes will run past the end of dst or
|
||||
// src.
|
||||
//
|
||||
// R_TMP0 = len(dst) - d
|
||||
// R_TMP1 = len(src) - s
|
||||
MOVD R_DEND, R_TMP0
|
||||
SUB R_DST, R_TMP0, R_TMP0
|
||||
MOVD R_SEND, R_TMP1
|
||||
SUB R_SRC, R_TMP1, R_TMP1
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) copies.
|
||||
//
|
||||
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
|
||||
// goto callMemmove // Fall back on calling runtime·memmove.
|
||||
// }
|
||||
//
|
||||
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
|
||||
// against 21 instead of 16, because it cannot assume that all of its input
|
||||
// is contiguous in memory and so it needs to leave enough source bytes to
|
||||
// read the next tag without refilling buffers, but Go's Decode assumes
|
||||
// contiguousness (the src argument is a []byte).
|
||||
CMP $16, R_LEN
|
||||
BGT callMemmove
|
||||
CMP $16, R_TMP0
|
||||
BLT callMemmove
|
||||
CMP $16, R_TMP1
|
||||
BLT callMemmove
|
||||
|
||||
// !!! Implement the copy from src to dst as a 16-byte load and store.
|
||||
// (Decode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only length bytes, but that's
|
||||
// OK. If the input is a valid Snappy encoding then subsequent iterations
|
||||
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
|
||||
// non-nil error), so the overrun will be ignored.
|
||||
//
|
||||
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
LDP 0(R_SRC), (R_TMP2, R_TMP3)
|
||||
STP (R_TMP2, R_TMP3), 0(R_DST)
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADD R_LEN, R_DST, R_DST
|
||||
ADD R_LEN, R_SRC, R_SRC
|
||||
B loop
|
||||
|
||||
callMemmove:
|
||||
// if length > len(dst)-d || length > len(src)-s { etc }
|
||||
CMP R_TMP0, R_LEN
|
||||
BGT errCorrupt
|
||||
CMP R_TMP1, R_LEN
|
||||
BGT errCorrupt
|
||||
|
||||
// copy(dst[d:], src[s:s+length])
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
|
||||
// R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those
|
||||
// three registers to the stack, to save local variables across the CALL.
|
||||
MOVD R_DST, 8(RSP)
|
||||
MOVD R_SRC, 16(RSP)
|
||||
MOVD R_LEN, 24(RSP)
|
||||
MOVD R_DST, 32(RSP)
|
||||
MOVD R_SRC, 40(RSP)
|
||||
MOVD R_LEN, 48(RSP)
|
||||
MOVD R_OFF, 56(RSP)
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// Restore local variables: unspill registers from the stack and
|
||||
// re-calculate R_DBASE-R_SEND.
|
||||
MOVD 32(RSP), R_DST
|
||||
MOVD 40(RSP), R_SRC
|
||||
MOVD 48(RSP), R_LEN
|
||||
MOVD 56(RSP), R_OFF
|
||||
MOVD dst_base+0(FP), R_DBASE
|
||||
MOVD dst_len+8(FP), R_DLEN
|
||||
MOVD R_DBASE, R_DEND
|
||||
ADD R_DLEN, R_DEND, R_DEND
|
||||
MOVD src_base+24(FP), R_SBASE
|
||||
MOVD src_len+32(FP), R_SLEN
|
||||
MOVD R_SBASE, R_SEND
|
||||
ADD R_SLEN, R_SEND, R_SEND
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADD R_LEN, R_DST, R_DST
|
||||
ADD R_LEN, R_SRC, R_SRC
|
||||
B loop
|
||||
|
||||
tagLit60Plus:
|
||||
// !!! This fragment does the
|
||||
//
|
||||
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||
//
|
||||
// checks. In the asm version, we code it once instead of once per switch case.
|
||||
ADD R_LEN, R_SRC, R_SRC
|
||||
SUB $58, R_SRC, R_SRC
|
||||
TEST_SRC()
|
||||
|
||||
// case x == 60:
|
||||
MOVW $61, R1
|
||||
CMPW R1, R_LEN
|
||||
BEQ tagLit61
|
||||
BGT tagLit62Plus
|
||||
|
||||
// x = uint32(src[s-1])
|
||||
MOVBU -1(R_SRC), R_LEN
|
||||
B doLit
|
||||
|
||||
tagLit61:
|
||||
// case x == 61:
|
||||
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
MOVHU -2(R_SRC), R_LEN
|
||||
B doLit
|
||||
|
||||
tagLit62Plus:
|
||||
CMPW $62, R_LEN
|
||||
BHI tagLit63
|
||||
|
||||
// case x == 62:
|
||||
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
MOVHU -3(R_SRC), R_LEN
|
||||
MOVBU -1(R_SRC), R_TMP1
|
||||
ORR R_TMP1<<16, R_LEN
|
||||
B doLit
|
||||
|
||||
tagLit63:
|
||||
// case x == 63:
|
||||
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
MOVWU -4(R_SRC), R_LEN
|
||||
B doLit
|
||||
|
||||
// The code above handles literal tags.
|
||||
// ----------------------------------------
|
||||
// The code below handles copy tags.
|
||||
|
||||
tagCopy4:
|
||||
// case tagCopy4:
|
||||
// s += 5
|
||||
ADD $5, R_SRC, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVD R_SRC, R_TMP1
|
||||
SUB R_SBASE, R_TMP1, R_TMP1
|
||||
CMP R_SLEN, R_TMP1
|
||||
BGT errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-5])>>2
|
||||
MOVD $1, R1
|
||||
ADD R_LEN>>2, R1, R_LEN
|
||||
|
||||
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
MOVWU -4(R_SRC), R_OFF
|
||||
B doCopy
|
||||
|
||||
tagCopy2:
|
||||
// case tagCopy2:
|
||||
// s += 3
|
||||
ADD $3, R_SRC, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
TEST_SRC()
|
||||
|
||||
// length = 1 + int(src[s-3])>>2
|
||||
MOVD $1, R1
|
||||
ADD R_LEN>>2, R1, R_LEN
|
||||
|
||||
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
MOVHU -2(R_SRC), R_OFF
|
||||
B doCopy
|
||||
|
||||
tagCopy:
|
||||
// We have a copy tag. We assume that:
|
||||
// - R_TMP1 == src[s] & 0x03
|
||||
// - R_LEN == src[s]
|
||||
CMP $2, R_TMP1
|
||||
BEQ tagCopy2
|
||||
BGT tagCopy4
|
||||
|
||||
// case tagCopy1:
|
||||
// s += 2
|
||||
ADD $2, R_SRC, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
TEST_SRC()
|
||||
|
||||
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
// Calculate offset in R_TMP0 in case it is a repeat.
|
||||
MOVD R_LEN, R_TMP0
|
||||
AND $0xe0, R_TMP0
|
||||
MOVBU -1(R_SRC), R_TMP1
|
||||
ORR R_TMP0<<3, R_TMP1, R_TMP0
|
||||
|
||||
// length = 4 + int(src[s-2])>>2&0x7
|
||||
MOVD $7, R1
|
||||
AND R_LEN>>2, R1, R_LEN
|
||||
ADD $4, R_LEN, R_LEN
|
||||
|
||||
// check if repeat code with offset 0.
|
||||
CMP $0, R_TMP0
|
||||
BEQ repeatCode
|
||||
|
||||
// This is a regular copy, transfer our temporary value to R_OFF (offset)
|
||||
MOVD R_TMP0, R_OFF
|
||||
B doCopy
|
||||
|
||||
// This is a repeat code.
|
||||
repeatCode:
|
||||
// If length < 9, reuse last offset, with the length already calculated.
|
||||
CMP $9, R_LEN
|
||||
BLT doCopyRepeat
|
||||
BEQ repeatLen1
|
||||
CMP $10, R_LEN
|
||||
BEQ repeatLen2
|
||||
|
||||
repeatLen3:
|
||||
// s +=3
|
||||
ADD $3, R_SRC, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
TEST_SRC()
|
||||
|
||||
// length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540
|
||||
MOVBU -1(R_SRC), R_TMP0
|
||||
MOVHU -3(R_SRC), R_LEN
|
||||
ORR R_TMP0<<16, R_LEN, R_LEN
|
||||
ADD $65540, R_LEN, R_LEN
|
||||
B doCopyRepeat
|
||||
|
||||
repeatLen2:
|
||||
// s +=2
|
||||
ADD $2, R_SRC, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
TEST_SRC()
|
||||
|
||||
// length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260
|
||||
MOVHU -2(R_SRC), R_LEN
|
||||
ADD $260, R_LEN, R_LEN
|
||||
B doCopyRepeat
|
||||
|
||||
repeatLen1:
|
||||
// s +=1
|
||||
ADD $1, R_SRC, R_SRC
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
TEST_SRC()
|
||||
|
||||
// length = src[s-1] + 8
|
||||
MOVBU -1(R_SRC), R_LEN
|
||||
ADD $8, R_LEN, R_LEN
|
||||
B doCopyRepeat
|
||||
|
||||
doCopy:
|
||||
// This is the end of the outer "switch", when we have a copy tag.
|
||||
//
|
||||
// We assume that:
|
||||
// - R_LEN == length && R_LEN > 0
|
||||
// - R_OFF == offset
|
||||
|
||||
// if d < offset { etc }
|
||||
MOVD R_DST, R_TMP1
|
||||
SUB R_DBASE, R_TMP1, R_TMP1
|
||||
CMP R_OFF, R_TMP1
|
||||
BLT errCorrupt
|
||||
|
||||
// Repeat values can skip the test above, since any offset > 0 will be in dst.
|
||||
doCopyRepeat:
|
||||
|
||||
// if offset <= 0 { etc }
|
||||
CMP $0, R_OFF
|
||||
BLE errCorrupt
|
||||
|
||||
// if length > len(dst)-d { etc }
|
||||
MOVD R_DEND, R_TMP1
|
||||
SUB R_DST, R_TMP1, R_TMP1
|
||||
CMP R_TMP1, R_LEN
|
||||
BGT errCorrupt
|
||||
|
||||
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
|
||||
//
|
||||
// Set:
|
||||
// - R_TMP2 = len(dst)-d
|
||||
// - R_TMP3 = &dst[d-offset]
|
||||
MOVD R_DEND, R_TMP2
|
||||
SUB R_DST, R_TMP2, R_TMP2
|
||||
MOVD R_DST, R_TMP3
|
||||
SUB R_OFF, R_TMP3, R_TMP3
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
|
||||
//
|
||||
// First, try using two 8-byte load/stores, similar to the doLit technique
|
||||
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
|
||||
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
|
||||
// and not one 16-byte load/store, and the first store has to be before the
|
||||
// second load, due to the overlap if offset is in the range [8, 16).
|
||||
//
|
||||
// if length > 16 || offset < 8 || len(dst)-d < 16 {
|
||||
// goto slowForwardCopy
|
||||
// }
|
||||
// copy 16 bytes
|
||||
// d += length
|
||||
CMP $16, R_LEN
|
||||
BGT slowForwardCopy
|
||||
CMP $8, R_OFF
|
||||
BLT slowForwardCopy
|
||||
CMP $16, R_TMP2
|
||||
BLT slowForwardCopy
|
||||
MOVD 0(R_TMP3), R_TMP0
|
||||
MOVD R_TMP0, 0(R_DST)
|
||||
MOVD 8(R_TMP3), R_TMP1
|
||||
MOVD R_TMP1, 8(R_DST)
|
||||
ADD R_LEN, R_DST, R_DST
|
||||
B loop
|
||||
|
||||
slowForwardCopy:
|
||||
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
|
||||
// can still try 8-byte load stores, provided we can overrun up to 10 extra
|
||||
// bytes. As above, the overrun will be fixed up by subsequent iterations
|
||||
// of the outermost loop.
|
||||
//
|
||||
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
|
||||
// commentary says:
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// The main part of this loop is a simple copy of eight bytes at a time
|
||||
// until we've copied (at least) the requested amount of bytes. However,
|
||||
// if d and d-offset are less than eight bytes apart (indicating a
|
||||
// repeating pattern of length < 8), we first need to expand the pattern in
|
||||
// order to get the correct results. For instance, if the buffer looks like
|
||||
// this, with the eight-byte <d-offset> and <d> patterns marked as
|
||||
// intervals:
|
||||
//
|
||||
// abxxxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
|
||||
// once, after which we can move <d> two bytes without moving <d-offset>:
|
||||
//
|
||||
// ababxxxxxxxxxx
|
||||
// [------] d-offset
|
||||
// [------] d
|
||||
//
|
||||
// and repeat the exercise until the two no longer overlap.
|
||||
//
|
||||
// This allows us to do very well in the special case of one single byte
|
||||
// repeated many times, without taking a big hit for more general cases.
|
||||
//
|
||||
// The worst case of extra writing past the end of the match occurs when
|
||||
// offset == 1 and length == 1; the last copy will read from byte positions
|
||||
// [0..7] and write to [4..11], whereas it was only supposed to write to
|
||||
// position 1. Thus, ten excess bytes.
|
||||
//
|
||||
// ----
|
||||
//
|
||||
// That "10 byte overrun" worst case is confirmed by Go's
|
||||
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
|
||||
// and finishSlowForwardCopy algorithm.
|
||||
//
|
||||
// if length > len(dst)-d-10 {
|
||||
// goto verySlowForwardCopy
|
||||
// }
|
||||
SUB $10, R_TMP2, R_TMP2
|
||||
CMP R_TMP2, R_LEN
|
||||
BGT verySlowForwardCopy
|
||||
|
||||
// We want to keep the offset, so we use R_TMP2 from here.
|
||||
MOVD R_OFF, R_TMP2
|
||||
|
||||
makeOffsetAtLeast8:
|
||||
// !!! As above, expand the pattern so that offset >= 8 and we can use
|
||||
// 8-byte load/stores.
|
||||
//
|
||||
// for offset < 8 {
|
||||
// copy 8 bytes from dst[d-offset:] to dst[d:]
|
||||
// length -= offset
|
||||
// d += offset
|
||||
// offset += offset
|
||||
// // The two previous lines together means that d-offset, and therefore
|
||||
// // R_TMP3, is unchanged.
|
||||
// }
|
||||
CMP $8, R_TMP2
|
||||
BGE fixUpSlowForwardCopy
|
||||
MOVD (R_TMP3), R_TMP1
|
||||
MOVD R_TMP1, (R_DST)
|
||||
SUB R_TMP2, R_LEN, R_LEN
|
||||
ADD R_TMP2, R_DST, R_DST
|
||||
ADD R_TMP2, R_TMP2, R_TMP2
|
||||
B makeOffsetAtLeast8
|
||||
|
||||
fixUpSlowForwardCopy:
|
||||
// !!! Add length (which might be negative now) to d (implied by R_DST being
|
||||
// &dst[d]) so that d ends up at the right place when we jump back to the
|
||||
// top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
|
||||
// length is positive, copying the remaining length bytes will write to the
|
||||
// right place.
|
||||
MOVD R_DST, R_TMP0
|
||||
ADD R_LEN, R_DST, R_DST
|
||||
|
||||
finishSlowForwardCopy:
|
||||
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
|
||||
// length means that we overrun, but as above, that will be fixed up by
|
||||
// subsequent iterations of the outermost loop.
|
||||
MOVD $0, R1
|
||||
CMP R1, R_LEN
|
||||
BLE loop
|
||||
MOVD (R_TMP3), R_TMP1
|
||||
MOVD R_TMP1, (R_TMP0)
|
||||
ADD $8, R_TMP3, R_TMP3
|
||||
ADD $8, R_TMP0, R_TMP0
|
||||
SUB $8, R_LEN, R_LEN
|
||||
B finishSlowForwardCopy
|
||||
|
||||
verySlowForwardCopy:
|
||||
// verySlowForwardCopy is a simple implementation of forward copy. In C
|
||||
// parlance, this is a do/while loop instead of a while loop, since we know
|
||||
// that length > 0. In Go syntax:
|
||||
//
|
||||
// for {
|
||||
// dst[d] = dst[d - offset]
|
||||
// d++
|
||||
// length--
|
||||
// if length == 0 {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
MOVB (R_TMP3), R_TMP1
|
||||
MOVB R_TMP1, (R_DST)
|
||||
ADD $1, R_TMP3, R_TMP3
|
||||
ADD $1, R_DST, R_DST
|
||||
SUB $1, R_LEN, R_LEN
|
||||
CBNZ R_LEN, verySlowForwardCopy
|
||||
B loop
|
||||
|
||||
// The code above handles copy tags.
|
||||
// ----------------------------------------
|
||||
|
||||
end:
|
||||
// This is the end of the "for s < len(src)".
|
||||
//
|
||||
// if d != len(dst) { etc }
|
||||
CMP R_DEND, R_DST
|
||||
BNE errCorrupt
|
||||
|
||||
// return 0
|
||||
MOVD $0, ret+48(FP)
|
||||
RET
|
||||
|
||||
errCorrupt:
|
||||
// return decodeErrCodeCorrupt
|
||||
MOVD $1, R_TMP0
|
||||
MOVD R_TMP0, ret+48(FP)
|
||||
RET
|
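The magic constants 8, 260 and 65540 used by both assembly variants fold the +4 from the tag into the extra-byte length encodings. A hypothetical Go sketch of that repeat-length decoding (the function name and example values are illustrative, not part of the vendored code):

package main

import "fmt"

// repeatLength mirrors the repeat-tag handling above: lengths 4..8 come
// straight from the tag, while 9, 10 and 11 pull 1, 2 or 3 extra bytes,
// with the tag's +4 folded into the constants (8=4+4, 260=256+4, 65540=65536+4).
func repeatLength(tagLen int, extra []byte) int {
	switch tagLen {
	case 9:
		return int(extra[0]) + 8
	case 10:
		return (int(extra[0]) | int(extra[1])<<8) + 260
	case 11:
		return (int(extra[0]) | int(extra[1])<<8 | int(extra[2])<<16) + 65540
	default: // 4..8: the length in the tag is already final.
		return tagLen
	}
}

func main() {
	fmt.Println(repeatLength(6, nil))                 // 6
	fmt.Println(repeatLength(9, []byte{0x20}))        // 40
	fmt.Println(repeatLength(10, []byte{0x00, 0x01})) // 516
}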
17
vendor/github.com/klauspost/compress/s2/decode_asm.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (amd64 || arm64) && !appengine && gc && !noasm
|
||||
// +build amd64 arm64
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
package s2
|
||||
|
||||
// decode has the same semantics as in decode_other.go.
|
||||
//
|
||||
//go:noescape
|
||||
func s2Decode(dst, src []byte) int
|
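For context, a minimal sketch (assuming the package's public block API, s2.Encode and s2.Decode) of how s2Decode is reached in practice: Decode reads the uvarint length header and then hands the block body to s2Decode, whether the assembly or the pure Go fallback is compiled in.

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("gotosocial "), 100)

	// Encode writes a uvarint decoded-length header followed by tagged
	// literal/copy elements; Decode parses the header and calls s2Decode.
	block := s2.Encode(nil, src)

	out, err := s2.Decode(nil, block)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), "->", len(block), "roundtrip ok:", bytes.Equal(src, out))
}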
267
vendor/github.com/klauspost/compress/s2/decode_other.go
generated
vendored
Normal file
@ -0,0 +1,267 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (!amd64 && !arm64) || appengine || !gc || noasm
|
||||
// +build !amd64,!arm64 appengine !gc noasm
|
||||
|
||||
package s2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// decode writes the decoding of src to dst. It assumes that the varint-encoded
|
||||
// length of the decompressed bytes has already been read, and that len(dst)
|
||||
// equals that length.
|
||||
//
|
||||
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
|
||||
func s2Decode(dst, src []byte) int {
|
||||
const debug = false
|
||||
if debug {
|
||||
fmt.Println("Starting decode, dst len:", len(dst))
|
||||
}
|
||||
var d, s, length int
|
||||
offset := 0
|
||||
|
||||
// As long as we can read at least 5 bytes...
|
||||
for s < len(src)-5 {
|
||||
switch src[s] & 0x03 {
|
||||
case tagLiteral:
|
||||
x := uint32(src[s] >> 2)
|
||||
switch {
|
||||
case x < 60:
|
||||
s++
|
||||
case x == 60:
|
||||
s += 2
|
||||
x = uint32(src[s-1])
|
||||
case x == 61:
|
||||
s += 3
|
||||
x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
case x == 62:
|
||||
s += 4
|
||||
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
case x == 63:
|
||||
s += 5
|
||||
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
}
|
||||
length = int(x) + 1
|
||||
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
if debug {
|
||||
fmt.Println("literals, length:", length, "d-after:", d+length)
|
||||
}
|
||||
|
||||
copy(dst[d:], src[s:s+length])
|
||||
d += length
|
||||
s += length
|
||||
continue
|
||||
|
||||
case tagCopy1:
|
||||
s += 2
|
||||
length = int(src[s-2]) >> 2 & 0x7
|
||||
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
if toffset == 0 {
|
||||
if debug {
|
||||
fmt.Print("(repeat) ")
|
||||
}
|
||||
// keep last offset
|
||||
switch length {
|
||||
case 5:
|
||||
s += 1
|
||||
length = int(uint32(src[s-1])) + 4
|
||||
case 6:
|
||||
s += 2
|
||||
length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
|
||||
case 7:
|
||||
s += 3
|
||||
length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
|
||||
default: // 0-> 4
|
||||
}
|
||||
} else {
|
||||
offset = toffset
|
||||
}
|
||||
length += 4
|
||||
case tagCopy2:
|
||||
s += 3
|
||||
length = 1 + int(src[s-3])>>2
|
||||
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
|
||||
case tagCopy4:
|
||||
s += 5
|
||||
length = 1 + int(src[s-5])>>2
|
||||
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
}
|
||||
|
||||
if offset <= 0 || d < offset || length > len(dst)-d {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
|
||||
if debug {
|
||||
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
|
||||
}
|
||||
|
||||
// Copy from an earlier sub-slice of dst to a later sub-slice.
|
||||
// If no overlap, use the built-in copy:
|
||||
if offset > length {
|
||||
copy(dst[d:d+length], dst[d-offset:])
|
||||
d += length
|
||||
continue
|
||||
}
|
||||
|
||||
// Unlike the built-in copy function, this byte-by-byte copy always runs
|
||||
// forwards, even if the slices overlap. Conceptually, this is:
|
||||
//
|
||||
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
|
||||
//
|
||||
// We align the slices into a and b and show the compiler they are the same size.
|
||||
// This allows the loop to run without bounds checks.
|
||||
a := dst[d : d+length]
|
||||
b := dst[d-offset:]
|
||||
b = b[:len(a)]
|
||||
for i := range a {
|
||||
a[i] = b[i]
|
||||
}
|
||||
d += length
|
||||
}
|
||||
|
||||
// Remaining with extra checks...
|
||||
for s < len(src) {
|
||||
switch src[s] & 0x03 {
|
||||
case tagLiteral:
|
||||
x := uint32(src[s] >> 2)
|
||||
switch {
|
||||
case x < 60:
|
||||
s++
|
||||
case x == 60:
|
||||
s += 2
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-1])
|
||||
case x == 61:
|
||||
s += 3
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
case x == 62:
|
||||
s += 4
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
case x == 63:
|
||||
s += 5
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
}
|
||||
length = int(x) + 1
|
||||
if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
if debug {
|
||||
fmt.Println("literals, length:", length, "d-after:", d+length)
|
||||
}
|
||||
|
||||
copy(dst[d:], src[s:s+length])
|
||||
d += length
|
||||
s += length
|
||||
continue
|
||||
|
||||
case tagCopy1:
|
||||
s += 2
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = int(src[s-2]) >> 2 & 0x7
|
||||
toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
if toffset == 0 {
|
||||
if debug {
|
||||
fmt.Print("(repeat) ")
|
||||
}
|
||||
// keep last offset
|
||||
switch length {
|
||||
case 5:
|
||||
s += 1
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = int(uint32(src[s-1])) + 4
|
||||
case 6:
|
||||
s += 2
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8)
|
||||
case 7:
|
||||
s += 3
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16)
|
||||
default: // 0-> 4
|
||||
}
|
||||
} else {
|
||||
offset = toffset
|
||||
}
|
||||
length += 4
|
||||
case tagCopy2:
|
||||
s += 3
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-3])>>2
|
||||
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
|
||||
case tagCopy4:
|
||||
s += 5
|
||||
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
length = 1 + int(src[s-5])>>2
|
||||
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
}
|
||||
|
||||
if offset <= 0 || d < offset || length > len(dst)-d {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
|
||||
if debug {
|
||||
fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
|
||||
}
|
||||
|
||||
// Copy from an earlier sub-slice of dst to a later sub-slice.
|
||||
// If no overlap, use the built-in copy:
|
||||
if offset > length {
|
||||
copy(dst[d:d+length], dst[d-offset:])
|
||||
d += length
|
||||
continue
|
||||
}
|
||||
|
||||
// Unlike the built-in copy function, this byte-by-byte copy always runs
|
||||
// forwards, even if the slices overlap. Conceptually, this is:
|
||||
//
|
||||
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
|
||||
//
|
||||
// We align the slices into a and b and show the compiler they are the same size.
|
||||
// This allows the loop to run without bounds checks.
|
||||
a := dst[d : d+length]
|
||||
b := dst[d-offset:]
|
||||
b = b[:len(a)]
|
||||
for i := range a {
|
||||
a[i] = b[i]
|
||||
}
|
||||
d += length
|
||||
}
|
||||
|
||||
if d != len(dst) {
|
||||
return decodeErrCodeCorrupt
|
||||
}
|
||||
return 0
|
||||
}
|
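The byte-by-byte loop near the end of s2Decode is what makes short offsets repeat a pattern rather than copy stale data. A small, hypothetical standalone sketch of that behaviour (forwardCopy and the example values are illustrative only):

package main

import "fmt"

// forwardCopy mirrors the overlap handling in s2Decode above: when
// offset < length the source and destination regions overlap, and copying
// forwards one byte at a time repeats the pattern, which the built-in
// copy (memmove semantics) would not do.
func forwardCopy(dst []byte, d, offset, length int) {
	a := dst[d : d+length]
	b := dst[d-offset:]
	for i := range a {
		a[i] = b[i]
	}
}

func main() {
	dst := make([]byte, 10)
	copy(dst, "ab")
	forwardCopy(dst, 2, 2, 8) // copy 8 bytes from 2 bytes back
	fmt.Println(string(dst))  // ababababab
}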
1172
vendor/github.com/klauspost/compress/s2/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
456
vendor/github.com/klauspost/compress/s2/encode_all.go
generated
vendored
Normal file
@ -0,0 +1,456 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package s2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
func load32(b []byte, i int) uint32 {
|
||||
return binary.LittleEndian.Uint32(b[i:])
|
||||
}
|
||||
|
||||
func load64(b []byte, i int) uint64 {
|
||||
return binary.LittleEndian.Uint64(b[i:])
|
||||
}
|
||||
|
||||
// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
|
||||
// Preferably h should be a constant and should always be <64.
|
||||
func hash6(u uint64, h uint8) uint32 {
|
||||
const prime6bytes = 227718039650203
|
||||
return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
|
||||
}
|
||||
|
||||
func encodeGo(dst, src []byte) []byte {
|
||||
if n := MaxEncodedLen(len(src)); n < 0 {
|
||||
panic(ErrTooLarge)
|
||||
} else if len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
d := binary.PutUvarint(dst, uint64(len(src)))
|
||||
|
||||
if len(src) == 0 {
|
||||
return dst[:d]
|
||||
}
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
d += emitLiteral(dst[d:], src)
|
||||
return dst[:d]
|
||||
}
|
||||
n := encodeBlockGo(dst[d:], src)
|
||||
if n > 0 {
|
||||
d += n
|
||||
return dst[:d]
|
||||
}
|
||||
// Not compressible
|
||||
d += emitLiteral(dst[d:], src)
|
||||
return dst[:d]
|
||||
}
|
||||
|
||||
// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockGo(dst, src []byte) (d int) {
|
||||
// Initialize the hash table.
|
||||
const (
|
||||
tableBits = 14
|
||||
maxTableSize = 1 << tableBits
|
||||
|
||||
debug = false
|
||||
)
|
||||
|
||||
var table [maxTableSize]uint32
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
|
||||
// Bail if we can't compress to at least this.
|
||||
dstLimit := len(src) - len(src)>>5 - 5
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
cv := load64(src, s)
|
||||
|
||||
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
||||
repeat := 1
|
||||
|
||||
for {
|
||||
candidate := 0
|
||||
for {
|
||||
// Next src position to check
|
||||
nextS := s + (s-nextEmit)>>6 + 4
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
hash0 := hash6(cv, tableBits)
|
||||
hash1 := hash6(cv>>8, tableBits)
|
||||
candidate = int(table[hash0])
|
||||
candidate2 := int(table[hash1])
|
||||
table[hash0] = uint32(s)
|
||||
table[hash1] = uint32(s + 1)
|
||||
hash2 := hash6(cv>>16, tableBits)
|
||||
|
||||
// Check repeat at offset checkRep.
|
||||
const checkRep = 1
|
||||
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
||||
base := s + checkRep
|
||||
// Extend back
|
||||
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||
i--
|
||||
base--
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||
|
||||
// Extend forward
|
||||
candidate := s - repeat + 4 + checkRep
|
||||
s += 4 + checkRep
|
||||
for s <= sLimit {
|
||||
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||
s += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
candidate += 8
|
||||
}
|
||||
if debug {
|
||||
// Validate match.
|
||||
if s <= candidate {
|
||||
panic("s <= candidate")
|
||||
}
|
||||
a := src[base:s]
|
||||
b := src[base-repeat : base-repeat+(s-base)]
|
||||
if !bytes.Equal(a, b) {
|
||||
panic("mismatch")
|
||||
}
|
||||
}
|
||||
if nextEmit > 0 {
|
||||
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||
d += emitRepeat(dst[d:], repeat, s-base)
|
||||
} else {
|
||||
// First match, cannot be repeat.
|
||||
d += emitCopy(dst[d:], repeat, s-base)
|
||||
}
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
cv = load64(src, s)
|
||||
continue
|
||||
}
|
||||
|
||||
if uint32(cv) == load32(src, candidate) {
|
||||
break
|
||||
}
|
||||
candidate = int(table[hash2])
|
||||
if uint32(cv>>8) == load32(src, candidate2) {
|
||||
table[hash2] = uint32(s + 2)
|
||||
candidate = candidate2
|
||||
s++
|
||||
break
|
||||
}
|
||||
table[hash2] = uint32(s + 2)
|
||||
if uint32(cv>>16) == load32(src, candidate) {
|
||||
s += 2
|
||||
break
|
||||
}
|
||||
|
||||
cv = load64(src, nextS)
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// Extend backwards.
|
||||
// The top bytes will be rechecked to get the full match.
|
||||
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
|
||||
candidate--
|
||||
s--
|
||||
}
|
||||
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+(s-nextEmit) > dstLimit {
|
||||
return 0
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
base := s
|
||||
repeat = base - candidate
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
s += 4
|
||||
candidate += 4
|
||||
for s <= len(src)-8 {
|
||||
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||
s += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
candidate += 8
|
||||
}
|
||||
|
||||
d += emitCopy(dst[d:], repeat, s-base)
|
||||
if debug {
|
||||
// Validate match.
|
||||
if s <= candidate {
|
||||
panic("s <= candidate")
|
||||
}
|
||||
a := src[base:s]
|
||||
b := src[base-repeat : base-repeat+(s-base)]
|
||||
if !bytes.Equal(a, b) {
|
||||
panic("mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
if d > dstLimit {
|
||||
// Do we have space for more, if not bail.
|
||||
return 0
|
||||
}
|
||||
// Check for an immediate match, otherwise start search at s+1
|
||||
x := load64(src, s-2)
|
||||
m2Hash := hash6(x, tableBits)
|
||||
currHash := hash6(x>>16, tableBits)
|
||||
candidate = int(table[currHash])
|
||||
table[m2Hash] = uint32(s - 2)
|
||||
table[currHash] = uint32(s)
|
||||
if debug && s == candidate {
|
||||
panic("s == candidate")
|
||||
}
|
||||
if uint32(x>>16) != load32(src, candidate) {
|
||||
cv = load64(src, s+1)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+len(src)-nextEmit > dstLimit {
|
||||
return 0
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func encodeBlockSnappyGo(dst, src []byte) (d int) {
|
||||
// Initialize the hash table.
|
||||
const (
|
||||
tableBits = 14
|
||||
maxTableSize = 1 << tableBits
|
||||
)
|
||||
|
||||
var table [maxTableSize]uint32
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
|
||||
// Bail if we can't compress to at least this.
|
||||
dstLimit := len(src) - len(src)>>5 - 5
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
cv := load64(src, s)
|
||||
|
||||
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
||||
repeat := 1
|
||||
|
||||
for {
|
||||
candidate := 0
|
||||
for {
|
||||
// Next src position to check
|
||||
nextS := s + (s-nextEmit)>>6 + 4
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
hash0 := hash6(cv, tableBits)
|
||||
hash1 := hash6(cv>>8, tableBits)
|
||||
candidate = int(table[hash0])
|
||||
candidate2 := int(table[hash1])
|
||||
table[hash0] = uint32(s)
|
||||
table[hash1] = uint32(s + 1)
|
||||
hash2 := hash6(cv>>16, tableBits)
|
||||
|
||||
// Check repeat at offset checkRep.
|
||||
const checkRep = 1
|
||||
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
||||
base := s + checkRep
|
||||
// Extend back
|
||||
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||
i--
|
||||
base--
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||
|
||||
// Extend forward
|
||||
candidate := s - repeat + 4 + checkRep
|
||||
s += 4 + checkRep
|
||||
for s <= sLimit {
|
||||
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||
s += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
candidate += 8
|
||||
}
|
||||
|
||||
d += emitCopyNoRepeat(dst[d:], repeat, s-base)
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
cv = load64(src, s)
|
||||
continue
|
||||
}
|
||||
|
||||
if uint32(cv) == load32(src, candidate) {
|
||||
break
|
||||
}
|
||||
candidate = int(table[hash2])
|
||||
if uint32(cv>>8) == load32(src, candidate2) {
|
||||
table[hash2] = uint32(s + 2)
|
||||
candidate = candidate2
|
||||
s++
|
||||
break
|
||||
}
|
||||
table[hash2] = uint32(s + 2)
|
||||
if uint32(cv>>16) == load32(src, candidate) {
|
||||
s += 2
|
||||
break
|
||||
}
|
||||
|
||||
cv = load64(src, nextS)
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
|
||||
candidate--
|
||||
s--
|
||||
}
|
||||
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+(s-nextEmit) > dstLimit {
|
||||
return 0
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes
|
||||
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
|
||||
// them as literal bytes.
|
||||
|
||||
d += emitLiteral(dst[d:], src[nextEmit:s])
|
||||
|
||||
// Call emitCopy, and then see if another emitCopy could be our next
|
||||
// move. Repeat until we find no match for the input immediately after
|
||||
// what was consumed by the last emitCopy call.
|
||||
//
|
||||
// If we exit this loop normally then we need to call emitLiteral next,
|
||||
// though we don't yet know how big the literal will be. We handle that
|
||||
// by proceeding to the next iteration of the main loop. We also can
|
||||
// exit this loop via goto if we get close to exhausting the input.
|
||||
for {
|
||||
// Invariant: we have a 4-byte match at s, and no need to emit any
|
||||
// literal bytes prior to s.
|
||||
base := s
|
||||
repeat = base - candidate
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
s += 4
|
||||
candidate += 4
|
||||
for s <= len(src)-8 {
|
||||
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||
s += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
candidate += 8
|
||||
}
|
||||
|
||||
d += emitCopyNoRepeat(dst[d:], repeat, s-base)
|
||||
if false {
|
||||
// Validate match.
|
||||
a := src[base:s]
|
||||
b := src[base-repeat : base-repeat+(s-base)]
|
||||
if !bytes.Equal(a, b) {
|
||||
panic("mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
if d > dstLimit {
|
||||
// Do we have space for more, if not bail.
|
||||
return 0
|
||||
}
|
||||
// Check for an immediate match, otherwise start search at s+1
|
||||
x := load64(src, s-2)
|
||||
m2Hash := hash6(x, tableBits)
|
||||
currHash := hash6(x>>16, tableBits)
|
||||
candidate = int(table[currHash])
|
||||
table[m2Hash] = uint32(s - 2)
|
||||
table[currHash] = uint32(s)
|
||||
if uint32(x>>16) != load32(src, candidate) {
|
||||
cv = load64(src, s+1)
|
||||
s++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+len(src)-nextEmit > dstLimit {
|
||||
return 0
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
}
|
||||
return d
|
||||
}
|
vendor/github.com/klauspost/compress/s2/encode_amd64.go (new file, generated, vendored, 142 lines)
@ -0,0 +1,142 @@
|
||||
//go:build !appengine && !noasm && gc
|
||||
// +build !appengine,!noasm,gc
|
||||
|
||||
package s2
|
||||
|
||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlock(dst, src []byte) (d int) {
|
||||
const (
|
||||
// Use 12 bit table when less than...
|
||||
limit12B = 16 << 10
|
||||
// Use 10 bit table when less than...
|
||||
limit10B = 4 << 10
|
||||
// Use 8 bit table when less than...
|
||||
limit8B = 512
|
||||
)
|
||||
|
||||
if len(src) >= 4<<20 {
|
||||
return encodeBlockAsm(dst, src)
|
||||
}
|
||||
if len(src) >= limit12B {
|
||||
return encodeBlockAsm4MB(dst, src)
|
||||
}
|
||||
if len(src) >= limit10B {
|
||||
return encodeBlockAsm12B(dst, src)
|
||||
}
|
||||
if len(src) >= limit8B {
|
||||
return encodeBlockAsm10B(dst, src)
|
||||
}
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
return encodeBlockAsm8B(dst, src)
|
||||
}
|
||||
|
||||
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockBetter(dst, src []byte) (d int) {
|
||||
const (
|
||||
// Use 12 bit table when less than...
|
||||
limit12B = 16 << 10
|
||||
// Use 10 bit table when less than...
|
||||
limit10B = 4 << 10
|
||||
// Use 8 bit table when less than...
|
||||
limit8B = 512
|
||||
)
|
||||
|
||||
if len(src) > 4<<20 {
|
||||
return encodeBetterBlockAsm(dst, src)
|
||||
}
|
||||
if len(src) >= limit12B {
|
||||
return encodeBetterBlockAsm4MB(dst, src)
|
||||
}
|
||||
if len(src) >= limit10B {
|
||||
return encodeBetterBlockAsm12B(dst, src)
|
||||
}
|
||||
if len(src) >= limit8B {
|
||||
return encodeBetterBlockAsm10B(dst, src)
|
||||
}
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
return encodeBetterBlockAsm8B(dst, src)
|
||||
}
|
||||
|
||||
// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockSnappy(dst, src []byte) (d int) {
|
||||
const (
|
||||
// Use 12 bit table when less than...
|
||||
limit12B = 16 << 10
|
||||
// Use 10 bit table when less than...
|
||||
limit10B = 4 << 10
|
||||
// Use 8 bit table when less than...
|
||||
limit8B = 512
|
||||
)
|
||||
if len(src) >= 64<<10 {
|
||||
return encodeSnappyBlockAsm(dst, src)
|
||||
}
|
||||
if len(src) >= limit12B {
|
||||
return encodeSnappyBlockAsm64K(dst, src)
|
||||
}
|
||||
if len(src) >= limit10B {
|
||||
return encodeSnappyBlockAsm12B(dst, src)
|
||||
}
|
||||
if len(src) >= limit8B {
|
||||
return encodeSnappyBlockAsm10B(dst, src)
|
||||
}
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
return encodeSnappyBlockAsm8B(dst, src)
|
||||
}
|
||||
|
||||
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
|
||||
const (
|
||||
// Use 12 bit table when less than...
|
||||
limit12B = 16 << 10
|
||||
// Use 10 bit table when less than...
|
||||
limit10B = 4 << 10
|
||||
// Use 8 bit table when less than...
|
||||
limit8B = 512
|
||||
)
|
||||
if len(src) >= 64<<10 {
|
||||
return encodeSnappyBetterBlockAsm(dst, src)
|
||||
}
|
||||
if len(src) >= limit12B {
|
||||
return encodeSnappyBetterBlockAsm64K(dst, src)
|
||||
}
|
||||
if len(src) >= limit10B {
|
||||
return encodeSnappyBetterBlockAsm12B(dst, src)
|
||||
}
|
||||
if len(src) >= limit8B {
|
||||
return encodeSnappyBetterBlockAsm10B(dst, src)
|
||||
}
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
return encodeSnappyBetterBlockAsm8B(dst, src)
|
||||
}
|
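
The size-tiered dispatchers above are internal plumbing: each assumes the varint-encoded decompressed length has already been written, and smaller inputs are routed to variants with smaller hash tables (the 12/10/8-bit versions) so the tables stay cache-resident. Callers normally reach them through the package's public block API instead. A minimal roundtrip sketch using only that public API (the sample payload is invented; nothing below is code from this diff):

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := bytes.Repeat([]byte("gotosocial media bytes "), 1024)

	// Encode writes the varint length header and the block-format payload.
	compressed := s2.Encode(nil, src)

	// Decode reverses it; the first argument is an optional reusable buffer.
	decompressed, err := s2.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), "->", len(compressed), "roundtrip ok:", bytes.Equal(src, decompressed))
}
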
vendor/github.com/klauspost/compress/s2/encode_best.go (new file, generated, vendored, 604 lines)
@ -0,0 +1,604 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package s2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockBest(dst, src []byte) (d int) {
|
||||
// Initialize the hash tables.
|
||||
const (
|
||||
// Long hash matches.
|
||||
lTableBits = 19
|
||||
maxLTableSize = 1 << lTableBits
|
||||
|
||||
// Short hash matches.
|
||||
sTableBits = 16
|
||||
maxSTableSize = 1 << sTableBits
|
||||
|
||||
inputMargin = 8 + 2
|
||||
)
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
|
||||
var lTable [maxLTableSize]uint64
|
||||
var sTable [maxSTableSize]uint64
|
||||
|
||||
// Bail if we can't compress to at least this.
|
||||
dstLimit := len(src) - 5
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
cv := load64(src, s)
|
||||
|
||||
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
||||
repeat := 1
|
||||
const lowbitMask = 0xffffffff
|
||||
getCur := func(x uint64) int {
|
||||
return int(x & lowbitMask)
|
||||
}
|
||||
getPrev := func(x uint64) int {
|
||||
return int(x >> 32)
|
||||
}
|
||||
const maxSkip = 64
|
||||
|
||||
for {
|
||||
type match struct {
|
||||
offset int
|
||||
s int
|
||||
length int
|
||||
score int
|
||||
rep bool
|
||||
}
|
||||
var best match
|
||||
for {
|
||||
// Next src position to check
|
||||
nextS := (s-nextEmit)>>8 + 1
|
||||
if nextS > maxSkip {
|
||||
nextS = s + maxSkip
|
||||
} else {
|
||||
nextS += s
|
||||
}
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
hashL := hash8(cv, lTableBits)
|
||||
hashS := hash4(cv, sTableBits)
|
||||
candidateL := lTable[hashL]
|
||||
candidateS := sTable[hashS]
|
||||
|
||||
score := func(m match) int {
|
||||
// Matches that are longer forward are penalized since we must emit it as a literal.
|
||||
score := m.length - m.s
|
||||
if nextEmit == m.s {
|
||||
// If we do not have to emit literals, we save 1 byte
|
||||
score++
|
||||
}
|
||||
offset := m.s - m.offset
|
||||
if m.rep {
|
||||
return score - emitRepeatSize(offset, m.length)
|
||||
}
|
||||
return score - emitCopySize(offset, m.length)
|
||||
}
|
||||
|
||||
matchAt := func(offset, s int, first uint32, rep bool) match {
|
||||
if best.length != 0 && best.s-best.offset == s-offset {
|
||||
// Don't retest if we have the same offset.
|
||||
return match{offset: offset, s: s}
|
||||
}
|
||||
if load32(src, offset) != first {
|
||||
return match{offset: offset, s: s}
|
||||
}
|
||||
m := match{offset: offset, s: s, length: 4 + offset, rep: rep}
|
||||
s += 4
|
||||
for s <= sLimit {
|
||||
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
|
||||
m.length += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
m.length += 8
|
||||
}
|
||||
m.length -= offset
|
||||
m.score = score(m)
|
||||
if m.score <= -m.s {
|
||||
// Eliminate if no savings, we might find a better one.
|
||||
m.length = 0
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
bestOf := func(a, b match) match {
|
||||
if b.length == 0 {
|
||||
return a
|
||||
}
|
||||
if a.length == 0 {
|
||||
return b
|
||||
}
|
||||
as := a.score + b.s
|
||||
bs := b.score + a.s
|
||||
if as >= bs {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false))
|
||||
|
||||
{
|
||||
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
|
||||
if best.length > 0 {
|
||||
// s+1
|
||||
nextShort := sTable[hash4(cv>>8, sTableBits)]
|
||||
s := s + 1
|
||||
cv := load64(src, s)
|
||||
nextLong := lTable[hash8(cv, lTableBits)]
|
||||
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
|
||||
// Repeat at + 2
|
||||
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true))
|
||||
|
||||
// s+2
|
||||
if true {
|
||||
nextShort = sTable[hash4(cv>>8, sTableBits)]
|
||||
s++
|
||||
cv = load64(src, s)
|
||||
nextLong = lTable[hash8(cv, lTableBits)]
|
||||
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false))
|
||||
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false))
|
||||
}
|
||||
// Search for a match at best match end, see if that is better.
|
||||
if sAt := best.s + best.length; sAt < sLimit {
|
||||
sBack := best.s
|
||||
backL := best.length
|
||||
// Load initial values
|
||||
cv = load64(src, sBack)
|
||||
// Search for mismatch
|
||||
next := lTable[hash8(load64(src, sAt), lTableBits)]
|
||||
//next := sTable[hash4(load64(src, sAt), sTableBits)]
|
||||
|
||||
if checkAt := getCur(next) - backL; checkAt > 0 {
|
||||
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
||||
}
|
||||
if checkAt := getPrev(next) - backL; checkAt > 0 {
|
||||
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update table
|
||||
lTable[hashL] = uint64(s) | candidateL<<32
|
||||
sTable[hashS] = uint64(s) | candidateS<<32
|
||||
|
||||
if best.length > 0 {
|
||||
break
|
||||
}
|
||||
|
||||
cv = load64(src, nextS)
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// Extend backwards, not needed for repeats...
|
||||
s = best.s
|
||||
if !best.rep {
|
||||
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
|
||||
best.offset--
|
||||
best.length++
|
||||
s--
|
||||
}
|
||||
}
|
||||
if false && best.offset >= s {
|
||||
panic(fmt.Errorf("t %d >= s %d", best.offset, s))
|
||||
}
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+(s-nextEmit) > dstLimit {
|
||||
return 0
|
||||
}
|
||||
|
||||
base := s
|
||||
offset := s - best.offset
|
||||
|
||||
s += best.length
|
||||
|
||||
if offset > 65535 && s-base <= 5 && !best.rep {
|
||||
// Bail if the match is equal or worse to the encoding.
|
||||
s = best.s + 1
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
cv = load64(src, s)
|
||||
continue
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||
if best.rep {
|
||||
if nextEmit > 0 {
|
||||
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||
d += emitRepeat(dst[d:], offset, best.length)
|
||||
} else {
|
||||
// First match, cannot be repeat.
|
||||
d += emitCopy(dst[d:], offset, best.length)
|
||||
}
|
||||
} else {
|
||||
d += emitCopy(dst[d:], offset, best.length)
|
||||
}
|
||||
repeat = offset
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
if d > dstLimit {
|
||||
// Do we have space for more, if not bail.
|
||||
return 0
|
||||
}
|
||||
// Fill tables...
|
||||
for i := best.s + 1; i < s; i++ {
|
||||
cv0 := load64(src, i)
|
||||
long0 := hash8(cv0, lTableBits)
|
||||
short0 := hash4(cv0, sTableBits)
|
||||
lTable[long0] = uint64(i) | lTable[long0]<<32
|
||||
sTable[short0] = uint64(i) | sTable[short0]<<32
|
||||
}
|
||||
cv = load64(src, s)
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+len(src)-nextEmit > dstLimit {
|
||||
return 0
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockBestSnappy(dst, src []byte) (d int) {
|
||||
// Initialize the hash tables.
|
||||
const (
|
||||
// Long hash matches.
|
||||
lTableBits = 19
|
||||
maxLTableSize = 1 << lTableBits
|
||||
|
||||
// Short hash matches.
|
||||
sTableBits = 16
|
||||
maxSTableSize = 1 << sTableBits
|
||||
|
||||
inputMargin = 8 + 2
|
||||
)
|
||||
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
|
||||
var lTable [maxLTableSize]uint64
|
||||
var sTable [maxSTableSize]uint64
|
||||
|
||||
// Bail if we can't compress to at least this.
|
||||
dstLimit := len(src) - 5
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
cv := load64(src, s)
|
||||
|
||||
// We search for a repeat at -1, but don't output repeats when nextEmit == 0
|
||||
repeat := 1
|
||||
const lowbitMask = 0xffffffff
|
||||
getCur := func(x uint64) int {
|
||||
return int(x & lowbitMask)
|
||||
}
|
||||
getPrev := func(x uint64) int {
|
||||
return int(x >> 32)
|
||||
}
|
||||
const maxSkip = 64
|
||||
|
||||
for {
|
||||
type match struct {
|
||||
offset int
|
||||
s int
|
||||
length int
|
||||
score int
|
||||
}
|
||||
var best match
|
||||
for {
|
||||
// Next src position to check
|
||||
nextS := (s-nextEmit)>>8 + 1
|
||||
if nextS > maxSkip {
|
||||
nextS = s + maxSkip
|
||||
} else {
|
||||
nextS += s
|
||||
}
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
hashL := hash8(cv, lTableBits)
|
||||
hashS := hash4(cv, sTableBits)
|
||||
candidateL := lTable[hashL]
|
||||
candidateS := sTable[hashS]
|
||||
|
||||
score := func(m match) int {
|
||||
// Matches that are longer forward are penalized since we must emit it as a literal.
|
||||
score := m.length - m.s
|
||||
if nextEmit == m.s {
|
||||
// If we do not have to emit literals, we save 1 byte
|
||||
score++
|
||||
}
|
||||
offset := m.s - m.offset
|
||||
|
||||
return score - emitCopySize(offset, m.length)
|
||||
}
|
||||
|
||||
matchAt := func(offset, s int, first uint32) match {
|
||||
if best.length != 0 && best.s-best.offset == s-offset {
|
||||
// Don't retest if we have the same offset.
|
||||
return match{offset: offset, s: s}
|
||||
}
|
||||
if load32(src, offset) != first {
|
||||
return match{offset: offset, s: s}
|
||||
}
|
||||
m := match{offset: offset, s: s, length: 4 + offset}
|
||||
s += 4
|
||||
for s <= sLimit {
|
||||
if diff := load64(src, s) ^ load64(src, m.length); diff != 0 {
|
||||
m.length += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
m.length += 8
|
||||
}
|
||||
m.length -= offset
|
||||
m.score = score(m)
|
||||
if m.score <= -m.s {
|
||||
// Eliminate if no savings, we might find a better one.
|
||||
m.length = 0
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
bestOf := func(a, b match) match {
|
||||
if b.length == 0 {
|
||||
return a
|
||||
}
|
||||
if a.length == 0 {
|
||||
return b
|
||||
}
|
||||
as := a.score + b.s
|
||||
bs := b.score + a.s
|
||||
if as >= bs {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv)))
|
||||
|
||||
{
|
||||
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
|
||||
if best.length > 0 {
|
||||
// s+1
|
||||
nextShort := sTable[hash4(cv>>8, sTableBits)]
|
||||
s := s + 1
|
||||
cv := load64(src, s)
|
||||
nextLong := lTable[hash8(cv, lTableBits)]
|
||||
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
|
||||
// Repeat at + 2
|
||||
best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8)))
|
||||
|
||||
// s+2
|
||||
if true {
|
||||
nextShort = sTable[hash4(cv>>8, sTableBits)]
|
||||
s++
|
||||
cv = load64(src, s)
|
||||
nextLong = lTable[hash8(cv, lTableBits)]
|
||||
best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv)))
|
||||
best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv)))
|
||||
}
|
||||
// Search for a match at best match end, see if that is better.
|
||||
if sAt := best.s + best.length; sAt < sLimit {
|
||||
sBack := best.s
|
||||
backL := best.length
|
||||
// Load initial values
|
||||
cv = load64(src, sBack)
|
||||
// Search for mismatch
|
||||
next := lTable[hash8(load64(src, sAt), lTableBits)]
|
||||
//next := sTable[hash4(load64(src, sAt), sTableBits)]
|
||||
|
||||
if checkAt := getCur(next) - backL; checkAt > 0 {
|
||||
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
|
||||
}
|
||||
if checkAt := getPrev(next) - backL; checkAt > 0 {
|
||||
best = bestOf(best, matchAt(checkAt, sBack, uint32(cv)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update table
|
||||
lTable[hashL] = uint64(s) | candidateL<<32
|
||||
sTable[hashS] = uint64(s) | candidateS<<32
|
||||
|
||||
if best.length > 0 {
|
||||
break
|
||||
}
|
||||
|
||||
cv = load64(src, nextS)
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// Extend backwards, not needed for repeats...
|
||||
s = best.s
|
||||
if true {
|
||||
for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] {
|
||||
best.offset--
|
||||
best.length++
|
||||
s--
|
||||
}
|
||||
}
|
||||
if false && best.offset >= s {
|
||||
panic(fmt.Errorf("t %d >= s %d", best.offset, s))
|
||||
}
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+(s-nextEmit) > dstLimit {
|
||||
return 0
|
||||
}
|
||||
|
||||
base := s
|
||||
offset := s - best.offset
|
||||
|
||||
s += best.length
|
||||
|
||||
if offset > 65535 && s-base <= 5 {
|
||||
// Bail if the match is equal or worse to the encoding.
|
||||
s = best.s + 1
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
cv = load64(src, s)
|
||||
continue
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||
d += emitCopyNoRepeat(dst[d:], offset, best.length)
|
||||
repeat = offset
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
if d > dstLimit {
|
||||
// Do we have space for more, if not bail.
|
||||
return 0
|
||||
}
|
||||
// Fill tables...
|
||||
for i := best.s + 1; i < s; i++ {
|
||||
cv0 := load64(src, i)
|
||||
long0 := hash8(cv0, lTableBits)
|
||||
short0 := hash4(cv0, sTableBits)
|
||||
lTable[long0] = uint64(i) | lTable[long0]<<32
|
||||
sTable[short0] = uint64(i) | sTable[short0]<<32
|
||||
}
|
||||
cv = load64(src, s)
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+len(src)-nextEmit > dstLimit {
|
||||
return 0
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
}
|
||||
return d
|
||||
}

// emitCopySize returns the size to encode the offset+length
//
// It assumes that:
//	1 <= offset && offset <= math.MaxUint32
//	4 <= length && length <= 1 << 24
func emitCopySize(offset, length int) int {
	if offset >= 65536 {
		i := 0
		if length > 64 {
			length -= 64
			if length >= 4 {
				// Emit remaining as repeats
				return 5 + emitRepeatSize(offset, length)
			}
			i = 5
		}
		if length == 0 {
			return i
		}
		return i + 5
	}

	// Offset no more than 2 bytes.
	if length > 64 {
		// Emit remaining as repeats, at least 4 bytes remain.
		return 3 + emitRepeatSize(offset, length-60)
	}
	if length >= 12 || offset >= 2048 {
		return 3
	}
	// Emit the remaining copy, encoded as 2 bytes.
	return 2
}

// emitRepeatSize returns the number of bytes required to encode a repeat.
// Length must be at least 4 and < 1<<24
func emitRepeatSize(offset, length int) int {
	// Repeat offset, make length cheaper
	if length <= 4+4 || (length < 8+4 && offset < 2048) {
		return 2
	}
	if length < (1<<8)+4+4 {
		return 3
	}
	if length < (1<<16)+(1<<8)+4 {
		return 4
	}
	const maxRepeat = (1 << 24) - 1
	length -= (1 << 16) - 4
	left := 0
	if length > maxRepeat {
		left = length - maxRepeat + 4
		length = maxRepeat - 4
	}
	if left > 0 {
		return 5 + emitRepeatSize(offset, left)
	}
	return 5
}
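
emitCopySize and emitRepeatSize mirror emitCopy and emitRepeat byte-for-byte, which is what lets encodeBlockBest score a candidate match without actually emitting it. A hypothetical in-package test sketch spelling out that assumption (the test itself is not part of this diff):

package s2

import "testing"

func TestEmitCopySizePredictsEmitCopy(t *testing.T) {
	dst := make([]byte, 64)
	cases := []struct{ offset, length int }{
		{1, 4}, {2047, 11}, {2048, 12}, {65535, 64}, {65536, 64}, {70000, 200},
	}
	for _, c := range cases {
		written := emitCopy(dst, c.offset, c.length)
		if predicted := emitCopySize(c.offset, c.length); predicted != written {
			t.Errorf("offset=%d length=%d: predicted %d bytes, emitCopy wrote %d", c.offset, c.length, predicted, written)
		}
	}
}
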
vendor/github.com/klauspost/compress/s2/encode_better.go (new file, generated, vendored, 431 lines)
@ -0,0 +1,431 @@
|
||||
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
// Copyright (c) 2019 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package s2
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
)
|
||||

// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4(u uint64, h uint8) uint32 {
	const prime4bytes = 2654435761
	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
}

// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash5(u uint64, h uint8) uint32 {
	const prime5bytes = 889523592379
	return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63))
}

// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
	const prime7bytes = 58295818150454627
	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
}

// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
	const prime8bytes = 0xcf1bbcdcb7a56463
	return uint32((u * prime8bytes) >> ((64 - h) & 63))
}
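
These helpers are the usual multiplicative-hashing trick: multiply the low bytes by a large odd prime and keep the top h bits as the table index. A standalone illustration (package main, the demo function, and the sample input are invented for the example; only the prime and shift are copied from hash4 above):

package main

import (
	"encoding/binary"
	"fmt"
)

// hash4Demo maps the low 4 bytes of u to an index into a table of 1<<h entries.
func hash4Demo(u uint64, h uint8) uint32 {
	const prime4bytes = 2654435761
	return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
}

func main() {
	src := []byte("gotosocial!!")
	// The encoder's load64 is effectively a little-endian 8-byte read.
	cv := binary.LittleEndian.Uint64(src)
	// With the 14-bit short table used above (sTableBits), the index is < 16384.
	fmt.Println(hash4Demo(cv, 14))
}
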
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockBetterGo(dst, src []byte) (d int) {
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Initialize the hash tables.
|
||||
const (
|
||||
// Long hash matches.
|
||||
lTableBits = 16
|
||||
maxLTableSize = 1 << lTableBits
|
||||
|
||||
// Short hash matches.
|
||||
sTableBits = 14
|
||||
maxSTableSize = 1 << sTableBits
|
||||
)
|
||||
|
||||
var lTable [maxLTableSize]uint32
|
||||
var sTable [maxSTableSize]uint32
|
||||
|
||||
// Bail if we can't compress to at least this.
|
||||
dstLimit := len(src) - len(src)>>5 - 6
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
cv := load64(src, s)
|
||||
|
||||
// We initialize repeat to 0, so we never match on first attempt
|
||||
repeat := 0
|
||||
|
||||
for {
|
||||
candidateL := 0
|
||||
nextS := 0
|
||||
for {
|
||||
// Next src position to check
|
||||
nextS = s + (s-nextEmit)>>7 + 1
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
hashL := hash7(cv, lTableBits)
|
||||
hashS := hash4(cv, sTableBits)
|
||||
candidateL = int(lTable[hashL])
|
||||
candidateS := int(sTable[hashS])
|
||||
lTable[hashL] = uint32(s)
|
||||
sTable[hashS] = uint32(s)
|
||||
|
||||
// Check repeat at offset checkRep.
|
||||
const checkRep = 1
|
||||
if false && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
|
||||
base := s + checkRep
|
||||
// Extend back
|
||||
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
|
||||
i--
|
||||
base--
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||
|
||||
// Extend forward
|
||||
candidate := s - repeat + 4 + checkRep
|
||||
s += 4 + checkRep
|
||||
for s < len(src) {
|
||||
if len(src)-s < 8 {
|
||||
if src[s] == src[candidate] {
|
||||
s++
|
||||
candidate++
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
|
||||
s += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
candidate += 8
|
||||
}
|
||||
if nextEmit > 0 {
|
||||
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
|
||||
d += emitRepeat(dst[d:], repeat, s-base)
|
||||
} else {
|
||||
// First match, cannot be repeat.
|
||||
d += emitCopy(dst[d:], repeat, s-base)
|
||||
}
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
cv = load64(src, s)
|
||||
continue
|
||||
}
|
||||
|
||||
if uint32(cv) == load32(src, candidateL) {
|
||||
break
|
||||
}
|
||||
|
||||
// Check our short candidate
|
||||
if uint32(cv) == load32(src, candidateS) {
|
||||
// Try a long candidate at s+1
|
||||
hashL = hash7(cv>>8, lTableBits)
|
||||
candidateL = int(lTable[hashL])
|
||||
lTable[hashL] = uint32(s + 1)
|
||||
if uint32(cv>>8) == load32(src, candidateL) {
|
||||
s++
|
||||
break
|
||||
}
|
||||
// Use our short candidate.
|
||||
candidateL = candidateS
|
||||
break
|
||||
}
|
||||
|
||||
cv = load64(src, nextS)
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
|
||||
candidateL--
|
||||
s--
|
||||
}
|
||||
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+(s-nextEmit) > dstLimit {
|
||||
return 0
|
||||
}
|
||||
|
||||
base := s
|
||||
offset := base - candidateL
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
s += 4
|
||||
candidateL += 4
|
||||
for s < len(src) {
|
||||
if len(src)-s < 8 {
|
||||
if src[s] == src[candidateL] {
|
||||
s++
|
||||
candidateL++
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
|
||||
s += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
candidateL += 8
|
||||
}
|
||||
|
||||
if offset > 65535 && s-base <= 5 && repeat != offset {
|
||||
// Bail if the match is equal or worse to the encoding.
|
||||
s = nextS + 1
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
cv = load64(src, s)
|
||||
continue
|
||||
}
|
||||
|
||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||
if repeat == offset {
|
||||
d += emitRepeat(dst[d:], offset, s-base)
|
||||
} else {
|
||||
d += emitCopy(dst[d:], offset, s-base)
|
||||
repeat = offset
|
||||
}
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
if d > dstLimit {
|
||||
// Do we have space for more, if not bail.
|
||||
return 0
|
||||
}
|
||||
// Index match start+1 (long) and start+2 (short)
|
||||
index0 := base + 1
|
||||
// Index match end-2 (long) and end-1 (short)
|
||||
index1 := s - 2
|
||||
|
||||
cv0 := load64(src, index0)
|
||||
cv1 := load64(src, index1)
|
||||
cv = load64(src, s)
|
||||
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||
lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
|
||||
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||
lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
|
||||
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||
sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
|
||||
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+len(src)-nextEmit > dstLimit {
|
||||
return 0
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src)) &&
|
||||
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
|
||||
func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
|
||||
// sLimit is when to stop looking for offset/length copies. The inputMargin
|
||||
// lets us use a fast path for emitLiteral in the main loop, while we are
|
||||
// looking for copies.
|
||||
sLimit := len(src) - inputMargin
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Initialize the hash tables.
|
||||
const (
|
||||
// Long hash matches.
|
||||
lTableBits = 16
|
||||
maxLTableSize = 1 << lTableBits
|
||||
|
||||
// Short hash matches.
|
||||
sTableBits = 14
|
||||
maxSTableSize = 1 << sTableBits
|
||||
)
|
||||
|
||||
var lTable [maxLTableSize]uint32
|
||||
var sTable [maxSTableSize]uint32
|
||||
|
||||
// Bail if we can't compress to at least this.
|
||||
dstLimit := len(src) - len(src)>>5 - 6
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := 0
|
||||
|
||||
// The encoded form must start with a literal, as there are no previous
|
||||
// bytes to copy, so we start looking for hash matches at s == 1.
|
||||
s := 1
|
||||
cv := load64(src, s)
|
||||
|
||||
// We initialize repeat to 0, so we never match on first attempt
|
||||
repeat := 0
|
||||
const maxSkip = 100
|
||||
|
||||
for {
|
||||
candidateL := 0
|
||||
nextS := 0
|
||||
for {
|
||||
// Next src position to check
|
||||
nextS = (s-nextEmit)>>7 + 1
|
||||
if nextS > maxSkip {
|
||||
nextS = s + maxSkip
|
||||
} else {
|
||||
nextS += s
|
||||
}
|
||||
|
||||
if nextS > sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
hashL := hash7(cv, lTableBits)
|
||||
hashS := hash4(cv, sTableBits)
|
||||
candidateL = int(lTable[hashL])
|
||||
candidateS := int(sTable[hashS])
|
||||
lTable[hashL] = uint32(s)
|
||||
sTable[hashS] = uint32(s)
|
||||
|
||||
if uint32(cv) == load32(src, candidateL) {
|
||||
break
|
||||
}
|
||||
|
||||
// Check our short candidate
|
||||
if uint32(cv) == load32(src, candidateS) {
|
||||
// Try a long candidate at s+1
|
||||
hashL = hash7(cv>>8, lTableBits)
|
||||
candidateL = int(lTable[hashL])
|
||||
lTable[hashL] = uint32(s + 1)
|
||||
if uint32(cv>>8) == load32(src, candidateL) {
|
||||
s++
|
||||
break
|
||||
}
|
||||
// Use our short candidate.
|
||||
candidateL = candidateS
|
||||
break
|
||||
}
|
||||
|
||||
cv = load64(src, nextS)
|
||||
s = nextS
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
|
||||
candidateL--
|
||||
s--
|
||||
}
|
||||
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+(s-nextEmit) > dstLimit {
|
||||
return 0
|
||||
}
|
||||
|
||||
base := s
|
||||
offset := base - candidateL
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
s += 4
|
||||
candidateL += 4
|
||||
for s < len(src) {
|
||||
if len(src)-s < 8 {
|
||||
if src[s] == src[candidateL] {
|
||||
s++
|
||||
candidateL++
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
|
||||
s += bits.TrailingZeros64(diff) >> 3
|
||||
break
|
||||
}
|
||||
s += 8
|
||||
candidateL += 8
|
||||
}
|
||||
|
||||
if offset > 65535 && s-base <= 5 && repeat != offset {
|
||||
// Bail if the match is equal or worse to the encoding.
|
||||
s = nextS + 1
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
cv = load64(src, s)
|
||||
continue
|
||||
}
|
||||
|
||||
d += emitLiteral(dst[d:], src[nextEmit:base])
|
||||
d += emitCopyNoRepeat(dst[d:], offset, s-base)
|
||||
repeat = offset
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
if d > dstLimit {
|
||||
// Do we have space for more, if not bail.
|
||||
return 0
|
||||
}
|
||||
// Index match start+1 (long) and start+2 (short)
|
||||
index0 := base + 1
|
||||
// Index match end-2 (long) and end-1 (short)
|
||||
index1 := s - 2
|
||||
|
||||
cv0 := load64(src, index0)
|
||||
cv1 := load64(src, index1)
|
||||
cv = load64(src, s)
|
||||
lTable[hash7(cv0, lTableBits)] = uint32(index0)
|
||||
lTable[hash7(cv0>>8, lTableBits)] = uint32(index0 + 1)
|
||||
lTable[hash7(cv1, lTableBits)] = uint32(index1)
|
||||
lTable[hash7(cv1>>8, lTableBits)] = uint32(index1 + 1)
|
||||
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
|
||||
sTable[hash4(cv0>>16, sTableBits)] = uint32(index0 + 2)
|
||||
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
|
||||
}
|
||||
|
||||
emitRemainder:
|
||||
if nextEmit < len(src) {
|
||||
// Bail if we exceed the maximum size.
|
||||
if d+len(src)-nextEmit > dstLimit {
|
||||
return 0
|
||||
}
|
||||
d += emitLiteral(dst[d:], src[nextEmit:])
|
||||
}
|
||||
return d
|
||||
}
|
vendor/github.com/klauspost/compress/s2/encode_go.go (new file, generated, vendored, 298 lines)
@ -0,0 +1,298 @@
|
||||
//go:build !amd64 || appengine || !gc || noasm
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
package s2
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src))
|
||||
func encodeBlock(dst, src []byte) (d int) {
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
return encodeBlockGo(dst, src)
|
||||
}
|
||||
|
||||
// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src))
|
||||
func encodeBlockBetter(dst, src []byte) (d int) {
|
||||
return encodeBlockBetterGo(dst, src)
|
||||
}
|
||||
|
||||
// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src))
|
||||
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
|
||||
return encodeBlockBetterSnappyGo(dst, src)
|
||||
}
|
||||
|
||||
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
|
||||
// assumes that the varint-encoded length of the decompressed bytes has already
|
||||
// been written.
|
||||
//
|
||||
// It also assumes that:
|
||||
// len(dst) >= MaxEncodedLen(len(src))
|
||||
func encodeBlockSnappy(dst, src []byte) (d int) {
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
return 0
|
||||
}
|
||||
return encodeBlockSnappyGo(dst, src)
|
||||
}
|
||||
|
||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
||||
func emitLiteral(dst, lit []byte) int {
|
||||
if len(lit) == 0 {
|
||||
return 0
|
||||
}
|
||||
const num = 63<<2 | tagLiteral
|
||||
i, n := 0, uint(len(lit)-1)
|
||||
switch {
|
||||
case n < 60:
|
||||
dst[0] = uint8(n)<<2 | tagLiteral
|
||||
i = 1
|
||||
case n < 1<<8:
|
||||
dst[1] = uint8(n)
|
||||
dst[0] = 60<<2 | tagLiteral
|
||||
i = 2
|
||||
case n < 1<<16:
|
||||
dst[2] = uint8(n >> 8)
|
||||
dst[1] = uint8(n)
|
||||
dst[0] = 61<<2 | tagLiteral
|
||||
i = 3
|
||||
case n < 1<<24:
|
||||
dst[3] = uint8(n >> 16)
|
||||
dst[2] = uint8(n >> 8)
|
||||
dst[1] = uint8(n)
|
||||
dst[0] = 62<<2 | tagLiteral
|
||||
i = 4
|
||||
default:
|
||||
dst[4] = uint8(n >> 24)
|
||||
dst[3] = uint8(n >> 16)
|
||||
dst[2] = uint8(n >> 8)
|
||||
dst[1] = uint8(n)
|
||||
dst[0] = 63<<2 | tagLiteral
|
||||
i = 5
|
||||
}
|
||||
return i + copy(dst[i:], lit)
|
||||
}
|
||||
|
||||
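
emitLiteral packs the literal length into the tag byte when it fits (m < 60) and spills to one to four extra length bytes otherwise. A hypothetical in-package test sketch for the single-byte header case (the test is not part of this diff): for a 3-byte literal, n = len(lit)-1 = 2 < 60, so the header byte is 2<<2|tagLiteral = 0x08.

package s2

import (
	"bytes"
	"testing"
)

func TestEmitLiteralShort(t *testing.T) {
	dst := make([]byte, 16)
	n := emitLiteral(dst, []byte("abc"))
	want := []byte{0x08, 'a', 'b', 'c'}
	if n != len(want) || !bytes.Equal(dst[:n], want) {
		t.Fatalf("got %v (%d bytes), want %v", dst[:n], n, want)
	}
}
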
// emitRepeat writes a repeat chunk and returns the number of bytes written.
|
||||
// Length must be at least 4 and < 1<<24
|
||||
func emitRepeat(dst []byte, offset, length int) int {
|
||||
// Repeat offset, make length cheaper
|
||||
length -= 4
|
||||
if length <= 4 {
|
||||
dst[0] = uint8(length)<<2 | tagCopy1
|
||||
dst[1] = 0
|
||||
return 2
|
||||
}
|
||||
if length < 8 && offset < 2048 {
|
||||
// Encode WITH offset
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
|
||||
return 2
|
||||
}
|
||||
if length < (1<<8)+4 {
|
||||
length -= 4
|
||||
dst[2] = uint8(length)
|
||||
dst[1] = 0
|
||||
dst[0] = 5<<2 | tagCopy1
|
||||
return 3
|
||||
}
|
||||
if length < (1<<16)+(1<<8) {
|
||||
length -= 1 << 8
|
||||
dst[3] = uint8(length >> 8)
|
||||
dst[2] = uint8(length >> 0)
|
||||
dst[1] = 0
|
||||
dst[0] = 6<<2 | tagCopy1
|
||||
return 4
|
||||
}
|
||||
const maxRepeat = (1 << 24) - 1
|
||||
length -= 1 << 16
|
||||
left := 0
|
||||
if length > maxRepeat {
|
||||
left = length - maxRepeat + 4
|
||||
length = maxRepeat - 4
|
||||
}
|
||||
dst[4] = uint8(length >> 16)
|
||||
dst[3] = uint8(length >> 8)
|
||||
dst[2] = uint8(length >> 0)
|
||||
dst[1] = 0
|
||||
dst[0] = 7<<2 | tagCopy1
|
||||
if left > 0 {
|
||||
return 5 + emitRepeat(dst[5:], offset, left)
|
||||
}
|
||||
return 5
|
||||
}
|
||||
|
||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= offset && offset <= math.MaxUint32
|
||||
// 4 <= length && length <= 1 << 24
|
||||
func emitCopy(dst []byte, offset, length int) int {
|
||||
if offset >= 65536 {
|
||||
i := 0
|
||||
if length > 64 {
|
||||
// Emit a length 64 copy, encoded as 5 bytes.
|
||||
dst[4] = uint8(offset >> 24)
|
||||
dst[3] = uint8(offset >> 16)
|
||||
dst[2] = uint8(offset >> 8)
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = 63<<2 | tagCopy4
|
||||
length -= 64
|
||||
if length >= 4 {
|
||||
// Emit remaining as repeats
|
||||
return 5 + emitRepeat(dst[5:], offset, length)
|
||||
}
|
||||
i = 5
|
||||
}
|
||||
if length == 0 {
|
||||
return i
|
||||
}
|
||||
// Emit a copy, offset encoded as 4 bytes.
|
||||
dst[i+0] = uint8(length-1)<<2 | tagCopy4
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
dst[i+3] = uint8(offset >> 16)
|
||||
dst[i+4] = uint8(offset >> 24)
|
||||
return i + 5
|
||||
}
|
||||
|
||||
// Offset no more than 2 bytes.
|
||||
if length > 64 {
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
// Emit remaining as repeat value (minimum 4 bytes).
|
||||
dst[2] = uint8(offset >> 8)
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = 59<<2 | tagCopy2
|
||||
length -= 60
|
||||
// Emit remaining as repeats, at least 4 bytes remain.
|
||||
return 3 + emitRepeat(dst[3:], offset, length)
|
||||
}
|
||||
if length >= 12 || offset >= 2048 {
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
dst[2] = uint8(offset >> 8)
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = uint8(length-1)<<2 | tagCopy2
|
||||
return 3
|
||||
}
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||
return 2
|
||||
}
|
||||
|
||||
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= offset && offset <= math.MaxUint32
|
||||
// 4 <= length && length <= 1 << 24
|
||||
func emitCopyNoRepeat(dst []byte, offset, length int) int {
|
||||
if offset >= 65536 {
|
||||
i := 0
|
||||
if length > 64 {
|
||||
// Emit a length 64 copy, encoded as 5 bytes.
|
||||
dst[4] = uint8(offset >> 24)
|
||||
dst[3] = uint8(offset >> 16)
|
||||
dst[2] = uint8(offset >> 8)
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = 63<<2 | tagCopy4
|
||||
length -= 64
|
||||
if length >= 4 {
|
||||
// Emit remaining as repeats
|
||||
return 5 + emitCopyNoRepeat(dst[5:], offset, length)
|
||||
}
|
||||
i = 5
|
||||
}
|
||||
if length == 0 {
|
||||
return i
|
||||
}
|
||||
// Emit a copy, offset encoded as 4 bytes.
|
||||
dst[i+0] = uint8(length-1)<<2 | tagCopy4
|
||||
dst[i+1] = uint8(offset)
|
||||
dst[i+2] = uint8(offset >> 8)
|
||||
dst[i+3] = uint8(offset >> 16)
|
||||
dst[i+4] = uint8(offset >> 24)
|
||||
return i + 5
|
||||
}
|
||||
|
||||
// Offset no more than 2 bytes.
|
||||
if length > 64 {
|
||||
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
// Emit remaining as repeat value (minimum 4 bytes).
|
||||
dst[2] = uint8(offset >> 8)
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = 59<<2 | tagCopy2
|
||||
length -= 60
|
||||
// Emit remaining as repeats, at least 4 bytes remain.
|
||||
return 3 + emitCopyNoRepeat(dst[3:], offset, length)
|
||||
}
|
||||
if length >= 12 || offset >= 2048 {
|
||||
// Emit the remaining copy, encoded as 3 bytes.
|
||||
dst[2] = uint8(offset >> 8)
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = uint8(length-1)<<2 | tagCopy2
|
||||
return 3
|
||||
}
|
||||
// Emit the remaining copy, encoded as 2 bytes.
|
||||
dst[1] = uint8(offset)
|
||||
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
|
||||
return 2
|
||||
}
|
||||
|
||||
// matchLen returns how many bytes match in a and b
|
||||
//
|
||||
// It assumes that:
|
||||
// len(a) <= len(b)
|
||||
//
|
||||
func matchLen(a []byte, b []byte) int {
|
||||
b = b[:len(a)]
|
||||
var checked int
|
||||
if len(a) > 4 {
|
||||
// Try 4 bytes first
|
||||
if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
|
||||
return bits.TrailingZeros32(diff) >> 3
|
||||
}
|
||||
// Switch to 8 byte matching.
|
||||
checked = 4
|
||||
a = a[4:]
|
||||
b = b[4:]
|
||||
for len(a) >= 8 {
|
||||
b = b[:len(a)]
|
||||
if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
|
||||
return checked + (bits.TrailingZeros64(diff) >> 3)
|
||||
}
|
||||
checked += 8
|
||||
a = a[8:]
|
||||
b = b[8:]
|
||||
}
|
||||
}
|
||||
b = b[:len(a)]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return int(i) + checked
|
||||
}
|
||||
}
|
||||
return len(a) + checked
|
||||
}
|
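
matchLen counts the length of the common prefix of a and b, assuming len(a) <= len(b); the Go fallback above first checks 4 bytes, then 8 at a time, using the trailing-zero count of the XOR to locate the first differing byte. A hypothetical in-package test sketch (not part of this diff):

package s2

import "testing"

func TestMatchLenPrefix(t *testing.T) {
	if got := matchLen([]byte("gopher"), []byte("gophers!")); got != 6 {
		t.Fatalf("got %d, want 6", got)
	}
	if got := matchLen([]byte("aaaaaaaaaaXbb"), []byte("aaaaaaaaaaYbb")); got != 10 {
		t.Fatalf("got %d, want 10", got)
	}
}
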
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go (new file, generated, vendored, 189 lines)
@ -0,0 +1,189 @@
|
||||
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
|
||||
|
||||
//go:build !appengine && !noasm && gc
|
||||
// +build !appengine,!noasm,gc
|
||||
|
||||
package s2
|
||||
|
||||
// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4294967295 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBlockAsm(dst []byte, src []byte) int
|
||||
|
||||
// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4194304 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBlockAsm4MB(dst []byte, src []byte) int
|
||||
|
||||
// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 16383 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBlockAsm12B(dst []byte, src []byte) int
|
||||
|
||||
// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4095 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBlockAsm10B(dst []byte, src []byte) int
|
||||
|
||||
// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 511 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBlockAsm8B(dst []byte, src []byte) int
|
||||
|
||||
// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4294967295 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBetterBlockAsm(dst []byte, src []byte) int
|
||||
|
||||
// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4194304 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
|
||||
|
||||
// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 16383 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBetterBlockAsm12B(dst []byte, src []byte) int
|
||||
|
||||
// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4095 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBetterBlockAsm10B(dst []byte, src []byte) int
|
||||
|
||||
// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 511 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeBetterBlockAsm8B(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4294967295 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBlockAsm(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 65535 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 16383 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4095 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 511 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4294967295 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 65535 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 16383 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 4095 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
|
||||
|
||||
// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst.
|
||||
// Maximum input 511 bytes.
|
||||
// It assumes that the varint-encoded length of the decompressed bytes has already been written.
|
||||
//
|
||||
//go:noescape
|
||||
func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
|
||||
|
||||
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes with margin of 0 bytes
|
||||
// 0 <= len(lit) && len(lit) <= math.MaxUint32
|
||||
//
|
||||
//go:noescape
|
||||
func emitLiteral(dst []byte, lit []byte) int
|
||||
|
||||
// emitRepeat writes a repeat chunk and returns the number of bytes written.
|
||||
// Length must be at least 4 and < 1<<32
|
||||
//
|
||||
//go:noescape
|
||||
func emitRepeat(dst []byte, offset int, length int) int
|
||||
|
||||
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= offset && offset <= math.MaxUint32
|
||||
// 4 <= length && length <= 1 << 24
|
||||
//
|
||||
//go:noescape
|
||||
func emitCopy(dst []byte, offset int, length int) int
|
||||
|
||||
// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written.
|
||||
//
|
||||
// It assumes that:
|
||||
// dst is long enough to hold the encoded bytes
|
||||
// 1 <= offset && offset <= math.MaxUint32
|
||||
// 4 <= length && length <= 1 << 24
|
||||
//
|
||||
//go:noescape
|
||||
func emitCopyNoRepeat(dst []byte, offset int, length int) int
|
||||
|
||||
// matchLen returns how many bytes match in a and b
|
||||
//
|
||||
// It assumes that:
|
||||
// len(a) <= len(b)
|
||||
//
|
||||
//go:noescape
|
||||
func matchLen(a []byte, b []byte) int
|
vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s (new file, generated, vendored, 15678 lines)
File diff suppressed because it is too large
vendor/github.com/klauspost/compress/s2/s2.go (new file, generated, vendored, 139 lines)
@ -0,0 +1,139 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Copyright (c) 2019 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package s2 implements the S2 compression format.
//
// S2 is an extension of Snappy. Like Snappy, S2 is aimed at high throughput,
// which is why it features concurrent compression for bigger payloads.
//
// Decoding is compatible with Snappy compressed content,
// but content compressed with S2 cannot be decompressed by Snappy.
//
// For more information on Snappy/S2 differences see the README at: https://github.com/klauspost/compress/tree/master/s2
//
// There are actually two S2 formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as an S2 stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// A "better" compression option is available. This will trade some compression
// speed for a better compression ratio.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// Blocks do not offer much data protection, so it is up to you to
// add data validation of decompressed blocks.
//
// Streams perform CRC validation of the decompressed data.
// Stream compression is also performed on multiple CPU cores concurrently,
// significantly improving throughput.
package s2

import (
	"bytes"
	"hash/crc32"
)

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, the offset ranges in [0, 1<<32) and the length in
    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
    integer denoted by the next 4 bytes.
*/
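
// Illustrative sketch, not part of the upstream file: how the first chunk tag
// after the leading varint splits into l (low 2 bits) and m (high 6 bits),
// per the format description above. Only the short-literal and l == 1 copy
// cases are decoded here.
func describeFirstTag(block []byte) (isLiteral bool, length, offset int) {
	tag := block[0]
	l := tag & 0x03    // chunk tag: 0 = literal, 1-3 = copy variants
	m := int(tag >> 2) // 6 most significant bits
	switch l {
	case tagLiteral:
		if m < 60 {
			return true, 1 + m, 0 // the next 1 + m bytes are literal bytes
		}
		return true, 0, 0 // longer literals: length follows in the next m - 59 bytes
	case tagCopy1:
		// Length is 4 + the low 3 bits of m; the high 3 bits of m form
		// bits 8-10 of the offset, and the next byte is bits 0-7.
		return false, 4 + (m & 0x07), m>>3<<8 | int(block[1])
	}
	return false, 0, 0 // tagCopy2/tagCopy4: 2- and 4-byte little-endian offsets, omitted
}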

const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

const (
	checksumSize     = 4
	chunkHeaderSize  = 4
	magicChunk       = "\xff\x06\x00\x00" + magicBody
	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
	magicBodySnappy  = "sNaPpY"
	magicBody        = "S2sTwO"

	// maxBlockSize is the maximum size of the input to encodeBlock.
	//
	// For the framing format (Writer type instead of Encode function),
	// this is the maximum uncompressed size of a block.
	maxBlockSize = 4 << 20

	// minBlockSize is the minimum size of block setting when creating a writer.
	minBlockSize = 4 << 10

	// Default block size
	defaultBlockSize = 1 << 20

	// maxSnappyBlockSize is the maximum snappy block size.
	maxSnappyBlockSize = 1 << 16

	obufHeaderLen = checksumSize + chunkHeaderSize
)

const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return c>>15 | c<<17 + 0xa282ead8
}
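
// Illustrative sketch, not part of the upstream file: in the stream (framing)
// format referenced above, each data chunk body begins with this masked CRC
// of the uncompressed data, stored as 4 little-endian bytes (assumption:
// layout per framing_format.txt).
func checksumHeader(uncompressed []byte) [checksumSize]byte {
	c := crc(uncompressed)
	return [checksumSize]byte{byte(c), byte(c >> 8), byte(c >> 16), byte(c >> 24)}
}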

// literalExtraSize returns the extra size of encoding n literals.
// n should be >= 0 and <= math.MaxUint32.
func literalExtraSize(n int64) int64 {
	if n == 0 {
		return 0
	}
	switch {
	case n < 60:
		return 1
	case n < 1<<8:
		return 2
	case n < 1<<16:
		return 3
	case n < 1<<24:
		return 4
	default:
		return 5
	}
}

type byter interface {
	Bytes() []byte
}

var _ byter = &bytes.Buffer{}
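
To make the block/stream distinction from the package documentation concrete, here is a minimal usage sketch. It assumes the exported API of github.com/klauspost/compress/s2 (Encode/Decode for the block format, plus NewWriter/NewReader constructors for the stream format, which are not shown in this diff); treat it as an illustration rather than part of the vendored code.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := []byte("some payload to compress")

	// Block format: the full input size is known up front.
	block := s2.Encode(nil, src)
	roundTrip, err := s2.Decode(nil, block)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, roundTrip)) // true

	// Stream (framing) format: Writer/Reader with CRC validation.
	var buf bytes.Buffer
	w := s2.NewWriter(&buf)
	if _, err := w.Write(src); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flush and finish the stream
		panic(err)
	}
	decoded, err := io.ReadAll(s2.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, decoded)) // true
}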
24 vendor/github.com/klauspost/cpuid/.gitignore generated vendored Normal file
@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
46 vendor/github.com/klauspost/cpuid/.travis.yml generated vendored Normal file
@ -0,0 +1,46 @@
language: go

os:
  - linux
  - osx
  - windows

arch:
  - amd64
  - arm64

go:
  - 1.12.x
  - 1.13.x
  - 1.14.x
  - master

script:
  - go vet ./...
  - go test -race ./...
  - go test -tags=noasm ./...

stages:
  - gofmt
  - test

matrix:
  allow_failures:
    - go: 'master'
  fast_finish: true
  include:
    - stage: gofmt
      go: 1.14.x
      os: linux
      arch: amd64
      script:
        - diff <(gofmt -d .) <(printf "")
        - diff <(gofmt -d ./private) <(printf "")
        - go install github.com/klauspost/asmfmt/cmd/asmfmt
        - diff <(asmfmt -d .) <(printf "")
    - stage: i386
      go: 1.14.x
      os: linux
      arch: amd64
      script:
        - GOOS=linux GOARCH=386 go test .
Some files were not shown because too many files have changed in this diff