Mirror of https://github.com/superseriousbusiness/gotosocial.git, synced 2024-11-25 17:53:15 +01:00

Commit 772231d24e: Merge branch 'main' into settings-refactor
@@ -107,4 +107,36 @@ db-tls-mode: "disable"
 # Examples: ["/path/to/some/cert.crt"]
 # Default: ""
 db-tls-ca-cert: ""
+
+# String. SQLite journaling mode.
+# SQLite only -- unused otherwise.
+# If set to empty string, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_journal_mode
+# Examples: ["DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "OFF"]
+# Default: "WAL"
+db-sqlite-journal-mode: "WAL"
+
+# String. SQLite synchronous mode.
+# SQLite only -- unused otherwise.
+# If set to empty string, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_synchronous
+# Examples: ["OFF", "NORMAL", "FULL", "EXTRA"]
+# Default: "NORMAL"
+db-sqlite-synchronous: "NORMAL"
+
+# Byte size. SQLite cache size.
+# SQLite only -- unused otherwise.
+# If set to empty string or zero, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_cache_size
+# Examples: ["32MiB", "0", "64MiB"]
+# Default: "64MiB"
+db-sqlite-cache-size: "64MiB"
+
+# Duration. SQLite busy timeout.
+# SQLite only -- unused otherwise.
+# If set to empty string or zero, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_busy_timeout
+# Examples: ["0s", "1s", "30s", "1m", "5m"]
+# Default: "30s"
+db-sqlite-busy-timeout: "30s"
 ```
@@ -2,8 +2,6 @@

- **Where's the user interface?** GoToSocial is just a bare server for the most part and is designed to be used through external applications. [Pinafore](https://pinafore.social) and [Tusky](https://tusky.app/) are the best-supported, but anything that supports the Mastodon API should work, other than the features GoToSocial doesn't yet have. Permalinks and profile pages, as well as the admin panel, are served directly through GoToSocial, but most interaction goes through the apps.

- **What happened to the gifs?** While GoToSocial supports gifs, it doesn't support videos. This wouldn't be a big problem, except that Mastodon doesn't support gifs; it converts them into videos on upload. So if someone posts a gif from a Mastodon server, it won't be visible. At the time of this writing the video is dropped altogether, but [in the future there should be at least a placeholder link](https://github.com/superseriousbusiness/gotosocial/issues/765).

- **Why aren't my posts showing up on my profile page?** Unlike Mastodon, the default post visibility is Unlisted. If you want something to be visible on your profile page, the post must have Public visibility.

- **Why aren't my posts showing up on other servers?** First check the visibility as noted above. TODO: explain how to debug common federation issues

@@ -17,7 +15,6 @@
- **How can I sign up for a server?** Right now the only way to create an account is for the server's admin to run a command directly on the server. A web-based signup flow is on the roadmap but not yet implemented.

- **Why's it still in alpha?** Take a look at the [list of open bugs](https://github.com/superseriousbusiness/gotosocial/issues?q=is%3Aissue+is%3Aopen+label%3Abug) and the [roadmap](https://github.com/superseriousbusiness/gotosocial/blob/main/ROADMAP.md) for a more detailed rundown, but the main missing features at the time of this writing are:
  * videos
  * reporting posts to admins
  * muting conversations
  * backfill of posts
@@ -164,6 +164,38 @@ db-tls-mode: "disable"
 # Default: ""
 db-tls-ca-cert: ""
+
+# String. SQLite journaling mode.
+# SQLite only -- unused otherwise.
+# If set to empty string, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_journal_mode
+# Examples: ["DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "OFF"]
+# Default: "WAL"
+db-sqlite-journal-mode: "WAL"
+
+# String. SQLite synchronous mode.
+# SQLite only -- unused otherwise.
+# If set to empty string, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_synchronous
+# Examples: ["OFF", "NORMAL", "FULL", "EXTRA"]
+# Default: "NORMAL"
+db-sqlite-synchronous: "NORMAL"
+
+# Byte size. SQLite cache size.
+# SQLite only -- unused otherwise.
+# If set to empty string or zero, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_cache_size
+# Examples: ["32MiB", "0", "64MiB"]
+# Default: "64MiB"
+db-sqlite-cache-size: "64MiB"
+
+# Duration. SQLite busy timeout.
+# SQLite only -- unused otherwise.
+# If set to empty string or zero, the sqlite default will be used.
+# See: https://www.sqlite.org/pragma.html#pragma_busy_timeout
+# Examples: ["0s", "1s", "30s", "1m", "5m"]
+# Default: "30s"
+db-sqlite-busy-timeout: "30s"
 
 cache:
   gts:
     ###########################
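Note: these four options are applied as PRAGMA statements on the freshly opened connection (the mechanism is the sqlitePragmas function added further down in this diff). A minimal standalone sketch of the equivalent effect, using the same modernc.org/sqlite driver the project uses; the database file name here is hypothetical:

```go
package main

import (
    "database/sql"
    "log"

    _ "modernc.org/sqlite" // CGo-free SQLite driver, as used by GoToSocial
)

func main() {
    db, err := sql.Open("sqlite", "file:gotosocial.db") // hypothetical db file
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // Rough equivalents of the defaults above. Note that cache_size takes a
    // NEGATIVE value to mean KiB rather than pages (64MiB = 65536 KiB), and
    // busy_timeout is expressed in milliseconds (30s = 30000ms).
    for _, pragma := range []string{
        "PRAGMA journal_mode=WAL",   // db-sqlite-journal-mode
        "PRAGMA synchronous=NORMAL", // db-sqlite-synchronous
        "PRAGMA cache_size=-65536",  // db-sqlite-cache-size
        "PRAGMA busy_timeout=30000", // db-sqlite-busy-timeout
    } {
        if _, err := db.Exec(pragma); err != nil {
            log.Fatalf("%s failed: %v", pragma, err)
        }
    }
}
```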
go.mod (6 lines changed)

@@ -7,7 +7,7 @@ require (
 	codeberg.org/gruf/go-byteutil v1.0.2
 	codeberg.org/gruf/go-cache/v3 v3.2.2
 	codeberg.org/gruf/go-debug v1.2.0
-	codeberg.org/gruf/go-errors/v2 v2.0.2
+	codeberg.org/gruf/go-errors/v2 v2.1.1
 	codeberg.org/gruf/go-kv v1.5.2
 	codeberg.org/gruf/go-logger/v2 v2.2.1
 	codeberg.org/gruf/go-mutexes v1.1.5
@@ -32,7 +32,7 @@ require (
 	github.com/jackc/pgx/v4 v4.17.2
 	github.com/microcosm-cc/bluemonday v1.0.21
 	github.com/miekg/dns v1.1.50
-	github.com/minio/minio-go/v7 v7.0.44
+	github.com/minio/minio-go/v7 v7.0.47
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/oklog/ulid v1.3.1
 	github.com/robfig/cron/v3 v3.0.1
@@ -53,7 +53,7 @@ require (
 	golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
 	golang.org/x/image v0.3.0
 	golang.org/x/net v0.5.0
-	golang.org/x/oauth2 v0.3.0
+	golang.org/x/oauth2 v0.4.0
 	golang.org/x/text v0.6.0
 	gopkg.in/mcuadros/go-syslog.v2 v2.3.0
 	gopkg.in/yaml.v3 v3.0.1
go.sum (11 lines changed)

@@ -54,8 +54,8 @@ codeberg.org/gruf/go-cache/v3 v3.2.2/go.mod h1:+Eje6nCvN8QF71VyYjMWMnkdv6t1kHnCO
 codeberg.org/gruf/go-debug v1.2.0 h1:WBbTMnK1ArFKUmgv04aO2JiC/daTOB8zQGi521qb7OU=
 codeberg.org/gruf/go-debug v1.2.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
 codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
-codeberg.org/gruf/go-errors/v2 v2.0.2 h1:T9CqfC+ntSIQL5mdQxwHlUMod1htpgNe3P1tugxKlT4=
-codeberg.org/gruf/go-errors/v2 v2.0.2/go.mod h1:6sI75OmvXE2AtRm4WUyGMEyqEOKTsfe+CA+aBXwbtJY=
+codeberg.org/gruf/go-errors/v2 v2.1.1 h1:oj7JUIvUBafF60HrwN74JrCMol1Ouh3gq1ggrH5hGTw=
+codeberg.org/gruf/go-errors/v2 v2.1.1/go.mod h1:LfzD9nkAAJpEDbkUqOZQ2jdaQ8VrK0pnR36zLOMFq6Y=
 codeberg.org/gruf/go-fastcopy v1.1.2 h1:YwmYXPsyOcRBxKEE2+w1bGAZfclHVaPijFsOVOcnNcw=
 codeberg.org/gruf/go-fastcopy v1.1.2/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s=
 codeberg.org/gruf/go-fastpath v1.0.1/go.mod h1:edveE/Kp3Eqi0JJm0lXYdkVrB28cNUkcb/bRGFTPqeI=
@@ -418,8 +418,8 @@ github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
 github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.44 h1:9zUJ7iU7ax2P1jOvTp6nVrgzlZq3AZlFm0XfRFDKstM=
-github.com/minio/minio-go/v7 v7.0.44/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
+github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs=
+github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
 github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
 github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
@@ -729,8 +729,9 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
-golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
+golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -201,7 +201,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessful() {
 			Size:   "512x288",
 			Aspect: 1.7777778,
 		},
-		Focus: apimodel.MediaFocus{
+		Focus: &apimodel.MediaFocus{
 			X: -0.5,
 			Y: 0.5,
 		},
@@ -290,7 +290,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessfulV2() {
 			Size:   "512x288",
 			Aspect: 1.7777778,
 		},
-		Focus: apimodel.MediaFocus{
+		Focus: &apimodel.MediaFocus{
 			X: -0.5,
 			Y: 0.5,
 		},
@@ -172,7 +172,7 @@ func (suite *MediaUpdateTestSuite) TestUpdateImage() {
 	suite.EqualValues(apimodel.MediaMeta{
 		Original: apimodel.MediaDimensions{Width: 800, Height: 450, FrameRate: "", Duration: 0, Bitrate: 0, Size: "800x450", Aspect: 1.7777778},
 		Small:    apimodel.MediaDimensions{Width: 256, Height: 144, FrameRate: "", Duration: 0, Bitrate: 0, Size: "256x144", Aspect: 1.7777778},
-		Focus:    apimodel.MediaFocus{X: -0.1, Y: 0.3},
+		Focus:    &apimodel.MediaFocus{X: -0.1, Y: 0.3},
 	}, attachmentReply.Meta)
 	suite.Equal(toUpdate.Blurhash, attachmentReply.Blurhash)
 	suite.Equal(toUpdate.ID, attachmentReply.ID)
@@ -29,6 +29,7 @@
 	apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
 	apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
+	"github.com/superseriousbusiness/gotosocial/internal/iotools"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
 	"github.com/superseriousbusiness/gotosocial/internal/oauth"
 )
@@ -128,8 +129,34 @@ func (m *Module) ServeFile(c *gin.Context) {
 		return
 	}
 
-	// we're good, return the slurped bytes + the rest of the content
-	c.DataFromReader(http.StatusOK, content.ContentLength, format, io.MultiReader(
-		bytes.NewReader(b), content.Content,
-	), nil)
+	// reconstruct the original content reader
+	r := io.MultiReader(bytes.NewReader(b), content.Content)
+
+	// Check the Range header: if this is a simple query for the whole file, we can return it now.
+	if c.GetHeader("Range") == "" && c.GetHeader("If-Range") == "" {
+		c.DataFromReader(http.StatusOK, content.ContentLength, format, r, nil)
+		return
+	}
+
+	// Range is set, so we need a ReadSeeker to pass to the ServeContent function.
+	tfs, err := iotools.TempFileSeeker(r)
+	if err != nil {
+		err = fmt.Errorf("ServeFile: error creating temp file seeker: %w", err)
+		apiutil.ErrorHandler(c, gtserror.NewErrorInternalError(err), m.processor.InstanceGet)
+		return
+	}
+	defer func() {
+		if err := tfs.Close(); err != nil {
+			log.Errorf("ServeFile: error closing temp file seeker: %s", err)
+		}
+	}()
+
+	// to avoid ServeContent wasting time seeking for the
+	// mime type, set this header already since we know it
+	c.Header("Content-Type", format)
+
+	// allow ServeContent to handle the rest of the request;
+	// it will handle Range as appropriate, and write correct
+	// response headers, http code, etc
+	http.ServeContent(c.Writer, c.Request, fileName, content.ContentUpdated, tfs)
 }
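Note: the switch to http.ServeContent above is what buys correct Range handling; ServeContent requires an io.ReadSeeker precisely so it can slice out the requested byte range itself. A self-contained sketch of that behavior (file name and content are placeholders):

```go
package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "strings"
    "time"
)

func main() {
    // A seekable body stands in for the temp-file-backed media reader.
    body := strings.NewReader("0123456789")

    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // ServeContent reads the Range header itself and replies with
        // 206 Partial Content plus the matching Content-Range header.
        http.ServeContent(w, r, "demo.txt", time.Now(), body)
    })

    req := httptest.NewRequest("GET", "/demo.txt", nil)
    req.Header.Set("Range", "bytes=2-5")
    rec := httptest.NewRecorder()
    h.ServeHTTP(rec, req)

    fmt.Println(rec.Code)                          // 206
    fmt.Println(rec.Header().Get("Content-Range")) // bytes 2-5/10
    fmt.Println(rec.Body.String())                 // 2345
}
```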
@@ -98,40 +98,12 @@ type Attachment struct {
 //
 // swagger:model mediaMeta
 type MediaMeta struct {
-	Length string `json:"length,omitempty"`
-	// Duration of the media in seconds.
-	// Only set for video and audio.
-	// example: 5.43
-	Duration float32 `json:"duration,omitempty"`
-	// Framerate of the media.
-	// Only set for video and gifs.
-	// example: 30
-	FPS uint16 `json:"fps,omitempty"`
-	// Size of the media, in the format `[width]x[height]`.
-	// Not set for audio.
-	// example: 1920x1080
-	Size string `json:"size,omitempty"`
-	// Width of the media in pixels.
-	// Not set for audio.
-	// example: 1920
-	Width int `json:"width,omitempty"`
-	// Height of the media in pixels.
-	// Not set for audio.
-	// example: 1080
-	Height int `json:"height,omitempty"`
-	// Aspect ratio of the media.
-	// Equal to width / height.
-	// example: 1.777777778
-	Aspect float32 `json:"aspect,omitempty"`
-	AudioEncode string `json:"audio_encode,omitempty"`
-	AudioBitrate string `json:"audio_bitrate,omitempty"`
-	AudioChannels string `json:"audio_channels,omitempty"`
 	// Dimensions of the original media.
 	Original MediaDimensions `json:"original"`
 	// Dimensions of the thumbnail/small version of the media.
 	Small MediaDimensions `json:"small,omitempty"`
 	// Focus data for the media.
-	Focus MediaFocus `json:"focus,omitempty"`
+	Focus *MediaFocus `json:"focus,omitempty"`
 }
 
 // MediaFocus models the focal point of a piece of media.
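Note on why Focus becomes a pointer: encoding/json's omitempty never treats a struct value as empty, so the old field always serialized as {"x":0,"y":0} even when no focus was set; a nil pointer, by contrast, is genuinely omitted. A minimal sketch:

```go
package main

import (
    "encoding/json"
    "fmt"
)

type Focus struct {
    X float32 `json:"x"`
    Y float32 `json:"y"`
}

type metaValue struct {
    Focus Focus `json:"focus,omitempty"` // struct value: omitempty has no effect
}

type metaPointer struct {
    Focus *Focus `json:"focus,omitempty"` // nil pointer: field is dropped
}

func main() {
    v, _ := json.Marshal(metaValue{})
    p, _ := json.Marshal(metaPointer{})
    fmt.Println(string(v)) // {"focus":{"x":0,"y":0}}
    fmt.Println(string(p)) // {}
}
```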
@@ -21,6 +21,7 @@
 import (
 	"io"
 	"net/url"
+	"time"
 )
 
 // Content wraps everything needed to serve a blob of content (some kind of media) through the API.
@@ -29,6 +30,8 @@ type Content struct {
 	ContentType string
 	// ContentLength in bytes
 	ContentLength int64
+	// Time when the content was last updated.
+	ContentUpdated time.Time
 	// Actual content
 	Content io.ReadCloser
 	// Resource URL to forward to if the file can be fetched from the storage directly (e.g. signed S3 URL)
@@ -58,14 +58,18 @@ type Configuration struct {
 	TrustedProxies  []string `name:"trusted-proxies" usage:"Proxies to trust when parsing x-forwarded headers into real IPs."`
 	SoftwareVersion string   `name:"software-version" usage:""`
 
-	DbType      string `name:"db-type" usage:"Database type: eg., postgres"`
-	DbAddress   string `name:"db-address" usage:"Database ipv4 address, hostname, or filename"`
-	DbPort      int    `name:"db-port" usage:"Database port"`
-	DbUser      string `name:"db-user" usage:"Database username"`
-	DbPassword  string `name:"db-password" usage:"Database password"`
-	DbDatabase  string `name:"db-database" usage:"Database name"`
-	DbTLSMode   string `name:"db-tls-mode" usage:"Database tls mode"`
-	DbTLSCACert string `name:"db-tls-ca-cert" usage:"Path to CA cert for db tls connection"`
+	DbType              string        `name:"db-type" usage:"Database type: eg., postgres"`
+	DbAddress           string        `name:"db-address" usage:"Database ipv4 address, hostname, or filename"`
+	DbPort              int           `name:"db-port" usage:"Database port"`
+	DbUser              string        `name:"db-user" usage:"Database username"`
+	DbPassword          string        `name:"db-password" usage:"Database password"`
+	DbDatabase          string        `name:"db-database" usage:"Database name"`
+	DbTLSMode           string        `name:"db-tls-mode" usage:"Database tls mode"`
+	DbTLSCACert         string        `name:"db-tls-ca-cert" usage:"Path to CA cert for db tls connection"`
+	DbSqliteJournalMode string        `name:"db-sqlite-journal-mode" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_journal_mode"`
+	DbSqliteSynchronous string        `name:"db-sqlite-synchronous" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_synchronous"`
+	DbSqliteCacheSize   bytesize.Size `name:"db-sqlite-cache-size" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_cache_size"`
+	DbSqliteBusyTimeout time.Duration `name:"db-sqlite-busy-timeout" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_busy_timeout"`
 
 	WebTemplateBaseDir string `name:"web-template-base-dir" usage:"Basedir for html templating files for rendering pages and composing emails."`
 	WebAssetBaseDir    string `name:"web-asset-base-dir" usage:"Directory to serve static assets from, accessible at example.org/assets/"`
@@ -40,14 +40,18 @@
 	Port:           8080,
 	TrustedProxies: []string{"127.0.0.1/32", "::1"}, // localhost
 
-	DbType:      "postgres",
-	DbAddress:   "",
-	DbPort:      5432,
-	DbUser:      "",
-	DbPassword:  "",
-	DbDatabase:  "gotosocial",
-	DbTLSMode:   "disable",
-	DbTLSCACert: "",
+	DbType:              "postgres",
+	DbAddress:           "",
+	DbPort:              5432,
+	DbUser:              "",
+	DbPassword:          "",
+	DbDatabase:          "gotosocial",
+	DbTLSMode:           "disable",
+	DbTLSCACert:         "",
+	DbSqliteJournalMode: "WAL",
+	DbSqliteSynchronous: "NORMAL",
+	DbSqliteCacheSize:   64 * bytesize.MiB,
+	DbSqliteBusyTimeout: time.Second * 30,
 
 	WebTemplateBaseDir: "./web/template/",
 	WebAssetBaseDir:    "./web/assets/",
@@ -51,6 +51,10 @@ func (s *ConfigState) AddGlobalFlags(cmd *cobra.Command) {
 		cmd.PersistentFlags().String(DbDatabaseFlag(), cfg.DbDatabase, fieldtag("DbDatabase", "usage"))
 		cmd.PersistentFlags().String(DbTLSModeFlag(), cfg.DbTLSMode, fieldtag("DbTLSMode", "usage"))
 		cmd.PersistentFlags().String(DbTLSCACertFlag(), cfg.DbTLSCACert, fieldtag("DbTLSCACert", "usage"))
+		cmd.PersistentFlags().String(DbSqliteJournalModeFlag(), cfg.DbSqliteJournalMode, fieldtag("DbSqliteJournalMode", "usage"))
+		cmd.PersistentFlags().String(DbSqliteSynchronousFlag(), cfg.DbSqliteSynchronous, fieldtag("DbSqliteSynchronous", "usage"))
+		cmd.PersistentFlags().Uint64(DbSqliteCacheSizeFlag(), uint64(cfg.DbSqliteCacheSize), fieldtag("DbSqliteCacheSize", "usage"))
+		cmd.PersistentFlags().Duration(DbSqliteBusyTimeoutFlag(), cfg.DbSqliteBusyTimeout, fieldtag("DbSqliteBusyTimeout", "usage"))
 	})
 }
@@ -524,6 +524,106 @@ func GetDbTLSCACert() string { return global.GetDbTLSCACert() }
 // SetDbTLSCACert safely sets the value for global configuration 'DbTLSCACert' field
 func SetDbTLSCACert(v string) { global.SetDbTLSCACert(v) }
 
+// GetDbSqliteJournalMode safely fetches the Configuration value for state's 'DbSqliteJournalMode' field
+func (st *ConfigState) GetDbSqliteJournalMode() (v string) {
+	st.mutex.Lock()
+	v = st.config.DbSqliteJournalMode
+	st.mutex.Unlock()
+	return
+}
+
+// SetDbSqliteJournalMode safely sets the Configuration value for state's 'DbSqliteJournalMode' field
+func (st *ConfigState) SetDbSqliteJournalMode(v string) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.DbSqliteJournalMode = v
+	st.reloadToViper()
+}
+
+// DbSqliteJournalModeFlag returns the flag name for the 'DbSqliteJournalMode' field
+func DbSqliteJournalModeFlag() string { return "db-sqlite-journal-mode" }
+
+// GetDbSqliteJournalMode safely fetches the value for global configuration 'DbSqliteJournalMode' field
+func GetDbSqliteJournalMode() string { return global.GetDbSqliteJournalMode() }
+
+// SetDbSqliteJournalMode safely sets the value for global configuration 'DbSqliteJournalMode' field
+func SetDbSqliteJournalMode(v string) { global.SetDbSqliteJournalMode(v) }
+
+// GetDbSqliteSynchronous safely fetches the Configuration value for state's 'DbSqliteSynchronous' field
+func (st *ConfigState) GetDbSqliteSynchronous() (v string) {
+	st.mutex.Lock()
+	v = st.config.DbSqliteSynchronous
+	st.mutex.Unlock()
+	return
+}
+
+// SetDbSqliteSynchronous safely sets the Configuration value for state's 'DbSqliteSynchronous' field
+func (st *ConfigState) SetDbSqliteSynchronous(v string) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.DbSqliteSynchronous = v
+	st.reloadToViper()
+}
+
+// DbSqliteSynchronousFlag returns the flag name for the 'DbSqliteSynchronous' field
+func DbSqliteSynchronousFlag() string { return "db-sqlite-synchronous" }
+
+// GetDbSqliteSynchronous safely fetches the value for global configuration 'DbSqliteSynchronous' field
+func GetDbSqliteSynchronous() string { return global.GetDbSqliteSynchronous() }
+
+// SetDbSqliteSynchronous safely sets the value for global configuration 'DbSqliteSynchronous' field
+func SetDbSqliteSynchronous(v string) { global.SetDbSqliteSynchronous(v) }
+
+// GetDbSqliteCacheSize safely fetches the Configuration value for state's 'DbSqliteCacheSize' field
+func (st *ConfigState) GetDbSqliteCacheSize() (v bytesize.Size) {
+	st.mutex.Lock()
+	v = st.config.DbSqliteCacheSize
+	st.mutex.Unlock()
+	return
+}
+
+// SetDbSqliteCacheSize safely sets the Configuration value for state's 'DbSqliteCacheSize' field
+func (st *ConfigState) SetDbSqliteCacheSize(v bytesize.Size) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.DbSqliteCacheSize = v
+	st.reloadToViper()
+}
+
+// DbSqliteCacheSizeFlag returns the flag name for the 'DbSqliteCacheSize' field
+func DbSqliteCacheSizeFlag() string { return "db-sqlite-cache-size" }
+
+// GetDbSqliteCacheSize safely fetches the value for global configuration 'DbSqliteCacheSize' field
+func GetDbSqliteCacheSize() bytesize.Size { return global.GetDbSqliteCacheSize() }
+
+// SetDbSqliteCacheSize safely sets the value for global configuration 'DbSqliteCacheSize' field
+func SetDbSqliteCacheSize(v bytesize.Size) { global.SetDbSqliteCacheSize(v) }
+
+// GetDbSqliteBusyTimeout safely fetches the Configuration value for state's 'DbSqliteBusyTimeout' field
+func (st *ConfigState) GetDbSqliteBusyTimeout() (v time.Duration) {
+	st.mutex.Lock()
+	v = st.config.DbSqliteBusyTimeout
+	st.mutex.Unlock()
+	return
+}
+
+// SetDbSqliteBusyTimeout safely sets the Configuration value for state's 'DbSqliteBusyTimeout' field
+func (st *ConfigState) SetDbSqliteBusyTimeout(v time.Duration) {
+	st.mutex.Lock()
+	defer st.mutex.Unlock()
+	st.config.DbSqliteBusyTimeout = v
+	st.reloadToViper()
+}
+
+// DbSqliteBusyTimeoutFlag returns the flag name for the 'DbSqliteBusyTimeout' field
+func DbSqliteBusyTimeoutFlag() string { return "db-sqlite-busy-timeout" }
+
+// GetDbSqliteBusyTimeout safely fetches the value for global configuration 'DbSqliteBusyTimeout' field
+func GetDbSqliteBusyTimeout() time.Duration { return global.GetDbSqliteBusyTimeout() }
+
+// SetDbSqliteBusyTimeout safely sets the value for global configuration 'DbSqliteBusyTimeout' field
+func SetDbSqliteBusyTimeout(v time.Duration) { global.SetDbSqliteBusyTimeout(v) }
+
 // GetWebTemplateBaseDir safely fetches the Configuration value for state's 'WebTemplateBaseDir' field
 func (st *ConfigState) GetWebTemplateBaseDir() (v string) {
 	st.mutex.Lock()
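These accessors follow the same generated getter/setter pattern as the rest of the config package. From inside the module's own packages the new values are read and written like so (a sketch; note the setters also sync the change back into viper via reloadToViper):

```go
package example

import (
    "fmt"

    "github.com/superseriousbusiness/gotosocial/internal/config"
)

func showSqliteSettings() {
    // Override two of the new settings at runtime, then read them back.
    config.SetDbSqliteJournalMode("DELETE")
    config.SetDbSqliteSynchronous("FULL")

    fmt.Println(config.GetDbSqliteJournalMode()) // DELETE
    fmt.Println(config.GetDbSqliteSynchronous()) // FULL
    fmt.Println(config.GetDbSqliteBusyTimeout()) // 30s, once defaults are loaded
}
```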
@@ -28,9 +28,11 @@
 	"fmt"
 	"os"
 	"runtime"
+	"strconv"
 	"strings"
 	"time"
 
+	"codeberg.org/gruf/go-bytesize"
 	"github.com/google/uuid"
 	"github.com/jackc/pgx/v4"
 	"github.com/jackc/pgx/v4/stdlib"
@@ -49,22 +51,6 @@
 	"modernc.org/sqlite"
 )
 
-const (
-	dbTypePostgres = "postgres"
-	dbTypeSqlite   = "sqlite"
-
-	// dbTLSModeDisable does not attempt to make a TLS connection to the database.
-	dbTLSModeDisable = "disable"
-	// dbTLSModeEnable attempts to make a TLS connection to the database, but doesn't fail if
-	// the certificate passed by the database isn't verified.
-	dbTLSModeEnable = "enable"
-	// dbTLSModeRequire attempts to make a TLS connection to the database, and requires
-	// that the certificate presented by the database is valid.
-	dbTLSModeRequire = "require"
-	// dbTLSModeUnset means that the TLS mode has not been set.
-	dbTLSModeUnset = ""
-)
-
 var registerTables = []interface{}{
 	&gtsmodel.AccountToEmoji{},
 	&gtsmodel.StatusToEmoji{},
@@ -127,26 +113,34 @@ func doMigration(ctx context.Context, db *bun.DB) error {
 func NewBunDBService(ctx context.Context, state *state.State) (db.DB, error) {
 	var conn *DBConn
 	var err error
-	dbType := strings.ToLower(config.GetDbType())
+	t := strings.ToLower(config.GetDbType())
 
-	switch dbType {
-	case dbTypePostgres:
+	switch t {
+	case "postgres":
 		conn, err = pgConn(ctx)
 		if err != nil {
 			return nil, err
 		}
-	case dbTypeSqlite:
+	case "sqlite":
 		conn, err = sqliteConn(ctx)
 		if err != nil {
 			return nil, err
 		}
 	default:
-		return nil, fmt.Errorf("database type %s not supported for bundb", dbType)
+		return nil, fmt.Errorf("database type %s not supported for bundb", t)
 	}
 
 	// Add database query hook
 	conn.DB.AddQueryHook(queryHook{})
 
+	// execute sqlite pragmas *after* adding database hook;
+	// this allows the pragma queries to be logged
+	if t == "sqlite" {
+		if err := sqlitePragmas(ctx, conn); err != nil {
+			return nil, err
+		}
+	}
+
 	// table registration is needed for many-to-many, see:
 	// https://bun.uptrace.dev/orm/many-to-many-relation/
 	for _, t := range registerTables {
@@ -230,29 +224,29 @@ func NewBunDBService(ctx context.Context, state *state.State) (db.DB, error) {
 
 func sqliteConn(ctx context.Context) (*DBConn, error) {
 	// validate db address has actually been set
-	dbAddress := config.GetDbAddress()
-	if dbAddress == "" {
+	address := config.GetDbAddress()
+	if address == "" {
 		return nil, fmt.Errorf("'%s' was not set when attempting to start sqlite", config.DbAddressFlag())
 	}
 
 	// Drop anything fancy from DB address
-	dbAddress = strings.Split(dbAddress, "?")[0]
-	dbAddress = strings.TrimPrefix(dbAddress, "file:")
+	address = strings.Split(address, "?")[0]
+	address = strings.TrimPrefix(address, "file:")
 
 	// Append our own SQLite preferences
-	dbAddress = "file:" + dbAddress + "?cache=shared"
+	address = "file:" + address
 
 	var inMem bool
 
-	if dbAddress == "file::memory:?cache=shared" {
-		dbAddress = fmt.Sprintf("file:%s?mode=memory&cache=shared", uuid.NewString())
-		log.Infof("using in-memory database address " + dbAddress)
+	if address == "file::memory:" {
+		address = fmt.Sprintf("file:%s?mode=memory&cache=shared", uuid.NewString())
+		log.Infof("using in-memory database address " + address)
 		log.Warn("sqlite in-memory database should only be used for debugging")
 		inMem = true
 	}
 
 	// Open new DB instance
-	sqldb, err := sql.Open("sqlite", dbAddress)
+	sqldb, err := sql.Open("sqlite", address)
 	if err != nil {
 		if errWithCode, ok := err.(*sqlite.Error); ok {
 			err = errors.New(sqlite.ErrorCodeString[errWithCode.Code()])
@@ -260,8 +254,6 @@ func sqliteConn(ctx context.Context) (*DBConn, error) {
 		return nil, fmt.Errorf("could not open sqlite db: %s", err)
 	}
 
-	tweakConnectionValues(sqldb)
-
 	if inMem {
 		// don't close connections on disconnect -- otherwise
 		// the SQLite database will be deleted when there
@@ -269,6 +261,7 @@ func sqliteConn(ctx context.Context) (*DBConn, error) {
 		sqldb.SetConnMaxLifetime(0)
 	}
 
+	// Wrap Bun database conn in our own wrapper
 	conn := WrapDBConn(bun.NewDB(sqldb, sqlitedialect.New()))
 
 	// ping to check the db is there and listening
@@ -278,11 +271,56 @@ func sqliteConn(ctx context.Context) (*DBConn, error) {
 		}
 		return nil, fmt.Errorf("sqlite ping: %s", err)
 	}
 
 	log.Info("connected to SQLITE database")
 
 	return conn, nil
 }
 
+func sqlitePragmas(ctx context.Context, conn *DBConn) error {
+	var pragmas [][]string
+	if mode := config.GetDbSqliteJournalMode(); mode != "" {
+		// Set the user provided SQLite journal mode
+		pragmas = append(pragmas, []string{"journal_mode", mode})
+	}
+
+	if mode := config.GetDbSqliteSynchronous(); mode != "" {
+		// Set the user provided SQLite synchronous mode
+		pragmas = append(pragmas, []string{"synchronous", mode})
+	}
+
+	if size := config.GetDbSqliteCacheSize(); size > 0 {
+		// Set the user provided SQLite cache size (in kibibytes)
+		// Prepend a '-' character to this to indicate to sqlite
+		// that we're giving kibibytes rather than num pages.
+		// https://www.sqlite.org/pragma.html#pragma_cache_size
+		s := "-" + strconv.FormatUint(uint64(size/bytesize.KiB), 10)
+		pragmas = append(pragmas, []string{"cache_size", s})
+	}
+
+	if timeout := config.GetDbSqliteBusyTimeout(); timeout > 0 {
+		t := strconv.FormatInt(timeout.Milliseconds(), 10)
+		pragmas = append(pragmas, []string{"busy_timeout", t})
+	}
+
+	for _, p := range pragmas {
+		pk := p[0]
+		pv := p[1]
+
+		if _, err := conn.DB.ExecContext(ctx, "PRAGMA ?=?", bun.Ident(pk), bun.Safe(pv)); err != nil {
+			return fmt.Errorf("error executing sqlite pragma %s: %w", pk, err)
+		}
+
+		var res string
+		if err := conn.DB.NewRaw("PRAGMA ?", bun.Ident(pk)).Scan(ctx, &res); err != nil {
+			return fmt.Errorf("error scanning sqlite pragma %s: %w", pv, err)
+		}
+
+		log.Infof("sqlite pragma %s set to %s", pk, res)
+	}
+
+	return nil
+}
+
 func pgConn(ctx context.Context) (*DBConn, error) {
 	opts, err := deriveBunDBPGOptions() //nolint:contextcheck
 	if err != nil {
@@ -291,7 +329,10 @@ func pgConn(ctx context.Context) (*DBConn, error) {
 
 	sqldb := stdlib.OpenDB(*opts)
 
-	tweakConnectionValues(sqldb)
+	// https://bun.uptrace.dev/postgres/running-bun-in-production.html#database-sql
+	maxOpenConns := 4 * runtime.GOMAXPROCS(0)
+	sqldb.SetMaxOpenConns(maxOpenConns)
+	sqldb.SetMaxIdleConns(maxOpenConns)
 
 	conn := WrapDBConn(bun.NewDB(sqldb, pgdialect.New()))
 
@@ -311,10 +352,6 @@ func pgConn(ctx context.Context) (*DBConn, error) {
 // deriveBunDBPGOptions takes an application config and returns either a ready-to-use set of options
 // with sensible defaults, or an error if it's not satisfied by the provided config.
 func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
-	if strings.ToUpper(config.GetDbType()) != db.DBTypePostgres {
-		return nil, fmt.Errorf("expected db type of %s but got %s", db.DBTypePostgres, config.DbTypeFlag())
-	}
-
 	// these are all optional, the db adapter figures out defaults
 	address := config.GetDbAddress()
 
@@ -326,14 +363,14 @@ func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
 
 	var tlsConfig *tls.Config
 	switch config.GetDbTLSMode() {
-	case dbTLSModeDisable, dbTLSModeUnset:
+	case "", "disable":
 		break // nothing to do
-	case dbTLSModeEnable:
+	case "enable":
 		/* #nosec G402 */
 		tlsConfig = &tls.Config{
 			InsecureSkipVerify: true,
 		}
-	case dbTLSModeRequire:
+	case "require":
 		tlsConfig = &tls.Config{
 			InsecureSkipVerify: false,
 			ServerName:         address,
@@ -397,13 +434,6 @@ func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
 	return cfg, nil
 }
 
-// https://bun.uptrace.dev/postgres/running-bun-in-production.html#database-sql
-func tweakConnectionValues(sqldb *sql.DB) {
-	maxOpenConns := 4 * runtime.GOMAXPROCS(0)
-	sqldb.SetMaxOpenConns(maxOpenConns)
-	sqldb.SetMaxIdleConns(maxOpenConns)
-}
-
 /*
 	CONVERSION FUNCTIONS
 */
@@ -20,6 +20,7 @@
 
 import (
 	"io"
+	"os"
 )
 
 // ReadFnCloser takes an io.Reader and wraps it to use the provided function to implement io.Closer.
@@ -157,3 +158,35 @@ func StreamWriteFunc(write func(io.Writer) error) io.Reader {
 
 	return pr
 }
+
+type tempFileSeeker struct {
+	io.Reader
+	io.Seeker
+	tmp *os.File
+}
+
+func (tfs *tempFileSeeker) Close() error {
+	tfs.tmp.Close()
+	return os.Remove(tfs.tmp.Name())
+}
+
+// TempFileSeeker converts the provided Reader into a ReadSeekCloser
+// by using an underlying temporary file. Callers should call the Close
+// function when they're done with the TempFileSeeker, to release +
+// clean up the temporary file.
+func TempFileSeeker(r io.Reader) (io.ReadSeekCloser, error) {
+	tmp, err := os.CreateTemp(os.TempDir(), "gotosocial-")
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err := io.Copy(tmp, r); err != nil {
+		return nil, err
+	}
+
+	return &tempFileSeeker{
+		Reader: tmp,
+		Seeker: tmp,
+		tmp:    tmp,
+	}, nil
+}
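A usage sketch for the new helper (importable from the module's own packages, since iotools is internal). One subtlety: the backing temp file's offset sits at end-of-file right after the internal io.Copy, so sequential readers should rewind first; mp4.Probe and http.ServeContent issue their own seeks anyway:

```go
package example

import (
    "fmt"
    "io"
    "strings"

    "github.com/superseriousbusiness/gotosocial/internal/iotools"
)

func demoTempFileSeeker() error {
    // Turn a plain (non-seekable) reader into a ReadSeekCloser.
    tfs, err := iotools.TempFileSeeker(strings.NewReader("hello range requests"))
    if err != nil {
        return err
    }
    defer tfs.Close() // also removes the backing temp file

    // Rewind before reading: the offset is at EOF after the internal copy.
    if _, err := tfs.Seek(0, io.SeekStart); err != nil {
        return err
    }

    b, err := io.ReadAll(tfs)
    if err != nil {
        return err
    }
    fmt.Println(string(b)) // hello range requests
    return nil
}
```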
@@ -414,9 +414,9 @@ func (suite *ManagerTestSuite) TestSlothVineProcessBlocking() {
 	suite.Equal(240, attachment.FileMeta.Original.Height)
 	suite.Equal(81120, attachment.FileMeta.Original.Size)
 	suite.EqualValues(1.4083333, attachment.FileMeta.Original.Aspect)
-	suite.EqualValues(6.5862, *attachment.FileMeta.Original.Duration)
+	suite.EqualValues(6.640907, *attachment.FileMeta.Original.Duration)
 	suite.EqualValues(29.000029, *attachment.FileMeta.Original.Framerate)
-	suite.EqualValues(0x3b3e1, *attachment.FileMeta.Original.Bitrate)
+	suite.EqualValues(0x59e74, *attachment.FileMeta.Original.Bitrate)
 	suite.EqualValues(gtsmodel.Small{
 		Width: 338, Height: 240, Size: 81120, Aspect: 1.4083333333333334,
 	}, attachment.FileMeta.Small)
@@ -531,6 +531,82 @@ func (suite *ManagerTestSuite) TestLongerMp4ProcessBlocking() {
 	suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes)
 }
 
+func (suite *ManagerTestSuite) TestBirdnestMp4ProcessBlocking() {
+	ctx := context.Background()
+
+	data := func(_ context.Context) (io.ReadCloser, int64, error) {
+		// load bytes from a test video
+		b, err := os.ReadFile("./test/birdnest-original.mp4")
+		if err != nil {
+			panic(err)
+		}
+		return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil
+	}
+
+	accountID := "01FS1X72SK9ZPW0J1QQ68BD264"
+
+	// process the media with no additional info provided
+	processingMedia, err := suite.manager.ProcessMedia(ctx, data, nil, accountID, nil)
+	suite.NoError(err)
+	// fetch the attachment id from the processing media
+	attachmentID := processingMedia.AttachmentID()
+
+	// do a blocking call to fetch the attachment
+	attachment, err := processingMedia.LoadAttachment(ctx)
+	suite.NoError(err)
+	suite.NotNil(attachment)
+
+	// make sure it's got the stuff set on it that we expect
+	// the attachment ID and accountID we expect
+	suite.Equal(attachmentID, attachment.ID)
+	suite.Equal(accountID, attachment.AccountID)
+
+	// file meta should be correctly derived from the video
+	suite.Equal(404, attachment.FileMeta.Original.Width)
+	suite.Equal(720, attachment.FileMeta.Original.Height)
+	suite.Equal(290880, attachment.FileMeta.Original.Size)
+	suite.EqualValues(0.5611111, attachment.FileMeta.Original.Aspect)
+	suite.EqualValues(9.822041, *attachment.FileMeta.Original.Duration)
+	suite.EqualValues(30, *attachment.FileMeta.Original.Framerate)
+	suite.EqualValues(0x117c79, *attachment.FileMeta.Original.Bitrate)
+	suite.EqualValues(gtsmodel.Small{
+		Width: 287, Height: 512, Size: 146944, Aspect: 0.5605469,
+	}, attachment.FileMeta.Small)
+	suite.Equal("video/mp4", attachment.File.ContentType)
+	suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
+	suite.Equal(1409577, attachment.File.FileSize)
+	suite.Equal("L00000fQfQfQfQfQfQfQfQfQfQfQ", attachment.Blurhash)
+
+	// now make sure the attachment is in the database
+	dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
+	suite.NoError(err)
+	suite.NotNil(dbAttachment)
+
+	// make sure the processed file is in storage
+	processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
+	suite.NoError(err)
+	suite.NotEmpty(processedFullBytes)
+
+	// load the processed bytes from our test folder, to compare
+	processedFullBytesExpected, err := os.ReadFile("./test/birdnest-processed.mp4")
+	suite.NoError(err)
+	suite.NotEmpty(processedFullBytesExpected)
+
+	// the bytes in storage should be what we expected
+	suite.Equal(processedFullBytesExpected, processedFullBytes)
+
+	// now do the same for the thumbnail and make sure it's what we expected
+	processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
+	suite.NoError(err)
+	suite.NotEmpty(processedThumbnailBytes)
+
+	processedThumbnailBytesExpected, err := os.ReadFile("./test/birdnest-thumbnail.jpg")
+	suite.NoError(err)
+	suite.NotEmpty(processedThumbnailBytesExpected)
+
+	suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes)
+}
+
 func (suite *ManagerTestSuite) TestNotAnMp4ProcessBlocking() {
 	// try to load an 'mp4' that's actually an mkv in disguise
 
@@ -553,7 +629,7 @@ func (suite *ManagerTestSuite) TestNotAnMp4ProcessBlocking() {
 
 	// we should get an error while loading
 	attachment, err := processingMedia.LoadAttachment(ctx)
-	suite.EqualError(err, "error decoding video: error determining video metadata: [width height duration framerate bitrate]")
+	suite.EqualError(err, "error decoding video: error determining video metadata: [width height framerate]")
 	suite.Nil(attachment)
 }
New binary files (contents not shown):

BIN internal/media/test/birdnest-original.mp4
BIN internal/media/test/birdnest-processed.mp4
BIN internal/media/test/birdnest-thumbnail.jpg (2.8 KiB)
@@ -21,9 +21,10 @@
 import (
 	"fmt"
 	"io"
-	"os"
 
 	"github.com/abema/go-mp4"
+	"github.com/superseriousbusiness/gotosocial/internal/iotools"
+	"github.com/superseriousbusiness/gotosocial/internal/log"
 )
 
 type gtsVideo struct {
@@ -36,43 +37,48 @@ type gtsVideo struct {
 // decodeVideoFrame decodes and returns an image from a single frame in the given video stream.
 // (note: currently this only returns a blank image resized to fit video dimensions).
 func decodeVideoFrame(r io.Reader) (*gtsVideo, error) {
-	// We'll need a readseeker to decode the video. We can get a readseeker
-	// without burning too much mem by first copying the reader into a temp file.
-	// First create the file in the temporary directory...
-	tmp, err := os.CreateTemp(os.TempDir(), "gotosocial-")
+	// we need a readseeker to decode the video...
+	tfs, err := iotools.TempFileSeeker(r)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("error creating temp file seeker: %w", err)
 	}
 	defer func() {
-		tmp.Close()
-		os.Remove(tmp.Name())
+		if err := tfs.Close(); err != nil {
+			log.Errorf("error closing temp file seeker: %s", err)
+		}
 	}()
 
-	// Now copy the entire reader we've been provided into the
-	// temporary file; we won't use the reader again after this.
-	if _, err := io.Copy(tmp, r); err != nil {
-		return nil, err
-	}
-
 	// probe the video file to extract useful metadata from it; for methodology, see:
 	// https://github.com/abema/go-mp4/blob/7d8e5a7c5e644e0394261b0cf72fef79ce246d31/mp4tool/probe/probe.go#L85-L154
-	info, err := mp4.Probe(tmp)
+	info, err := mp4.Probe(tfs)
 	if err != nil {
-		return nil, fmt.Errorf("error probing tmp file %s: %w", tmp.Name(), err)
+		return nil, fmt.Errorf("error during mp4 probe: %w", err)
 	}
 
 	var (
-		width  int
-		height int
-		video  gtsVideo
+		width        int
+		height       int
+		videoBitrate uint64
+		audioBitrate uint64
+		video        gtsVideo
 	)
 
 	for _, tr := range info.Tracks {
 		if tr.AVC == nil {
+			// audio track
+			if br := tr.Samples.GetBitrate(tr.Timescale); br > audioBitrate {
+				audioBitrate = br
+			} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > audioBitrate {
+				audioBitrate = br
+			}
+
+			if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) {
+				video.duration = float32(d)
+			}
 			continue
 		}
 
+		// video track
 		if w := int(tr.AVC.Width); w > width {
 			width = w
 		}
@@ -81,10 +87,10 @@ func decodeVideoFrame(r io.Reader) (*gtsVideo, error) {
 			height = h
 		}
 
-		if br := tr.Samples.GetBitrate(tr.Timescale); br > video.bitrate {
-			video.bitrate = br
-		} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > video.bitrate {
-			video.bitrate = br
+		if br := tr.Samples.GetBitrate(tr.Timescale); br > videoBitrate {
+			videoBitrate = br
+		} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > videoBitrate {
+			videoBitrate = br
 		}
 
 		if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) {
@@ -93,6 +99,10 @@ func decodeVideoFrame(r io.Reader) (*gtsVideo, error) {
 		}
 	}
 
+	// overall bitrate should be audio + video combined
+	// (since they're both playing at the same time)
+	video.bitrate = audioBitrate + videoBitrate
+
 	// Check for empty video metadata.
 	var empty []string
 	if width == 0 {
@@ -85,9 +85,6 @@ func (p *processor) GetFile(ctx context.Context, requestingAccount *gtsmodel.Acc
 }
 
 func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount *gtsmodel.Account, wantedMediaID string, owningAccountID string, mediaSize media.Size) (*apimodel.Content, gtserror.WithCode) {
-	attachmentContent := &apimodel.Content{}
-	var storagePath string
-
 	// retrieve attachment from the database and do basic checks on it
 	a, err := p.db.GetAttachmentByID(ctx, wantedMediaID)
 	if err != nil {
@@ -146,6 +143,13 @@ func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount
 		}
 	}
 
+	var (
+		storagePath       string
+		attachmentContent = &apimodel.Content{
+			ContentUpdated: a.UpdatedAt,
+		}
+	)
+
 	// get file information from the attachment depending on the requested media size
 	switch mediaSize {
 	case media.SizeOriginal:
@@ -284,19 +284,13 @@ func (c *converter) AttachmentToAPIAttachment(ctx context.Context, a *gtsmodel.M
 			Original: apimodel.MediaDimensions{
 				Width:  a.FileMeta.Original.Width,
 				Height: a.FileMeta.Original.Height,
-				Size:   fmt.Sprintf("%dx%d", a.FileMeta.Original.Width, a.FileMeta.Original.Height),
-				Aspect: float32(a.FileMeta.Original.Aspect),
 			},
 			Small: apimodel.MediaDimensions{
 				Width:  a.FileMeta.Small.Width,
 				Height: a.FileMeta.Small.Height,
-				Size:   fmt.Sprintf("%dx%d", a.FileMeta.Small.Width, a.FileMeta.Small.Height),
+				Size:   strconv.Itoa(a.FileMeta.Small.Width) + "x" + strconv.Itoa(a.FileMeta.Small.Height),
 				Aspect: float32(a.FileMeta.Small.Aspect),
 			},
-			Focus: apimodel.MediaFocus{
-				X: a.FileMeta.Focus.X,
-				Y: a.FileMeta.Focus.Y,
-			},
 		},
 		Blurhash: a.Blurhash,
 	}
@@ -318,20 +312,31 @@ func (c *converter) AttachmentToAPIAttachment(ctx context.Context, a *gtsmodel.M
 		apiAttachment.Description = &i
 	}
 
-	if i := a.FileMeta.Original.Duration; i != nil {
-		apiAttachment.Meta.Original.Duration = *i
-	}
+	// type specific fields
+	switch a.Type {
+	case gtsmodel.FileTypeImage:
+		apiAttachment.Meta.Original.Size = strconv.Itoa(a.FileMeta.Original.Width) + "x" + strconv.Itoa(a.FileMeta.Original.Height)
+		apiAttachment.Meta.Original.Aspect = float32(a.FileMeta.Original.Aspect)
+		apiAttachment.Meta.Focus = &apimodel.MediaFocus{
+			X: a.FileMeta.Focus.X,
+			Y: a.FileMeta.Focus.Y,
+		}
+	case gtsmodel.FileTypeVideo:
+		if i := a.FileMeta.Original.Duration; i != nil {
+			apiAttachment.Meta.Original.Duration = *i
+		}
 
-	if i := a.FileMeta.Original.Framerate; i != nil {
-		// the masto api expects this as a string in
-		// the format `integer/1`, so 30fps is `30/1`
-		round := math.Round(float64(*i))
-		fr := strconv.FormatInt(int64(round), 10)
-		apiAttachment.Meta.Original.FrameRate = fr + "/1"
-	}
+		if i := a.FileMeta.Original.Framerate; i != nil {
+			// the masto api expects this as a string in
+			// the format `integer/1`, so 30fps is `30/1`
+			round := math.Round(float64(*i))
+			fr := strconv.FormatInt(int64(round), 10)
+			apiAttachment.Meta.Original.FrameRate = fr + "/1"
+		}
 
-	if i := a.FileMeta.Original.Bitrate; i != nil {
-		apiAttachment.Meta.Original.Bitrate = int(*i)
+		if i := a.FileMeta.Original.Bitrate; i != nil {
+			apiAttachment.Meta.Original.Bitrate = int(*i)
+		}
 	}
 
 	return apiAttachment, nil
@@ -441,19 +441,13 @@ func (suite *InternalToFrontendTestSuite) TestVideoAttachmentToFrontend() {
 			"height": 404,
 			"frame_rate": "30/1",
 			"duration": 15.033334,
-			"bitrate": 1206522,
-			"size": "720x404",
-			"aspect": 1.7821782
+			"bitrate": 1206522
 		},
 		"small": {
 			"width": 720,
 			"height": 404,
 			"size": "720x404",
 			"aspect": 1.7821782
-		},
-		"focus": {
-			"x": 0,
-			"y": 0
-		}
+		}
 	},
 	"description": "A cow adorably licking another cow!"
@@ -2,7 +2,7 @@
 
 set -eu
 
-EXPECT='{"account-domain":"peepee","accounts-allow-custom-css":true,"accounts-approval-required":false,"accounts-reason-required":false,"accounts-registration-open":true,"advanced-cookies-samesite":"strict","advanced-rate-limit-requests":6969,"advanced-throttling-multiplier":-1,"application-name":"gts","bind-address":"127.0.0.1","cache":{"gts":{"account-max-size":99,"account-sweep-freq":1000000000,"account-ttl":10800000000000,"block-max-size":100,"block-sweep-freq":10000000000,"block-ttl":300000000000,"domain-block-max-size":1000,"domain-block-sweep-freq":60000000000,"domain-block-ttl":86400000000000,"emoji-category-max-size":100,"emoji-category-sweep-freq":10000000000,"emoji-category-ttl":300000000000,"emoji-max-size":500,"emoji-sweep-freq":10000000000,"emoji-ttl":300000000000,"mention-max-size":500,"mention-sweep-freq":10000000000,"mention-ttl":300000000000,"notification-max-size":500,"notification-sweep-freq":10000000000,"notification-ttl":300000000000,"report-max-size":100,"report-sweep-freq":10000000000,"report-ttl":300000000000,"status-max-size":500,"status-sweep-freq":10000000000,"status-ttl":300000000000,"tombstone-max-size":100,"tombstone-sweep-freq":10000000000,"tombstone-ttl":300000000000,"user-max-size":100,"user-sweep-freq":10000000000,"user-ttl":300000000000}},"config-path":"internal/config/testdata/test.yaml","db-address":":memory:","db-database":"gotosocial_prod","db-password":"hunter2","db-port":6969,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"sqlite","db-user":"sex-haver","dry-run":false,"email":"","host":"example.com","instance-deliver-to-shared-inboxes":false,"instance-expose-peers":true,"instance-expose-public-timeline":true,"instance-expose-suspended":true,"landing-page-user":"admin","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":true,"log-level":"info","media-description-max-chars":5000,"media-description-min-chars":69,"media-emoji-local-max-size":420,"media-emoji-remote-max-size":420,"media-image-max-size":420,"media-remote-cache-days":30,"media-video-max-size":420,"oidc-client-id":"1234","oidc-client-secret":"shhhh its a secret","oidc-enabled":true,"oidc-idp-name":"sex-haver","oidc-issuer":"whoknows","oidc-link-existing":true,"oidc-scopes":["read","write"],"oidc-skip-verification":true,"password":"","path":"","port":6969,"protocol":"http","smtp-from":"queen.rip.in.piss@terfisland.org","smtp-host":"example.com","smtp-password":"hunter2","smtp-port":4269,"smtp-username":"sex-haver","software-version":"","statuses-cw-max-chars":420,"statuses-max-chars":69,"statuses-media-max-files":1,"statuses-poll-max-options":1,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/root/store","storage-s3-access-key":"minio","storage-s3-bucket":"gts","storage-s3-endpoint":"localhost:9000","storage-s3-proxy":true,"storage-s3-secret-key":"miniostorage","storage-s3-use-ssl":false,"syslog-address":"127.0.0.1:6969","syslog-enabled":true,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","docker.host.local"],"username":"","web-asset-base-dir":"/root","web-template-base-dir":"/root"}'
+EXPECT='{"account-domain":"peepee","accounts-allow-custom-css":true,"accounts-approval-required":false,"accounts-reason-required":false,"accounts-registration-open":true,"advanced-cookies-samesite":"strict","advanced-rate-limit-requests":6969,"advanced-throttling-multiplier":-1,"application-name":"gts","bind-address":"127.0.0.1","cache":{"gts":{"account-max-size":99,"account-sweep-freq":1000000000,"account-ttl":10800000000000,"block-max-size":100,"block-sweep-freq":10000000000,"block-ttl":300000000000,"domain-block-max-size":1000,"domain-block-sweep-freq":60000000000,"domain-block-ttl":86400000000000,"emoji-category-max-size":100,"emoji-category-sweep-freq":10000000000,"emoji-category-ttl":300000000000,"emoji-max-size":500,"emoji-sweep-freq":10000000000,"emoji-ttl":300000000000,"mention-max-size":500,"mention-sweep-freq":10000000000,"mention-ttl":300000000000,"notification-max-size":500,"notification-sweep-freq":10000000000,"notification-ttl":300000000000,"report-max-size":100,"report-sweep-freq":10000000000,"report-ttl":300000000000,"status-max-size":500,"status-sweep-freq":10000000000,"status-ttl":300000000000,"tombstone-max-size":100,"tombstone-sweep-freq":10000000000,"tombstone-ttl":300000000000,"user-max-size":100,"user-sweep-freq":10000000000,"user-ttl":300000000000}},"config-path":"internal/config/testdata/test.yaml","db-address":":memory:","db-database":"gotosocial_prod","db-password":"hunter2","db-port":6969,"db-sqlite-busy-timeout":1000000000,"db-sqlite-cache-size":0,"db-sqlite-journal-mode":"DELETE","db-sqlite-synchronous":"FULL","db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"sqlite","db-user":"sex-haver","dry-run":false,"email":"","host":"example.com","instance-deliver-to-shared-inboxes":false,"instance-expose-peers":true,"instance-expose-public-timeline":true,"instance-expose-suspended":true,"landing-page-user":"admin","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":true,"log-level":"info","media-description-max-chars":5000,"media-description-min-chars":69,"media-emoji-local-max-size":420,"media-emoji-remote-max-size":420,"media-image-max-size":420,"media-remote-cache-days":30,"media-video-max-size":420,"oidc-client-id":"1234","oidc-client-secret":"shhhh its a secret","oidc-enabled":true,"oidc-idp-name":"sex-haver","oidc-issuer":"whoknows","oidc-link-existing":true,"oidc-scopes":["read","write"],"oidc-skip-verification":true,"password":"","path":"","port":6969,"protocol":"http","smtp-from":"queen.rip.in.piss@terfisland.org","smtp-host":"example.com","smtp-password":"hunter2","smtp-port":4269,"smtp-username":"sex-haver","software-version":"","statuses-cw-max-chars":420,"statuses-max-chars":69,"statuses-media-max-files":1,"statuses-poll-max-options":1,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/root/store","storage-s3-access-key":"minio","storage-s3-bucket":"gts","storage-s3-endpoint":"localhost:9000","storage-s3-proxy":true,"storage-s3-secret-key":"miniostorage","storage-s3-use-ssl":false,"syslog-address":"127.0.0.1:6969","syslog-enabled":true,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","docker.host.local"],"username":"","web-asset-base-dir":"/root","web-template-base-dir":"/root"}'
 
 # Set all the environment variables to
 # ensure that these are parsed without panic
@@ -22,6 +22,10 @@ GTS_DB_PORT=6969 \
 GTS_DB_USER='sex-haver' \
 GTS_DB_PASSWORD='hunter2' \
 GTS_DB_DATABASE='gotosocial_prod' \
+GTS_DB_SQLITE_JOURNAL_MODE='DELETE' \
+GTS_DB_SQLITE_SYNCHRONOUS='FULL' \
+GTS_DB_SQLITE_CACHE_SIZE=0 \
+GTS_DB_SQLITE_BUSY_TIMEOUT='1s' \
 GTS_TLS_MODE='' \
 GTS_DB_TLS_CA_CERT='' \
 GTS_WEB_TEMPLATE_BASE_DIR='/root' \
@@ -19,6 +19,9 @@
 package testrig
 
 import (
+	"time"
+
+	"codeberg.org/gruf/go-bytesize"
 	"github.com/coreos/go-oidc/v3/oidc"
 	"github.com/superseriousbusiness/gotosocial/internal/config"
 )
@@ -43,12 +46,16 @@ func InitTestConfig() {
 	Port:           8080,
 	TrustedProxies: []string{"127.0.0.1/32", "::1"},
 
-	DbType:     "sqlite",
-	DbAddress:  ":memory:",
-	DbPort:     5432,
-	DbUser:     "postgres",
-	DbPassword: "postgres",
-	DbDatabase: "postgres",
+	DbType:              "sqlite",
+	DbAddress:           ":memory:",
+	DbPort:              5432,
+	DbUser:              "postgres",
+	DbPassword:          "postgres",
+	DbDatabase:          "postgres",
+	DbSqliteJournalMode: "WAL",
+	DbSqliteSynchronous: "NORMAL",
+	DbSqliteCacheSize:   64 * bytesize.MiB,
+	DbSqliteBusyTimeout: time.Second * 30,
 
 	WebTemplateBaseDir: "./web/template/",
 	WebAssetBaseDir:    "./web/assets/",
@ -73,15 +73,11 @@
 // value as the port instead.
 func NewTestDB() db.DB {
 	if alternateAddress := os.Getenv("GTS_DB_ADDRESS"); alternateAddress != "" {
-		config.Config(func(cfg *config.Configuration) {
-			cfg.DbAddress = alternateAddress
-		})
+		config.SetDbAddress(alternateAddress)
 	}
 
 	if alternateDBType := os.Getenv("GTS_DB_TYPE"); alternateDBType != "" {
-		config.Config(func(cfg *config.Configuration) {
-			cfg.DbType = alternateDBType
-		})
+		config.SetDbType(alternateDBType)
 	}
 
 	if alternateDBPort := os.Getenv("GTS_DB_PORT"); alternateDBPort != "" {
@ -89,9 +85,7 @@ func NewTestDB() db.DB {
 		if err != nil {
 			panic(err)
 		}
-		config.Config(func(cfg *config.Configuration) {
-			cfg.DbPort = int(port)
-		})
+		config.SetDbPort(int(port))
 	}
 
 	var state state.State
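The change above swaps the closure-based `config.Config(func(cfg *config.Configuration) {...})` mutation for per-key setters like `config.SetDbAddress`. A simplified sketch of the two styles; the real config package generates these setters and serializes access with a mutex, none of which is shown here:

```go
package config

// Configuration is a stand-in for the real struct; only the
// fields touched in the diff above are shown.
type Configuration struct {
	DbAddress string
	DbType    string
	DbPort    int
}

var conf Configuration

// Closure style, as removed above: mutate any fields under one call.
func Config(fn func(cfg *Configuration)) { fn(&conf) }

// Setter style, as added above: one generated accessor per key.
func SetDbAddress(v string) { conf.DbAddress = v }
func SetDbType(v string)    { conf.DbType = v }
func SetDbPort(v int)       { conf.DbPort = v }
```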
34 vendor/codeberg.org/gruf/go-errors/v2/callers.go generated vendored
@ -40,33 +40,29 @@ func (f Callers) Frames() []runtime.Frame {
 	return frames
 }
 
-// MarshalJSON implements json.Marshaler to provide an easy, simply default.
+// MarshalJSON implements json.Marshaler to provide an easy, simple default.
 func (f Callers) MarshalJSON() ([]byte, error) {
 	// JSON-able frame type
-	type frame struct {
+	type jsonFrame struct {
 		Func string `json:"func"`
 		File string `json:"file"`
 		Line int    `json:"line"`
 	}
 
-	// Allocate expected frames slice
-	frames := make([]frame, 0, len(f))
+	// Convert to frames
+	frames := f.Frames()
 
-	// Get frames iterator for PCs
-	iter := runtime.CallersFrames(f)
+	// Allocate expected size jsonFrame slice
+	jsonFrames := make([]jsonFrame, 0, len(f))
 
-	for {
-		// Get next frame
-		f, ok := iter.Next()
-		if !ok {
-			break
-		}
+	for i := 0; i < len(frames); i++ {
+		frame := frames[i]
 
-		// Append to frames slice
-		frames = append(frames, frame{
-			Func: funcname(f.Function),
-			File: f.File,
-			Line: f.Line,
+		// Convert each to jsonFrame object
+		jsonFrames = append(jsonFrames, jsonFrame{
+			Func: funcname(frame.Function),
+			File: frame.File,
+			Line: frame.Line,
 		})
 	}
 
@ -86,8 +82,8 @@ func (f Callers) String() string {
 		frame := frames[i]
 
 		// Append formatted caller info
-		funcname := funcname(frame.Function)
-		buf = append(buf, funcname+"()\n\t"+frame.File+":"...)
+		fn := funcname(frame.Function)
+		buf = append(buf, fn+"()\n\t"+frame.File+":"...)
 		buf = strconv.AppendInt(buf, int64(frame.Line), 10)
 		buf = append(buf, '\n')
 	}
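A hedged usage sketch for the rewritten `MarshalJSON`: wrap some program counters in `Callers` and marshal them. The import path follows the vendored layout above, and the exact frame output depends on the call site:

```go
package main

import (
	"encoding/json"
	"fmt"
	"runtime"

	errors "codeberg.org/gruf/go-errors/v2"
)

func main() {
	// Capture up to 8 program counters, skipping runtime.Callers itself.
	pcs := make([]uintptr, 8)
	n := runtime.Callers(1, pcs)

	// json.Marshal invokes the MarshalJSON defined in the diff above.
	b, err := json.Marshal(errors.Callers(pcs[:n]))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // [{"func":"main.main","file":"...","line":...}]
}
```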
12 vendor/codeberg.org/gruf/go-errors/v2/errors.go generated vendored
@ -24,13 +24,13 @@ func Wrapf(err error, msgf string, args ...interface{}) error {
 	return create(fmt.Sprintf(msgf, args...), err)
 }
 
-// Stacktrace fetches a stored stacktrace of callers from an error, or returns nil.
+// Stacktrace fetches first stored stacktrace of callers from error chain.
 func Stacktrace(err error) Callers {
-	var callers Callers
-	if err, ok := err.(interface { //nolint
+	var e interface {
 		Stacktrace() Callers
-	}); ok {
-		callers = err.Stacktrace()
-	}
-	return callers
+	}
+	if !As(err, &e) {
+		return nil
+	}
+	return e.Stacktrace()
 }
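A sketch of what the `As`-based rewrite buys: `Stacktrace` can now find a trace stored anywhere in a wrapped chain, not only on the outermost error. Whether `errors.New` attaches callers depends on go-errors build tags, so treat this as illustrative:

```go
package main

import (
	"fmt"

	errors "codeberg.org/gruf/go-errors/v2"
)

func main() {
	// go-errors attaches callers in New() (unless built with its notrace tag).
	inner := errors.New("boom")

	// Wrap with plain stdlib formatting; the trace lives one level down.
	wrapped := fmt.Errorf("outer: %w", inner)

	// The As()-based lookup walks the chain and still finds it.
	if trace := errors.Stacktrace(wrapped); trace != nil {
		fmt.Print(trace.String())
	}
}
```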
54 vendor/codeberg.org/gruf/go-errors/v2/standard.go generated vendored
@ -3,6 +3,7 @@
 import (
 	"errors"
 	"reflect"
+	_ "unsafe"
 
 	"codeberg.org/gruf/go-bitutil"
 )
@ -18,7 +19,7 @@
 func Is(err error, targets ...error) bool {
 	var flags bitutil.Flags64
 
-	// Flags only has 64 bit slots
+	// Flags only has 64 bit-slots
 	if len(targets) > 64 {
 		panic("too many targets")
 	}
@ -46,26 +47,30 @@ func Is(err error, targets ...error) bool {
 	}
 
 	for err != nil {
-		var errorIs func(error) bool
-
 		// Check if this layer supports .Is interface
 		is, ok := err.(interface{ Is(error) bool })
-		if ok {
-			errorIs = is.Is
-		} else {
-			errorIs = neveris
-		}
-
-		for i := 0; i < len(targets); i++ {
-			// Try directly compare errors
-			if flags.Get(uint8(i)) &&
-				err == targets[i] {
-				return true
-			}
-
-			// Try use .Is() interface
-			if errorIs(targets[i]) {
-				return true
+		if !ok {
+			// Error does not support interface
+			//
+			// Only try perform direct compare
+			for i := 0; i < len(targets); i++ {
+				// Try directly compare errors
+				if flags.Get(uint8(i)) &&
+					err == targets[i] {
+					return true
+				}
+			}
+		} else {
+			// Error supports the .Is interface
+			//
+			// Perform direct compare AND .Is()
+			for i := 0; i < len(targets); i++ {
+				if (flags.Get(uint8(i)) &&
+					err == targets[i]) ||
+					is.Is(targets[i]) {
+					return true
+				}
 			}
 		}
 
@ -92,15 +97,12 @@ func Is(err error, targets ...error) bool {
 //
 // As panics if target is not a non-nil pointer to either a type that implements
 // error, or to any interface type.
-func As(err error, target interface{}) bool {
-	return errors.As(err, target)
-}
+//
+//go:linkname As errors.As
+func As(err error, target interface{}) bool
 
 // Unwrap returns the result of calling the Unwrap method on err, if err's
 // type contains an Unwrap method returning error. Otherwise, Unwrap returns nil.
-func Unwrap(err error) error {
-	return errors.Unwrap(err)
-}
-
-// neveris fits the .Is(error) bool interface function always returning false.
-func neveris(error) bool { return false }
+//
+//go:linkname Unwrap errors.Unwrap
+func Unwrap(err error) error
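The `go:linkname` declarations above bind the body-less `As` and `Unwrap` directly to the stdlib implementations, dropping a wrapper frame; the blank `unsafe` import is what licenses the directive. `Is` keeps its own implementation and still accepts several targets at once, panicking above 64 per the flags check. A small usage sketch:

```go
package main

import (
	"fmt"
	"io"
	"os"

	errors "codeberg.org/gruf/go-errors/v2"
)

func main() {
	err := fmt.Errorf("read failed: %w", io.EOF)

	// One pass over the chain, several targets at once.
	fmt.Println(errors.Is(err, os.ErrNotExist, io.EOF)) // true
}
```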
54 vendor/codeberg.org/gruf/go-errors/v2/value.go generated vendored Normal file
@ -0,0 +1,54 @@
+package errors
+
+// WithValue wraps err to store given key-value pair, accessible via Value() function.
+func WithValue(err error, key any, value any) error {
+	if err == nil {
+		panic("nil error")
+	}
+	return &errWithValue{
+		err: err,
+		key: key,
+		val: value,
+	}
+}
+
+// Value searches for value stored under given key in error chain.
+func Value(err error, key any) any {
+	var e *errWithValue
+
+	if !As(err, &e) {
+		return nil
+	}
+
+	return e.Value(key)
+}
+
+type errWithValue struct {
+	err error
+	key any
+	val any
+}
+
+func (e *errWithValue) Error() string {
+	return e.err.Error()
+}
+
+func (e *errWithValue) Is(target error) bool {
+	return e.err == target
+}
+
+func (e *errWithValue) Unwrap() error {
+	return Unwrap(e.err)
+}
+
+func (e *errWithValue) Value(key any) any {
+	for {
+		if key == e.key {
+			return e.val
+		}
+
+		if !As(e.err, &e) {
+			return nil
+		}
+	}
+}
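`value.go` is new in this commit. A hedged usage sketch of the key-value API it adds, with a hypothetical `ctxKey` type standing in for whatever callers would use:

```go
package main

import (
	"fmt"

	errors "codeberg.org/gruf/go-errors/v2"
)

type ctxKey string

func main() {
	base := fmt.Errorf("query failed")
	err := errors.WithValue(base, ctxKey("table"), "accounts")

	// Value() walks the wrapped chain looking for the key.
	fmt.Println(errors.Value(err, ctxKey("table"))) // accounts
	fmt.Println(errors.Value(err, ctxKey("other"))) // <nil>
}
```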
2 vendor/github.com/minio/minio-go/v7/Makefile generated vendored
@ -16,6 +16,8 @@ lint:
 
 vet:
 	@GO111MODULE=on go vet ./...
+	@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
+	${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
 
 test:
 	@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
4 vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go generated vendored
@ -21,7 +21,7 @@
 	"bytes"
 	"context"
 	"encoding/xml"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 
@ -143,5 +143,5 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b
 		}
 	}
 
-	return ioutil.ReadAll(resp.Body)
+	return io.ReadAll(resp.Body)
 }
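The rest of this diff is dominated by the same mechanical migration: since Go 1.16, `io/ioutil` is deprecated and its functions are thin wrappers over `io` and `os`, so the replacements are drop-in. A minimal sketch of the mapping used throughout:

```go
// ioutil.ReadAll(r)       -> io.ReadAll(r)
// ioutil.NopCloser(r)     -> io.NopCloser(r)
// ioutil.Discard          -> io.Discard
// ioutil.ReadFile(name)   -> os.ReadFile(name)
// ioutil.TempFile(dir, p) -> os.CreateTemp(dir, p)
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	b, err := io.ReadAll(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // hello
}
```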
4 vendor/github.com/minio/minio-go/v7/api-bucket-policy.go generated vendored
@ -18,7 +18,7 @@
 
 import (
 	"context"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@ -137,7 +137,7 @@ func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string
 		}
 	}
 
-	bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
+	bucketPolicyBuf, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return "", err
 	}
4 vendor/github.com/minio/minio-go/v7/api-bucket-replication.go generated vendored
@ -22,7 +22,7 @@
 	"context"
 	"encoding/json"
 	"encoding/xml"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"time"
@ -180,7 +180,7 @@ func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName str
 	if resp.StatusCode != http.StatusOK {
 		return s, httpRespToErrorResponse(resp, bucketName, "")
 	}
-	respBytes, err := ioutil.ReadAll(resp.Body)
+	respBytes, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return s, err
 	}
3 vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go generated vendored
@ -22,7 +22,6 @@
 	"encoding/xml"
 	"errors"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 
@ -58,7 +57,7 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags
 		return nil, httpRespToErrorResponse(resp, bucketName, "")
 	}
 
-	defer io.Copy(ioutil.Discard, resp.Body)
+	defer io.Copy(io.Discard, resp.Body)
 	return tags.ParseBucketXML(resp.Body)
 }
5 vendor/github.com/minio/minio-go/v7/api-compose-object.go generated vendored
@ -21,7 +21,6 @@
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
@ -516,7 +515,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
 			return UploadInfo{}, err
 		}
 		if dst.Progress != nil {
-			io.CopyN(ioutil.Discard, dst.Progress, end-start+1)
+			io.CopyN(io.Discard, dst.Progress, end-start+1)
 		}
 		objParts = append(objParts, complPart)
 		partIndex++
@ -525,7 +524,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
 
 	// 4. Make final complete-multipart request.
 	uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
-		completeMultipartUpload{Parts: objParts}, PutObjectOptions{})
+		completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
 	if err != nil {
 		return UploadInfo{}, err
 	}
3 vendor/github.com/minio/minio-go/v7/api-copy-object.go generated vendored
@ -20,7 +20,6 @@
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"net/http"
 )
 
@ -54,7 +53,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr
 
 	// Update the progress properly after successful copy.
 	if dst.Progress != nil {
-		io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size))
+		io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
 	}
 
 	cpObjRes := copyObjectResult{}
23 vendor/github.com/minio/minio-go/v7/api-error-response.go generated vendored
@ -22,7 +22,6 @@
 	"encoding/xml"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 )
 
@ -108,7 +107,7 @@ func (e ErrorResponse) Error() string {
 func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
 	// read the whole body (up to 1MB)
 	const maxBodyLength = 1 << 20
-	body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+	body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
 	if err != nil {
 		return nil, err
 	}
@ -253,26 +252,6 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
 	}
 }
 
-// errInvalidBucketName - Invalid bucket name response.
-func errInvalidBucketName(message string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusBadRequest,
-		Code:       "InvalidBucketName",
-		Message:    message,
-		RequestID:  "minio",
-	}
-}
-
-// errInvalidObjectName - Invalid object name response.
-func errInvalidObjectName(message string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusNotFound,
-		Code:       "NoSuchKey",
-		Message:    message,
-		RequestID:  "minio",
-	}
-}
-
 // errInvalidArgument - Invalid argument response.
 func errInvalidArgument(message string) error {
 	return ErrorResponse{
2 vendor/github.com/minio/minio-go/v7/api-list.go generated vendored
@ -897,6 +897,8 @@ func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyM
 }
 
 // listObjectParts list all object parts recursively.
+//
+//lint:ignore U1000 Keep this around
 func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
 	// Part number marker for the next batch of request.
 	var nextPartNumberMarker int
7 vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go generated vendored
@ -26,7 +26,6 @@
 	"fmt"
 	"hash/crc32"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"sort"
@ -201,7 +200,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
 
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))
-	opts = PutObjectOptions{}
+	opts = PutObjectOptions{
+		ServerSideEncryption: opts.ServerSideEncryption,
+	}
 	if len(crcBytes) > 0 {
 		// Add hash of hashes.
 		crc.Reset()
@ -412,7 +413,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
 
 	// Read resp.Body into a []bytes to parse for Error response inside the body
 	var b []byte
-	b, err = ioutil.ReadAll(resp.Body)
+	b, err = io.ReadAll(resp.Body)
 	if err != nil {
 		return UploadInfo{}, err
 	}
215 vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go generated vendored
@ -28,6 +28,7 @@
 	"net/url"
 	"sort"
 	"strings"
+	"sync"
 
 	"github.com/google/uuid"
 	"github.com/minio/minio-go/v7/pkg/s3utils"
@ -44,7 +45,9 @@
 func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
 	reader io.Reader, size int64, opts PutObjectOptions,
 ) (info UploadInfo, err error) {
-	if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
+	if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+		info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+	} else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
 		// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
 		info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
 	} else {
@ -266,6 +269,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))
 
+	opts = PutObjectOptions{
+		ServerSideEncryption: opts.ServerSideEncryption,
+	}
 	if withChecksum {
 		// Add hash of hashes.
 		crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
@ -278,7 +284,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 	}
 
-	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{})
+	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
 	if err != nil {
 		return UploadInfo{}, err
 	}
@ -425,6 +431,211 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
 	// Sort all completed parts.
 	sort.Sort(completedParts(complMultipartUpload.Parts))
 
+	opts = PutObjectOptions{
+		ServerSideEncryption: opts.ServerSideEncryption,
+	}
 	if len(crcBytes) > 0 {
 		// Add hash of hashes.
 		crc.Reset()
 		crc.Write(crcBytes)
 		opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
 	}
 	uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
 	if err != nil {
 		return UploadInfo{}, err
 	}
 
 	uploadInfo.Size = totalUploadedSize
 	return uploadInfo, nil
 }
 
+// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
+// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
+func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
+	reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return UploadInfo{}, err
+	}
+
+	if !opts.SendContentMd5 {
+		if opts.UserMetadata == nil {
+			opts.UserMetadata = make(map[string]string, 1)
+		}
+		opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+	}
+
+	// Cancel all when an error occurs.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	// Initiates a new multipart request
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+	// Aborts the multipart upload if the function returns
+	// any error, since we do not resume we should purge
+	// the parts which have been uploaded to relinquish
+	// storage space.
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Create checksums
+	// CRC32C is ~50% faster on AMD64 @ 30GB/s
+	var crcBytes []byte
+	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+	md5Hash := c.md5Hasher()
+	defer md5Hash.Close()
+
+	// Total data read and written to server. should be equal to 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Initialize parts uploaded map.
+	partsInfo := make(map[int]ObjectPart)
+
+	// Create a buffer.
+	nBuffers := int64(opts.NumThreads)
+	bufs := make(chan []byte, nBuffers)
+	all := make([]byte, nBuffers*partSize)
+	for i := int64(0); i < nBuffers; i++ {
+		bufs <- all[i*partSize : i*partSize+partSize]
+	}
+
+	var wg sync.WaitGroup
+	var mu sync.Mutex
+	errCh := make(chan error, opts.NumThreads)
+
+	reader = newHook(reader, opts.Progress)
+
+	// Part number always starts with '1'.
+	var partNumber int
+	for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+		// Proceed to upload the part.
+		var buf []byte
+		select {
+		case buf = <-bufs:
+		case err = <-errCh:
+			cancel()
+			wg.Wait()
+			return UploadInfo{}, err
+		}
+
+		if int64(len(buf)) != partSize {
+			return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize)
+		}
+
+		length, rerr := readFull(reader, buf)
+		if rerr == io.EOF && partNumber > 1 {
+			// Done
+			break
+		}
+
+		if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF {
+			cancel()
+			wg.Wait()
+			return UploadInfo{}, rerr
+		}
+
+		// Calculate md5sum.
+		customHeader := make(http.Header)
+		if !opts.SendContentMd5 {
+			// Add CRC32C instead.
+			crc.Reset()
+			crc.Write(buf[:length])
+			cSum := crc.Sum(nil)
+			customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+			crcBytes = append(crcBytes, cSum...)
+		}
+
+		wg.Add(1)
+		go func(partNumber int) {
+			// Avoid declaring variables in the for loop
+			var md5Base64 string
+
+			if opts.SendContentMd5 {
+				md5Hash.Reset()
+				md5Hash.Write(buf[:length])
+				md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+			}
+
+			defer wg.Done()
+			p := uploadPartParams{
+				bucketName:   bucketName,
+				objectName:   objectName,
+				uploadID:     uploadID,
+				reader:       bytes.NewReader(buf[:length]),
+				partNumber:   partNumber,
+				md5Base64:    md5Base64,
+				size:         int64(length),
+				sse:          opts.ServerSideEncryption,
+				streamSha256: !opts.DisableContentSha256,
+				customHeader: customHeader,
+			}
+			objPart, uerr := c.uploadPart(ctx, p)
+			if uerr != nil {
+				errCh <- uerr
+			}
+
+			// Save successfully uploaded part metadata.
+			mu.Lock()
+			partsInfo[partNumber] = objPart
+			mu.Unlock()
+
+			// Send buffer back so it can be reused.
+			bufs <- buf
+		}(partNumber)
+
+		// Save successfully uploaded size.
+		totalUploadedSize += int64(length)
+	}
+	wg.Wait()
+
+	// Collect any error
+	select {
+	case err = <-errCh:
+		return UploadInfo{}, err
+	default:
+	}
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Loop over total uploaded parts to save them in
+	// Parts array before completing the multipart request.
+	for i := 1; i < partNumber; i++ {
+		part, ok := partsInfo[i]
+		if !ok {
+			return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+		}
+		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+			ETag:           part.ETag,
+			PartNumber:     part.PartNumber,
+			ChecksumCRC32:  part.ChecksumCRC32,
+			ChecksumCRC32C: part.ChecksumCRC32C,
+			ChecksumSHA1:   part.ChecksumSHA1,
+			ChecksumSHA256: part.ChecksumSHA256,
+		})
+	}
+
+	// Sort all completed parts.
+	sort.Sort(completedParts(complMultipartUpload.Parts))
+
+	opts = PutObjectOptions{}
+	if len(crcBytes) > 0 {
+		// Add hash of hashes.
10 vendor/github.com/minio/minio-go/v7/api-put-object.go generated vendored
@ -87,7 +87,12 @@ type PutObjectOptions struct {
 	SendContentMd5       bool
 	DisableContentSha256 bool
 	DisableMultipart     bool
-	Internal             AdvancedPutOptions
+
+	// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
+	// fill them serially and upload them in parallel.
+	// This can be used for faster uploads on non-seekable or slow-to-seek input.
+	ConcurrentStreamParts bool
+	Internal              AdvancedPutOptions
 }
 
 // getNumThreads - gets the number of threads to be used in the multipart
@ -272,6 +277,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
 	if opts.DisableMultipart {
 		return UploadInfo{}, errors.New("no length provided and multipart disabled")
 	}
+	if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+		return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+	}
 	return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
 }
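A hedged sketch of using the new `ConcurrentStreamParts` knob end-to-end; the endpoint, credentials, and bucket below are placeholders, not values this commit configures:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minio", "miniostorage", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Size -1 forces the multipart "no length" path shown above;
	// ConcurrentStreamParts + NumThreads > 1 selects the parallel uploader.
	_, err = client.PutObject(context.Background(), "gts", "backup.tar",
		os.Stdin, -1, minio.PutObjectOptions{
			ConcurrentStreamParts: true,
			NumThreads:            4,
			PartSize:              16 << 20, // 16 MiB buffer per thread
		})
	if err != nil {
		log.Fatal(err)
	}
}
```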
3 vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go generated vendored
@ -24,7 +24,6 @@
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"strings"
 	"sync"
@ -107,7 +106,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
 			return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
 		}
 	} else {
-		f, err := ioutil.TempFile("", "s3-putsnowballobjects-*")
+		f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
 		if err != nil {
 			return err
 		}
2 vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go generated vendored
@ -316,8 +316,6 @@ type completeMultipartUploadResult struct {
 // CompletePart sub container lists individual part numbers and their
 // md5sum, part of completeMultipartUpload.
 type CompletePart struct {
-	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
-
 	// Part number identifies the part.
 	PartNumber int
 	ETag       string
32 vendor/github.com/minio/minio-go/v7/api-select.go generated vendored
@ -41,8 +41,8 @@
 // Constants for file header info.
 const (
 	CSVFileHeaderInfoNone   CSVFileHeaderInfo = "NONE"
-	CSVFileHeaderInfoIgnore                   = "IGNORE"
-	CSVFileHeaderInfoUse                      = "USE"
+	CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
+	CSVFileHeaderInfoUse    CSVFileHeaderInfo = "USE"
 )
 
 // SelectCompressionType - is the parameter for what type of compression is
@ -52,15 +52,15 @@
 // Constants for compression types under select API.
 const (
 	SelectCompressionNONE SelectCompressionType = "NONE"
-	SelectCompressionGZIP                       = "GZIP"
-	SelectCompressionBZIP                       = "BZIP2"
+	SelectCompressionGZIP SelectCompressionType = "GZIP"
+	SelectCompressionBZIP SelectCompressionType = "BZIP2"
 
 	// Non-standard compression schemes, supported by MinIO hosts:
 
-	SelectCompressionZSTD   = "ZSTD"   // Zstandard compression.
-	SelectCompressionLZ4    = "LZ4"    // LZ4 Stream
-	SelectCompressionS2     = "S2"     // S2 Stream
-	SelectCompressionSNAPPY = "SNAPPY" // Snappy stream
+	SelectCompressionZSTD   SelectCompressionType = "ZSTD"   // Zstandard compression.
+	SelectCompressionLZ4    SelectCompressionType = "LZ4"    // LZ4 Stream
+	SelectCompressionS2     SelectCompressionType = "S2"     // S2 Stream
+	SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
 )
 
 // CSVQuoteFields - is the parameter for how CSV fields are quoted.
@ -69,7 +69,7 @@
 // Constants for csv quote styles.
 const (
 	CSVQuoteFieldsAlways   CSVQuoteFields = "Always"
-	CSVQuoteFieldsAsNeeded                = "AsNeeded"
+	CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
 )
 
 // QueryExpressionType - is of what syntax the expression is, this should only
@ -87,7 +87,7 @@
 // Constants for JSONTypes.
 const (
 	JSONDocumentType JSONType = "DOCUMENT"
-	JSONLinesType             = "LINES"
+	JSONLinesType    JSONType = "LINES"
 )
 
 // ParquetInputOptions parquet input specific options
@ -378,8 +378,8 @@ func (o SelectObjectOptions) Header() http.Header {
 // Constants for input data types.
 const (
 	SelectObjectTypeCSV     SelectObjectType = "CSV"
-	SelectObjectTypeJSON                     = "JSON"
-	SelectObjectTypeParquet                  = "Parquet"
+	SelectObjectTypeJSON    SelectObjectType = "JSON"
+	SelectObjectTypeParquet SelectObjectType = "Parquet"
 )
 
 // preludeInfo is used for keeping track of necessary information from the
@ -416,7 +416,7 @@ type StatsMessage struct {
 
 const (
 	errorMsg  messageType = "error"
-	commonMsg             = "event"
+	commonMsg messageType = "event"
 )
 
 // eventType represents the type of event.
@ -425,9 +425,9 @@ type StatsMessage struct {
 // list of event-types returned by Select API.
 const (
 	endEvent      eventType = "End"
-	recordsEvent            = "Records"
-	progressEvent           = "Progress"
-	statsEvent              = "Stats"
+	recordsEvent  eventType = "Records"
+	progressEvent eventType = "Progress"
+	statsEvent    eventType = "Stats"
 )
 
 // contentType represents content type of event.
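The `api-select.go` changes all fix the same Go constant-block pitfall: when a const spec has an explicit value, it does not inherit the named type from the line above, so these constants were untyped strings. A small demonstration:

```go
package main

import "fmt"

type messageType string

const (
	errorMsg  messageType = "error"
	commonMsg             = "event" // explicit value: untyped string constant!
)

func main() {
	fmt.Printf("%T\n", errorMsg)  // main.messageType
	fmt.Printf("%T\n", commonMsg) // string
}
```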
13 vendor/github.com/minio/minio-go/v7/api.go generated vendored
@ -25,7 +25,6 @@
 	"fmt"
 	"hash/crc32"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"net"
 	"net/http"
@ -119,7 +118,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.44"
+	libraryVersion = "v7.0.47"
 )
 
 // User Agent should always following the below style.
@ -635,7 +634,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 	}
 
 	// Read the body to be saved later.
-	errBodyBytes, err := ioutil.ReadAll(res.Body)
+	errBodyBytes, err := io.ReadAll(res.Body)
 	// res.Body should be closed
 	closeResponse(res)
 	if err != nil {
@ -644,14 +643,14 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 
 	// Save the body.
 	errBodySeeker := bytes.NewReader(errBodyBytes)
-	res.Body = ioutil.NopCloser(errBodySeeker)
+	res.Body = io.NopCloser(errBodySeeker)
 
 	// For errors verify if its retryable otherwise fail quickly.
 	errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
 
 	// Save the body back again.
 	errBodySeeker.Seek(0, 0) // Seek back to starting point.
-	res.Body = ioutil.NopCloser(errBodySeeker)
+	res.Body = io.NopCloser(errBodySeeker)
 
 	// Bucket region if set in error response and the error
 	// code dictates invalid region, we can retry the request
@ -814,7 +813,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
 	if metadata.contentLength == 0 {
 		req.Body = nil
 	} else {
-		req.Body = ioutil.NopCloser(metadata.contentBody)
+		req.Body = io.NopCloser(metadata.contentBody)
 	}
 
 	// Set incoming content-length.
@ -846,7 +845,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
 		// Additionally, we also look if the initialized client is secure,
 		// if yes then we don't need to perform streaming signature.
 		req = signer.StreamingSignV4(req, accessKeyID,
-			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
+			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
 	default:
 		// Set sha256 sum for signature calculation only with signature version '4'.
 		shaHeader := unsignedPayload
124 vendor/github.com/minio/minio-go/v7/functional_tests.go generated vendored
@ -31,7 +31,6 @@
 	"hash"
 	"hash/crc32"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"mime/multipart"
 	"net/http"
@ -346,7 +345,7 @@ func getDataReader(fileName string) io.ReadCloser {
 		if _, ok := dataFileCRC32[fileName]; !ok {
 			dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
 		}
-		return ioutil.NopCloser(newRandomReader(size, size))
+		return io.NopCloser(newRandomReader(size, size))
 	}
 	reader, _ := os.Open(getMintDataDirFilePath(fileName))
 	if _, ok := dataFileCRC32[fileName]; !ok {
@ -989,7 +988,7 @@ function := "GetObject()"
 
 	for _, testFile := range testFiles {
 		r := getDataReader(testFile)
-		buf, err := ioutil.ReadAll(r)
+		buf, err := io.ReadAll(r)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "unexpected failure", err)
 			return
@ -1131,7 +1130,7 @@ function := "GetObject()"
 	var errs [n]error
 	for i := 0; i < n; i++ {
 		r := newRandomReader(int64((1<<20)*i+i), int64(i))
-		buf, err := ioutil.ReadAll(r)
+		buf, err := io.ReadAll(r)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "unexpected failure", err)
 			return
@ -1271,7 +1270,7 @@ function := "CopyObject()"
 	testFiles := []string{"datafile-1-b", "datafile-10-kB"}
 	for _, testFile := range testFiles {
 		r := getDataReader(testFile)
-		buf, err := ioutil.ReadAll(r)
+		buf, err := io.ReadAll(r)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "unexpected failure", err)
 			return
@ -1304,7 +1303,7 @@ function := "CopyObject()"
 		return
 	}
 
-	oldestContent, err := ioutil.ReadAll(reader)
+	oldestContent, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
 		return
@ -1338,7 +1337,7 @@ function := "CopyObject()"
 	}
 	defer readerCopy.Close()
 
-	newestContent, err := ioutil.ReadAll(readerCopy)
+	newestContent, err := io.ReadAll(readerCopy)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
 		return
@ -1408,7 +1407,7 @@ function := "CopyObject()"
 	testFiles := []string{"datafile-10-kB"}
 	for _, testFile := range testFiles {
 		r := getDataReader(testFile)
-		buf, err := ioutil.ReadAll(r)
+		buf, err := io.ReadAll(r)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "unexpected failure", err)
 			return
@ -1441,7 +1440,7 @@ function := "CopyObject()"
 		return
 	}
 
-	oldestContent, err := ioutil.ReadAll(reader)
+	oldestContent, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
 		return
@ -1491,7 +1490,7 @@ function := "CopyObject()"
 	}
 	defer readerCopy.Close()
 
-	newestContent, err := ioutil.ReadAll(readerCopy)
+	newestContent, err := io.ReadAll(readerCopy)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
 		return
@ -1571,7 +1570,7 @@ function := "ComposeObject()"
 
 	for _, testFile := range testFiles {
 		r := getDataReader(testFile)
-		buf, err := ioutil.ReadAll(r)
+		buf, err := io.ReadAll(r)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "unexpected failure", err)
 			return
@ -1633,7 +1632,7 @@ function := "ComposeObject()"
 	}
 	defer readerCopy.Close()
 
-	copyContentBytes, err := ioutil.ReadAll(readerCopy)
+	copyContentBytes, err := io.ReadAll(readerCopy)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
 		return
@ -1733,12 +1732,39 @@ function := "DeleteObject()"
 		logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err)
 		return
 	}
 
-	err = c.RemoveBucket(context.Background(), bucketName)
+	// test delete marker version id is non-null
+	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
 	if err != nil {
-		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
 		return
 	}
+	// create delete marker
+	err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "DeleteObject failed", err)
+		return
+	}
+	objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	idx := 0
+	for info := range objectsInfo {
+		if info.Err != nil {
+			logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
+			return
+		}
+		if idx == 0 {
+			if !info.IsDeleteMarker {
+				logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err)
+				return
+			}
+			if info.VersionID == "" {
+				logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err)
+				return
+			}
+		}
+		idx++
+	}
+
+	defer cleanupBucket(bucketName, c)
 
 	successLogger(testName, function, args, startTime).Info()
 }
@ -2461,7 +2487,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -2982,7 +3008,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
 	fileName := getMintDataDirFilePath("datafile-129-MB")
 	if fileName == "" {
 		// Make a temp file with minPartSize bytes of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+		file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
 		if err != nil {
 			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 			return
@ -3091,7 +3117,7 @@ function = "MakeBucket(bucketName, location)"
 	fName := getMintDataDirFilePath("datafile-129-MB")
 	if fName == "" {
 		// Make a temp file with minPartSize bytes of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+		file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
 		if err != nil {
 			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 			return
@ -3257,7 +3283,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
 	fName := getMintDataDirFilePath("datafile-1-MB")
 	if fName == "" {
 		// Make a temp file with 1 MiB bytes of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
+		file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
 		if err != nil {
 			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 			return
@ -3357,7 +3383,7 @@ function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)"
 	fName := getMintDataDirFilePath("datafile-1-MB")
 	if fName == "" {
 		// Make a temp file with 1 MiB bytes of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest")
+		file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
 		if err != nil {
 			logError(testName, function, args, startTime, "", "Temp file creation failed", err)
 			return
@ -3621,7 +3647,7 @@ function := "GetObject(bucketName, objectName)"
 		logError(testName, function, args, startTime, "", "file.Open failed", err)
 		return
 	}
-	want, err := ioutil.ReadAll(zfr)
+	want, err := io.ReadAll(zfr)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "fzip file read failed", err)
 		return
@ -3638,7 +3664,7 @@ function := "GetObject(bucketName, objectName)"
 		}
 		return
 	}
-	got, err := ioutil.ReadAll(r)
+	got, err := io.ReadAll(r)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -3722,7 +3748,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -3885,7 +3911,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -4062,7 +4088,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -4181,7 +4207,7 @@ function := "PresignedPostPolicy(policy)"
 	metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
 	metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -4245,7 +4271,7 @@ function := "PresignedPostPolicy(policy)"
 	filePath := getMintDataDirFilePath("datafile-33-kB")
 	if filePath == "" {
 		// Make a temp file with 33 KB data.
-		file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
+		file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
 		if err != nil {
 			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 			return
@ -4588,7 +4614,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -4770,7 +4796,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -4944,7 +4970,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -5127,7 +5153,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -6138,7 +6164,7 @@ functionAll += ", " + function
 		return
 	}
 
-	newReadBytes, err := ioutil.ReadAll(newReader)
+	newReadBytes, err := io.ReadAll(newReader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -6269,7 +6295,7 @@ functionAll += ", " + function
 		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
 		return
 	}
-	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	newPresignedBytes, err := io.ReadAll(resp.Body)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
 		return
@ -6312,7 +6338,7 @@ functionAll += ", " + function
 		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
 		return
 	}
-	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	newPresignedBytes, err = io.ReadAll(resp.Body)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
 		return
@ -6372,7 +6398,7 @@ functionAll += ", " + function
 		return
 	}
 
-	newReadBytes, err = ioutil.ReadAll(newReader)
+	newReadBytes, err = io.ReadAll(newReader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
 		return
@ -6428,7 +6454,7 @@ functionAll += ", " + function
 		return
 	}
 
-	newReadBytes, err = ioutil.ReadAll(newReader)
+	newReadBytes, err = io.ReadAll(newReader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
 		return
@ -6652,7 +6678,7 @@ function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
 		}
 		args["fileToUpload"] = fileName
 	} else {
-		tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
+		tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
 		if err != nil {
 			logError(testName, function, args, startTime, "", "TempFile create failed", err)
 			return
@ -6916,7 +6942,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
 	defer cleanupBucket(bucketName, c)
 
 	// Make a temp file with 11*1024*1024 bytes of data.
-	file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+	file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
 	if err != nil {
 		logError(testName, function, args, startTime, "", "TempFile creation failed", err)
 		return
@ -7145,7 +7171,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -7299,7 +7325,7 @@ function := "GetObject(bucketName, objectName)"
 	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
 	args["objectName"] = objectName
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -7837,7 +7863,7 @@ function = "CopyObject(dst, src)"
 	}
 	defer reader.Close()
 
-	decBytes, err := ioutil.ReadAll(reader)
+	decBytes, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
 		return
@ -7915,7 +7941,7 @@ function := "CopyObject(destination, source)"
 		return
 	}
 
-	decBytes, err := ioutil.ReadAll(reader)
+	decBytes, err := io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -7955,7 +7981,7 @@ function := "CopyObject(destination, source)"
 		return
 	}
 
-	decBytes, err = ioutil.ReadAll(reader)
+	decBytes, err = io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -7994,7 +8020,7 @@ function := "CopyObject(destination, source)"
 	}
 	defer reader.Close()
 
-	decBytes, err = ioutil.ReadAll(reader)
+	decBytes, err = io.ReadAll(reader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -11040,7 +11066,7 @@ functionAll += ", " + function
 		return
 	}
 
-	newReadBytes, err := ioutil.ReadAll(newReader)
+	newReadBytes, err := io.ReadAll(newReader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -11146,7 +11172,7 @@ functionAll += ", " + function
 		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
 		return
 	}
-	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+	newPresignedBytes, err := io.ReadAll(resp.Body)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -11185,7 +11211,7 @@ functionAll += ", " + function
 		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
 		return
 	}
-	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+	newPresignedBytes, err = io.ReadAll(resp.Body)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed", err)
 		return
@ -11239,7 +11265,7 @@ functionAll += ", " + function
 		return
 	}
 
-	newReadBytes, err = ioutil.ReadAll(newReader)
+	newReadBytes, err = io.ReadAll(newReader)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
 		return
@ -11553,7 +11579,7 @@ function := "GetObject(ctx, bucketName, objectName, fileName)"
 	}
 	for _, test := range tests {
 		wantRC := getDataReader("datafile-129-MB")
-		io.CopyN(ioutil.Discard, wantRC, test.start)
+		io.CopyN(io.Discard, wantRC, test.start)
 		want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
 		opts := minio.GetObjectOptions{}
 		opts.SetRange(test.start, test.end)
5 vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go generated vendored
@ -24,7 +24,6 @@
 	"encoding/xml"
 	"errors"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
@ -139,7 +138,7 @@ func closeResponse(resp *http.Response) {
 		// Without this closing connection would disallow re-using
 		// the same connection for future uses.
 		//  - http://stackoverflow.com/a/17961593/4465767
-		io.Copy(ioutil.Discard, resp.Body)
+		io.Copy(io.Discard, resp.Body)
 		resp.Body.Close()
 	}
 }
@ -191,7 +190,7 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
 	defer closeResponse(resp)
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleResponse{}, err
 		}
3 vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go generated vendored
@ -22,7 +22,6 @@
 	"encoding/xml"
 	"fmt"
 	"io"
-	"io/ioutil"
 )
 
 // ErrorResponse - Is the typed error returned.
@ -88,7 +87,7 @@ func xmlDecoder(body io.Reader, v interface{}) error {
 func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
 	// read the whole body (up to 1MB)
 	const maxBodyLength = 1 << 20
-	body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+	body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
 	if err != nil {
 		return nil, err
 	}
9 vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go generated vendored
@ -18,7 +18,6 @@
 package credentials
 
 import (
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@ -114,6 +113,7 @@ type hostConfig struct {
 type config struct {
 	Version string                `json:"version"`
 	Hosts   map[string]hostConfig `json:"hosts"`
+	Aliases map[string]hostConfig `json:"aliases"`
 }
 
 // loadAliass loads from the file pointed to by shared credentials filename for alias.
@ -123,12 +123,17 @@ func loadAlias(filename, alias string) (hostConfig, error) {
 	cfg := &config{}
 	json := jsoniter.ConfigCompatibleWithStandardLibrary
 
-	configBytes, err := ioutil.ReadFile(filename)
+	configBytes, err := os.ReadFile(filename)
 	if err != nil {
 		return hostConfig{}, err
 	}
 	if err = json.Unmarshal(configBytes, cfg); err != nil {
 		return hostConfig{}, err
 	}
+
+	if cfg.Version == "10" {
+		return cfg.Aliases[alias], nil
+	}
+
 	return cfg.Hosts[alias], nil
 }
6 vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go generated vendored
@ -22,7 +22,7 @@
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"net/url"
@ -106,7 +106,7 @@ func (m *IAM) Retrieve() (Value, error) {
 			Client:      m.Client,
 			STSEndpoint: endpoint,
 			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
-				token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
+				token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
 				if err != nil {
 					return nil, err
 				}
@ -268,7 +268,7 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
 		return "", err
 	}
 	defer resp.Body.Close()
-	data, err := ioutil.ReadAll(resp.Body)
+	data, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return "", err
 	}
4 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go generated vendored
@@ -22,7 +22,7 @@
 	"encoding/xml"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -138,7 +138,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleWithClientGrantsResponse{}, err
 		}
4 vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go generated vendored
@@ -21,7 +21,7 @@
 	"bytes"
 	"encoding/xml"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -156,7 +156,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return value, err
 		}
3
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
3
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
generated
vendored
@@ -21,7 +21,6 @@
 	"encoding/xml"
 	"errors"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
@@ -152,7 +151,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
 	}
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return Value{}, err
 		}
4
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
4
vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go
generated
vendored
@@ -22,7 +22,7 @@
 	"encoding/xml"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -155,7 +155,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleWithWebIdentityResponse{}, err
 		}
26 vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go generated vendored
@@ -34,19 +34,19 @@
 // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
 const (
 	ObjectCreatedAll                     EventType = "s3:ObjectCreated:*"
-	ObjectCreatedPut                               = "s3:ObjectCreated:Put"
-	ObjectCreatedPost                              = "s3:ObjectCreated:Post"
-	ObjectCreatedCopy                              = "s3:ObjectCreated:Copy"
-	ObjectCreatedCompleteMultipartUpload           = "s3:ObjectCreated:CompleteMultipartUpload"
-	ObjectAccessedGet                              = "s3:ObjectAccessed:Get"
-	ObjectAccessedHead                             = "s3:ObjectAccessed:Head"
-	ObjectAccessedAll                              = "s3:ObjectAccessed:*"
-	ObjectRemovedAll                               = "s3:ObjectRemoved:*"
-	ObjectRemovedDelete                            = "s3:ObjectRemoved:Delete"
-	ObjectRemovedDeleteMarkerCreated               = "s3:ObjectRemoved:DeleteMarkerCreated"
-	ObjectReducedRedundancyLostObject              = "s3:ReducedRedundancyLostObject"
-	BucketCreatedAll                               = "s3:BucketCreated:*"
-	BucketRemovedAll                               = "s3:BucketRemoved:*"
+	ObjectCreatedPut                     EventType = "s3:ObjectCreated:Put"
+	ObjectCreatedPost                    EventType = "s3:ObjectCreated:Post"
+	ObjectCreatedCopy                    EventType = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectAccessedGet                    EventType = "s3:ObjectAccessed:Get"
+	ObjectAccessedHead                   EventType = "s3:ObjectAccessed:Head"
+	ObjectAccessedAll                    EventType = "s3:ObjectAccessed:*"
+	ObjectRemovedAll                     EventType = "s3:ObjectRemoved:*"
+	ObjectRemovedDelete                  EventType = "s3:ObjectRemoved:Delete"
+	ObjectRemovedDeleteMarkerCreated     EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
+	ObjectReducedRedundancyLostObject    EventType = "s3:ReducedRedundancyLostObject"
+	BucketCreatedAll                     EventType = "s3:BucketCreated:*"
+	BucketRemovedAll                     EventType = "s3:BucketRemoved:*"
 )

 // FilterRule - child of S3Key, a tag in the notification xml which
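The notification.go hunk fixes a Go constant subtlety: inside a const block, a constant that declares its own string expression but no type does not inherit the EventType of the line before it, so all but the first constant were untyped strings. A minimal sketch of the difference:

```go
package main

import "fmt"

type EventType string

const (
	// Pre-change shape: only the first constant carries the type. A
	// constant with its own "= ..." expression does NOT inherit the
	// type of the previous line, so OldPut is an untyped string constant.
	OldAll EventType = "s3:ObjectCreated:*"
	OldPut           = "s3:ObjectCreated:Put"

	// Post-change shape: explicitly typed on every line.
	NewPut EventType = "s3:ObjectCreated:Put"
)

func main() {
	var s string = OldPut // compiles: untyped constants convert freely
	// var t string = NewPut // would NOT compile: NewPut has type EventType
	fmt.Println(s, OldAll, NewPut)
}
```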
4 vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go generated vendored
@@ -700,6 +700,10 @@ type TargetMetrics struct {
 	PendingCount uint64 `json:"pendingReplicationCount"`
 	// Total number of failed operations including metadata updates
 	FailedCount uint64 `json:"failedReplicationCount"`
+	// Bandwidth limit in bytes/sec for this target
+	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
+	// Current bandwidth used in bytes/sec for this target
+	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
 }

 // Metrics represents inline replication metrics for a bucket.
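One quirk worth noting in the added fields: BandWidthLimitInBytesPerSecond is documented as bytes per second but serialized under the key limitInBits. It is the struct tag, not the field name, that appears on the wire, as this trimmed-down sketch (field set assumed from the hunk above) shows:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copy of the vendored TargetMetrics fields for illustration.
type TargetMetrics struct {
	// Bytes per second, but serialized under "limitInBits".
	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
	// Current bandwidth used in bytes/sec for this target.
	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
}

func main() {
	out, _ := json.Marshal(TargetMetrics{
		BandWidthLimitInBytesPerSecond:   1 << 20,
		CurrentBandwidthInBytesPerSecond: 512.5,
	})
	fmt.Println(string(out)) // {"limitInBits":1048576,"currentBandwidth":512.5}
}
```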
@@ -21,7 +21,6 @@
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"strconv"
 	"strings"
@@ -132,7 +131,7 @@ func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64,
 	prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)

 	if req.Body == nil {
-		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+		req.Body = io.NopCloser(bytes.NewReader([]byte("")))
 	}

 	stReader := &StreamingUSReader{
40 vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go generated vendored
@@ -22,11 +22,12 @@
 	"encoding/hex"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
+
+	md5simd "github.com/minio/md5-simd"
 )

 // Reference for constants used below -
@@ -91,14 +92,14 @@ func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {

 // buildChunkStringToSign - returns the string to sign given chunk data
 // and previous signature.
-func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
 	stringToSignParts := []string{
 		streamingPayloadHdr,
 		t.Format(iso8601DateFormat),
 		getScope(region, t, ServiceTypeS3),
 		previousSig,
 		emptySHA256,
-		hex.EncodeToString(sum256(chunkData)),
+		chunkChecksum,
 	}

 	return strings.Join(stringToSignParts, "\n")
@@ -106,13 +107,13 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [

 // buildTrailerChunkStringToSign - returns the string to sign given chunk data
 // and previous signature.
-func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
 	stringToSignParts := []string{
 		streamingTrailerHdr,
 		t.Format(iso8601DateFormat),
 		getScope(region, t, ServiceTypeS3),
 		previousSig,
-		hex.EncodeToString(sum256(chunkData)),
+		chunkChecksum,
 	}

 	return strings.Join(stringToSignParts, "\n")
@@ -149,21 +150,21 @@ func buildChunkHeader(chunkLen int64, signature string) []byte {
 }

 // buildChunkSignature - returns chunk signature for a given chunk and previous signature.
-func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
 	previousSignature, secretAccessKey string,
 ) string {
 	chunkStringToSign := buildChunkStringToSign(reqTime, region,
-		previousSignature, chunkData)
+		previousSignature, chunkCheckSum)
 	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
 	return getSignature(signingKey, chunkStringToSign)
 }

 // buildChunkSignature - returns chunk signature for a given chunk and previous signature.
-func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region,
+func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
 	previousSignature, secretAccessKey string,
 ) string {
 	chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
-		previousSignature, chunkData)
+		previousSignature, chunkChecksum)
 	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
 	return getSignature(signingKey, chunkStringToSign)
 }
@@ -203,12 +204,17 @@ type StreamingReader struct {
 	totalChunks     int
 	lastChunkSize   int
 	trailer         http.Header
+	sh256           md5simd.Hasher
 }

 // signChunk - signs a chunk read from s.baseReader of chunkLen size.
 func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
 	// Compute chunk signature for next header
-	signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+	s.sh256.Reset()
+	s.sh256.Write(s.chunkBuf[:chunkLen])
+	chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+
+	signature := buildChunkSignature(chunckChecksum, s.reqTime,
 		s.region, s.prevSignature, s.secretAccessKey)

 	// For next chunk signature computation
@@ -240,8 +246,11 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) {
 		s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
 	}

+	s.sh256.Reset()
+	s.sh256.Write(s.chunkBuf)
+	chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
 	// Compute chunk signature
-	signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime,
+	signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
 		s.region, s.prevSignature, s.secretAccessKey)

 	// For next chunk signature computation
@@ -274,13 +283,13 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
 // StreamingSignV4 - provides chunked upload signatureV4 support by
 // implementing io.Reader.
 func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
-	region string, dataLen int64, reqTime time.Time,
+	region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
 ) *http.Request {
 	// Set headers needed for streaming signature.
 	prepareStreamingRequest(req, sessionToken, dataLen, reqTime)

 	if req.Body == nil {
-		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+		req.Body = io.NopCloser(bytes.NewReader([]byte("")))
 	}

 	stReader := &StreamingReader{
@@ -295,6 +304,7 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
 		chunkNum:        1,
 		totalChunks:     int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
 		lastChunkSize:   int(dataLen % payloadChunkSize),
+		sh256:           sh256,
 	}
 	if len(req.Trailer) > 0 {
 		stReader.trailer = req.Trailer
@@ -385,5 +395,9 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {

 // Close - this method makes underlying io.ReadCloser's Close method available.
 func (s *StreamingReader) Close() error {
+	if s.sh256 != nil {
+		s.sh256.Close()
+		s.sh256 = nil
+	}
 	return s.baseReadCloser.Close()
 }
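The signer rewrite above replaces a fresh SHA-256 allocation per chunk with one reusable hasher threaded through StreamingReader (an md5simd.Hasher, which can additionally batch hashing work with SIMD). A minimal sketch of the reset-and-reuse pattern using only the standard library:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	chunks := [][]byte{[]byte("chunk-1"), []byte("chunk-2")}

	// One hash.Hash reused across chunks, mirroring how the vendored
	// StreamingReader resets and reuses its sh256 field in signChunk.
	h := sha256.New()
	for _, c := range chunks {
		h.Reset()
		h.Write(c)
		fmt.Println(hex.EncodeToString(h.Sum(nil)))
	}
}
```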
2 vendor/github.com/minio/minio-go/v7/post-policy.go generated vendored
@@ -25,7 +25,7 @@
 )

 // expirationDateFormat date format for expiration key in json policy.
-const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"

 // policyCondition explanation:
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
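That one-character change matters in Go's reference-time layout: .999 trims trailing fractional zeros (dropping the fraction entirely on whole seconds), while .000 always emits exactly three digits, which strict policy-date parsers expect. A quick demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A timestamp that lands exactly on a second boundary.
	t := time.Date(2023, 1, 2, 15, 4, 5, 0, time.UTC)

	// ".999" drops trailing zeros, and the dot with them...
	fmt.Println(t.Format("2006-01-02T15:04:05.999Z")) // 2023-01-02T15:04:05Z
	// ...while ".000" always pads to three fractional digits.
	fmt.Println(t.Format("2006-01-02T15:04:05.000Z")) // 2023-01-02T15:04:05.000Z
}
```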
3 vendor/github.com/minio/minio-go/v7/transport.go generated vendored
@@ -23,7 +23,6 @@
 import (
 	"crypto/tls"
 	"crypto/x509"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"os"
@@ -73,7 +72,7 @@ func mustGetSystemCertPool() *x509.CertPool {
 	}
 	if f := os.Getenv("SSL_CERT_FILE"); f != "" {
 		rootCAs := mustGetSystemCertPool()
-		data, err := ioutil.ReadFile(f)
+		data, err := os.ReadFile(f)
 		if err == nil {
 			rootCAs.AppendCertsFromPEM(data)
 		}
3 vendor/github.com/minio/minio-go/v7/utils.go generated vendored
@@ -28,7 +28,6 @@
 	"fmt"
 	"hash"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"net"
 	"net/http"
@@ -142,7 +141,7 @@ func closeResponse(resp *http.Response) {
 		// Without this closing connection would disallow re-using
 		// the same connection for future uses.
 		//  - http://stackoverflow.com/a/17961593/4465767
-		io.Copy(ioutil.Discard, resp.Body)
+		io.Copy(io.Discard, resp.Body)
 		resp.Body.Close()
 	}
 }
8 vendor/modules.txt vendored
@@ -21,8 +21,8 @@ codeberg.org/gruf/go-cache/v3/ttl
 # codeberg.org/gruf/go-debug v1.2.0
 ## explicit; go 1.16
 codeberg.org/gruf/go-debug
-# codeberg.org/gruf/go-errors/v2 v2.0.2
-## explicit; go 1.16
+# codeberg.org/gruf/go-errors/v2 v2.1.1
+## explicit; go 1.19
 codeberg.org/gruf/go-errors/v2
 # codeberg.org/gruf/go-fastcopy v1.1.2
 ## explicit; go 1.17
@@ -297,7 +297,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.44
+# github.com/minio/minio-go/v7 v7.0.47
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials
@@ -709,7 +709,7 @@ golang.org/x/net/internal/socket
 golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/publicsuffix
-# golang.org/x/oauth2 v0.3.0
+# golang.org/x/oauth2 v0.4.0
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal