Merge branch 'main' into settings-refactor

f0x52 2023-01-17 21:42:06 +01:00 committed by GitHub
commit 772231d24e
64 changed files with 1030 additions and 406 deletions


@@ -107,4 +107,36 @@ db-tls-mode: "disable"
# Examples: ["/path/to/some/cert.crt"]
# Default: ""
db-tls-ca-cert: ""
# String. SQLite journaling mode.
# SQLite only -- unused otherwise.
# If set to empty string, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_journal_mode
# Examples: ["DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "OFF"]
# Default: "WAL"
db-sqlite-journal-mode: "WAL"
# String. SQLite synchronous mode.
# SQLite only -- unused otherwise.
# If set to empty string, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_synchronous
# Examples: ["OFF", "NORMAL", "FULL", "EXTRA"]
# Default: "NORMAL"
db-sqlite-synchronous: "NORMAL"
# Byte size. SQLite cache size.
# SQLite only -- unused otherwise.
# If set to empty string or zero, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_cache_size
# Examples: ["32MiB", "0", "64MiB"]
# Default: "64MiB"
db-sqlite-cache-size: "64MiB"
# Duration. SQLite busy timeout.
# SQLite only -- unused otherwise.
# If set to empty string or zero, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_busy_timeout
# Examples: ["0s", "1s", "30s", "1m", "5m"]
# Default: "5s"
db-sqlite-busy-timeout: "30s"
```
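For reference, the four settings above take effect as SQLite `PRAGMA` statements executed when the connection opens. A minimal sketch of that pattern, assuming the `modernc.org/sqlite` driver (the helper below is illustrative, not GoToSocial's actual code; the real implementation is the `sqlitePragmas` function added later in this commit):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "modernc.org/sqlite" // pure-Go driver, as used by GoToSocial
)

// openSQLite shows how the settings above become PRAGMA statements.
// (Illustrative helper; values mirror the documented defaults.)
func openSQLite(path string) (*sql.DB, error) {
	db, err := sql.Open("sqlite", "file:"+path)
	if err != nil {
		return nil, err
	}
	for _, pragma := range []string{
		"PRAGMA journal_mode=WAL",
		"PRAGMA synchronous=NORMAL",
		"PRAGMA cache_size=-65536",  // negative = KiB, so 64MiB
		"PRAGMA busy_timeout=30000", // milliseconds, so 30s
	} {
		if _, err := db.Exec(pragma); err != nil {
			return nil, fmt.Errorf("executing %q: %w", pragma, err)
		}
	}
	return db, nil
}

func main() {
	db, err := openSQLite("test.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println("sqlite ready")
}
```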


@@ -2,8 +2,6 @@
- **Where's the user interface?** GoToSocial is just a bare server for the most part and is designed to be used thru external applications. [Pinafore](https://pinafore.social) and [Tusky](https://tusky.app/) are the best-supported, but anything that supports the Mastodon API should work, other than the features GoToSocial doesn't yet have. Permalinks and profile pages are served directly thru GoToSocial, as is the admin panel, but most interaction goes thru the apps.
-- **What happened to the gifs?** While GoToSocial supports gifs, it doesn't support videos. This wouldn't be a big problem, except that Mastodon doesn't support gifs; it converts them into videos when they get uploaded. So if someone posts a gif from a Mastodon server, it won't be visible. At the time of this writing, the video will be dropped altogether, but [in the future there should be at least a placeholder link](https://github.com/superseriousbusiness/gotosocial/issues/765).
- **Why aren't my posts showing up on my profile page?** Unlike Mastodon, the default post visibility is Unlisted. If you want something to be visible on your profile page, the post must have Public visibility.
- **Why aren't my posts showing up on other servers?** First check the visibility as noted above. TODO: explain how to debug common federation issues
@@ -17,7 +15,6 @@
- **How can I sign up for a server?** Right now the only way to create an account is for the server's admin to run a command directly on the server. A web-based signup flow is on the roadmap but not implemented yet.
- **Why's it still in alpha?** Take a look at the [list of open bugs](https://github.com/superseriousbusiness/gotosocial/issues?q=is%3Aissue+is%3Aopen+label%3Abug) and the [roadmap](https://github.com/superseriousbusiness/gotosocial/blob/main/ROADMAP.md) for a more detailed rundown, but the main missing features at the time of this writing are:
-* videos
* reporting posts to admins
* muting conversations
* backfill of posts


@@ -164,6 +164,38 @@ db-tls-mode: "disable"
# Default: ""
db-tls-ca-cert: ""
# String. SQLite journaling mode.
# SQLite only -- unused otherwise.
# If set to empty string, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_journal_mode
# Examples: ["DELETE", "TRUNCATE", "PERSIST", "MEMORY", "WAL", "OFF"]
# Default: "WAL"
db-sqlite-journal-mode: "WAL"
# String. SQLite synchronous mode.
# SQLite only -- unused otherwise.
# If set to empty string, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_synchronous
# Examples: ["OFF", "NORMAL", "FULL", "EXTRA"]
# Default: "NORMAL"
db-sqlite-synchronous: "NORMAL"
# Byte size. SQLite cache size.
# SQLite only -- unused otherwise.
# If set to empty string or zero, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_cache_size
# Examples: ["32MiB", "0", "64MiB"]
# Default: "64MiB"
db-sqlite-cache-size: "64MiB"
# Duration. SQLite busy timeout.
# SQLite only -- unused otherwise.
# If set to empty string or zero, the sqlite default will be used.
# See: https://www.sqlite.org/pragma.html#pragma_busy_timeout
# Examples: ["0s", "1s", "30s", "1m", "5m"]
# Default: "5s"
db-sqlite-busy-timeout: "30s"
cache:
  gts:
    ###########################

go.mod

@@ -7,7 +7,7 @@ require (
codeberg.org/gruf/go-byteutil v1.0.2
codeberg.org/gruf/go-cache/v3 v3.2.2
codeberg.org/gruf/go-debug v1.2.0
-codeberg.org/gruf/go-errors/v2 v2.0.2
codeberg.org/gruf/go-errors/v2 v2.1.1
codeberg.org/gruf/go-kv v1.5.2
codeberg.org/gruf/go-logger/v2 v2.2.1
codeberg.org/gruf/go-mutexes v1.1.5
@@ -32,7 +32,7 @@ require (
github.com/jackc/pgx/v4 v4.17.2
github.com/microcosm-cc/bluemonday v1.0.21
github.com/miekg/dns v1.1.50
-github.com/minio/minio-go/v7 v7.0.44
github.com/minio/minio-go/v7 v7.0.47
github.com/mitchellh/mapstructure v1.5.0
github.com/oklog/ulid v1.3.1
github.com/robfig/cron/v3 v3.0.1
@@ -53,7 +53,7 @@ require (
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
golang.org/x/image v0.3.0
golang.org/x/net v0.5.0
-golang.org/x/oauth2 v0.3.0
golang.org/x/oauth2 v0.4.0
golang.org/x/text v0.6.0
gopkg.in/mcuadros/go-syslog.v2 v2.3.0
gopkg.in/yaml.v3 v3.0.1

go.sum

@@ -54,8 +54,8 @@ codeberg.org/gruf/go-cache/v3 v3.2.2/go.mod h1:+Eje6nCvN8QF71VyYjMWMnkdv6t1kHnCO
codeberg.org/gruf/go-debug v1.2.0 h1:WBbTMnK1ArFKUmgv04aO2JiC/daTOB8zQGi521qb7OU=
codeberg.org/gruf/go-debug v1.2.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
-codeberg.org/gruf/go-errors/v2 v2.0.2 h1:T9CqfC+ntSIQL5mdQxwHlUMod1htpgNe3P1tugxKlT4=
-codeberg.org/gruf/go-errors/v2 v2.0.2/go.mod h1:6sI75OmvXE2AtRm4WUyGMEyqEOKTsfe+CA+aBXwbtJY=
codeberg.org/gruf/go-errors/v2 v2.1.1 h1:oj7JUIvUBafF60HrwN74JrCMol1Ouh3gq1ggrH5hGTw=
codeberg.org/gruf/go-errors/v2 v2.1.1/go.mod h1:LfzD9nkAAJpEDbkUqOZQ2jdaQ8VrK0pnR36zLOMFq6Y=
codeberg.org/gruf/go-fastcopy v1.1.2 h1:YwmYXPsyOcRBxKEE2+w1bGAZfclHVaPijFsOVOcnNcw=
codeberg.org/gruf/go-fastcopy v1.1.2/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s=
codeberg.org/gruf/go-fastpath v1.0.1/go.mod h1:edveE/Kp3Eqi0JJm0lXYdkVrB28cNUkcb/bRGFTPqeI=
@@ -418,8 +418,8 @@ github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.44 h1:9zUJ7iU7ax2P1jOvTp6nVrgzlZq3AZlFm0XfRFDKstM=
-github.com/minio/minio-go/v7 v7.0.44/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs=
github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
@@ -729,8 +729,9 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@@ -201,7 +201,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessful() {
Size: "512x288",
Aspect: 1.7777778,
},
-Focus: apimodel.MediaFocus{
Focus: &apimodel.MediaFocus{
X: -0.5,
Y: 0.5,
},
@@ -290,7 +290,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessfulV2() {
Size: "512x288",
Aspect: 1.7777778,
},
-Focus: apimodel.MediaFocus{
Focus: &apimodel.MediaFocus{
X: -0.5,
Y: 0.5,
},


@@ -172,7 +172,7 @@ func (suite *MediaUpdateTestSuite) TestUpdateImage() {
suite.EqualValues(apimodel.MediaMeta{
Original: apimodel.MediaDimensions{Width: 800, Height: 450, FrameRate: "", Duration: 0, Bitrate: 0, Size: "800x450", Aspect: 1.7777778},
Small: apimodel.MediaDimensions{Width: 256, Height: 144, FrameRate: "", Duration: 0, Bitrate: 0, Size: "256x144", Aspect: 1.7777778},
-Focus: apimodel.MediaFocus{X: -0.1, Y: 0.3},
Focus: &apimodel.MediaFocus{X: -0.1, Y: 0.3},
}, attachmentReply.Meta)
suite.Equal(toUpdate.Blurhash, attachmentReply.Blurhash)
suite.Equal(toUpdate.ID, attachmentReply.ID)


@@ -29,6 +29,7 @@
apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model"
apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/iotools"
"github.com/superseriousbusiness/gotosocial/internal/log"
"github.com/superseriousbusiness/gotosocial/internal/oauth"
)
@@ -128,8 +129,34 @@ func (m *Module) ServeFile(c *gin.Context) {
return
}

-// we're good, return the slurped bytes + the rest of the content
-c.DataFromReader(http.StatusOK, content.ContentLength, format, io.MultiReader(
-bytes.NewReader(b), content.Content,
-), nil)
// reconstruct the original content reader
r := io.MultiReader(bytes.NewReader(b), content.Content)

// Check the Range header: if this is a simple query for the whole file, we can return it now.
if c.GetHeader("Range") == "" && c.GetHeader("If-Range") == "" {
c.DataFromReader(http.StatusOK, content.ContentLength, format, r, nil)
return
}
// Range is set, so we need a ReadSeeker to pass to the ServeContent function.
tfs, err := iotools.TempFileSeeker(r)
if err != nil {
err = fmt.Errorf("ServeFile: error creating temp file seeker: %w", err)
apiutil.ErrorHandler(c, gtserror.NewErrorInternalError(err), m.processor.InstanceGet)
return
}
defer func() {
if err := tfs.Close(); err != nil {
log.Errorf("ServeFile: error closing temp file seeker: %s", err)
}
}()
// to avoid ServeContent wasting time seeking for the
// mime type, set this header already since we know it
c.Header("Content-Type", format)
// allow ServeContent to handle the rest of the request;
// it will handle Range as appropriate, and write correct
// response headers, http code, etc
http.ServeContent(c.Writer, c.Request, fileName, content.ContentUpdated, tfs)
}
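The crux of this hunk: `http.ServeContent` needs an `io.ReadSeeker`, since Range handling requires knowing the total size and jumping to arbitrary offsets, which a streaming `io.Reader` can't do. A standalone sketch of the same pattern (the file name is made up):

```go
package main

import (
	"net/http"
	"os"
	"time"
)

func main() {
	http.HandleFunc("/file", func(w http.ResponseWriter, r *http.Request) {
		f, err := os.Open("example.bin") // *os.File implements io.ReadSeeker
		if err != nil {
			http.Error(w, "not found", http.StatusNotFound)
			return
		}
		defer f.Close()

		// ServeContent seeks to determine the size, honours Range and
		// If-Range headers, writes 206 Partial Content where needed, and
		// sets Content-Range/Accept-Ranges for us -- none of which is
		// possible with a plain io.Reader.
		http.ServeContent(w, r, "example.bin", time.Time{}, f)
	})
	http.ListenAndServe(":8080", nil)
}
```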


@@ -98,40 +98,12 @@ type Attachment struct {
//
// swagger:model mediaMeta
type MediaMeta struct {
-Length string `json:"length,omitempty"`
-// Duration of the media in seconds.
-// Only set for video and audio.
-// example: 5.43
-Duration float32 `json:"duration,omitempty"`
-// Framerate of the media.
-// Only set for video and gifs.
-// example: 30
-FPS uint16 `json:"fps,omitempty"`
-// Size of the media, in the format `[width]x[height]`.
-// Not set for audio.
-// example: 1920x1080
-Size string `json:"size,omitempty"`
-// Width of the media in pixels.
-// Not set for audio.
-// example: 1920
-Width int `json:"width,omitempty"`
-// Height of the media in pixels.
-// Not set for audio.
-// example: 1080
-Height int `json:"height,omitempty"`
-// Aspect ratio of the media.
-// Equal to width / height.
-// example: 1.777777778
-Aspect float32 `json:"aspect,omitempty"`
-AudioEncode string `json:"audio_encode,omitempty"`
-AudioBitrate string `json:"audio_bitrate,omitempty"`
-AudioChannels string `json:"audio_channels,omitempty"`
// Dimensions of the original media.
Original MediaDimensions `json:"original"`
// Dimensions of the thumbnail/small version of the media.
Small MediaDimensions `json:"small,omitempty"`
// Focus data for the media.
-Focus MediaFocus `json:"focus,omitempty"`
Focus *MediaFocus `json:"focus,omitempty"`
}

// MediaFocus models the focal point of a piece of media.
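The switch from `MediaFocus` to `*MediaFocus` changes the JSON output: `encoding/json` never considers a non-pointer struct "empty" for `omitempty`, so the value field always serialized as `"focus":{"x":0,"y":0}`, while a nil pointer is genuinely omitted. A quick demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type focus struct {
	X float32 `json:"x"`
	Y float32 `json:"y"`
}

type valueField struct {
	Focus focus `json:"focus,omitempty"` // struct value: never "empty"
}

type pointerField struct {
	Focus *focus `json:"focus,omitempty"` // nil pointer: omitted
}

func main() {
	v, _ := json.Marshal(valueField{})
	p, _ := json.Marshal(pointerField{})
	fmt.Println(string(v)) // {"focus":{"x":0,"y":0}}
	fmt.Println(string(p)) // {}
}
```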


@@ -21,6 +21,7 @@
import (
"io"
"net/url"
"time"
)

// Content wraps everything needed to serve a blob of content (some kind of media) through the API.
@@ -29,6 +30,8 @@ type Content struct {
ContentType string
// ContentLength in bytes
ContentLength int64
// Time when the content was last updated.
ContentUpdated time.Time
// Actual content
Content io.ReadCloser
// Resource URL to forward to if the file can be fetched from the storage directly (e.g signed S3 URL)


@@ -58,14 +58,18 @@ type Configuration struct {
TrustedProxies []string `name:"trusted-proxies" usage:"Proxies to trust when parsing x-forwarded headers into real IPs."`
SoftwareVersion string `name:"software-version" usage:""`
DbType string `name:"db-type" usage:"Database type: eg., postgres"`
DbAddress string `name:"db-address" usage:"Database ipv4 address, hostname, or filename"`
DbPort int `name:"db-port" usage:"Database port"`
DbUser string `name:"db-user" usage:"Database username"`
DbPassword string `name:"db-password" usage:"Database password"`
DbDatabase string `name:"db-database" usage:"Database name"`
DbTLSMode string `name:"db-tls-mode" usage:"Database tls mode"`
DbTLSCACert string `name:"db-tls-ca-cert" usage:"Path to CA cert for db tls connection"`
DbSqliteJournalMode string `name:"db-sqlite-journal-mode" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_journal_mode"`
DbSqliteSynchronous string `name:"db-sqlite-synchronous" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_synchronous"`
DbSqliteCacheSize bytesize.Size `name:"db-sqlite-cache-size" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_cache_size"`
DbSqliteBusyTimeout time.Duration `name:"db-sqlite-busy-timeout" usage:"Sqlite only: see https://www.sqlite.org/pragma.html#pragma_busy_timeout"`
WebTemplateBaseDir string `name:"web-template-base-dir" usage:"Basedir for html templating files for rendering pages and composing emails."`
WebAssetBaseDir string `name:"web-asset-base-dir" usage:"Directory to serve static assets from, accessible at example.org/assets/"`


@@ -40,14 +40,18 @@
Port: 8080,
TrustedProxies: []string{"127.0.0.1/32", "::1"}, // localhost
DbType: "postgres",
DbAddress: "",
DbPort: 5432,
DbUser: "",
DbPassword: "",
DbDatabase: "gotosocial",
DbTLSMode: "disable",
DbTLSCACert: "",
DbSqliteJournalMode: "WAL",
DbSqliteSynchronous: "NORMAL",
DbSqliteCacheSize: 64 * bytesize.MiB,
DbSqliteBusyTimeout: time.Second * 30,
WebTemplateBaseDir: "./web/template/",
WebAssetBaseDir: "./web/assets/",


@@ -51,6 +51,10 @@ func (s *ConfigState) AddGlobalFlags(cmd *cobra.Command) {
cmd.PersistentFlags().String(DbDatabaseFlag(), cfg.DbDatabase, fieldtag("DbDatabase", "usage"))
cmd.PersistentFlags().String(DbTLSModeFlag(), cfg.DbTLSMode, fieldtag("DbTLSMode", "usage"))
cmd.PersistentFlags().String(DbTLSCACertFlag(), cfg.DbTLSCACert, fieldtag("DbTLSCACert", "usage"))
cmd.PersistentFlags().String(DbSqliteJournalModeFlag(), cfg.DbSqliteJournalMode, fieldtag("DbSqliteJournalMode", "usage"))
cmd.PersistentFlags().String(DbSqliteSynchronousFlag(), cfg.DbSqliteSynchronous, fieldtag("DbSqliteSynchronous", "usage"))
cmd.PersistentFlags().Uint64(DbSqliteCacheSizeFlag(), uint64(cfg.DbSqliteCacheSize), fieldtag("DbSqliteCacheSize", "usage"))
cmd.PersistentFlags().Duration(DbSqliteBusyTimeoutFlag(), cfg.DbSqliteBusyTimeout, fieldtag("DbSqliteBusyTimeout", "usage"))
})
}


@@ -524,6 +524,106 @@ func GetDbTLSCACert() string { return global.GetDbTLSCACert() }
// SetDbTLSCACert safely sets the value for global configuration 'DbTLSCACert' field
func SetDbTLSCACert(v string) { global.SetDbTLSCACert(v) }
// GetDbSqliteJournalMode safely fetches the Configuration value for state's 'DbSqliteJournalMode' field
func (st *ConfigState) GetDbSqliteJournalMode() (v string) {
st.mutex.Lock()
v = st.config.DbSqliteJournalMode
st.mutex.Unlock()
return
}
// SetDbSqliteJournalMode safely sets the Configuration value for state's 'DbSqliteJournalMode' field
func (st *ConfigState) SetDbSqliteJournalMode(v string) {
st.mutex.Lock()
defer st.mutex.Unlock()
st.config.DbSqliteJournalMode = v
st.reloadToViper()
}
// DbSqliteJournalModeFlag returns the flag name for the 'DbSqliteJournalMode' field
func DbSqliteJournalModeFlag() string { return "db-sqlite-journal-mode" }
// GetDbSqliteJournalMode safely fetches the value for global configuration 'DbSqliteJournalMode' field
func GetDbSqliteJournalMode() string { return global.GetDbSqliteJournalMode() }
// SetDbSqliteJournalMode safely sets the value for global configuration 'DbSqliteJournalMode' field
func SetDbSqliteJournalMode(v string) { global.SetDbSqliteJournalMode(v) }
// GetDbSqliteSynchronous safely fetches the Configuration value for state's 'DbSqliteSynchronous' field
func (st *ConfigState) GetDbSqliteSynchronous() (v string) {
st.mutex.Lock()
v = st.config.DbSqliteSynchronous
st.mutex.Unlock()
return
}
// SetDbSqliteSynchronous safely sets the Configuration value for state's 'DbSqliteSynchronous' field
func (st *ConfigState) SetDbSqliteSynchronous(v string) {
st.mutex.Lock()
defer st.mutex.Unlock()
st.config.DbSqliteSynchronous = v
st.reloadToViper()
}
// DbSqliteSynchronousFlag returns the flag name for the 'DbSqliteSynchronous' field
func DbSqliteSynchronousFlag() string { return "db-sqlite-synchronous" }
// GetDbSqliteSynchronous safely fetches the value for global configuration 'DbSqliteSynchronous' field
func GetDbSqliteSynchronous() string { return global.GetDbSqliteSynchronous() }
// SetDbSqliteSynchronous safely sets the value for global configuration 'DbSqliteSynchronous' field
func SetDbSqliteSynchronous(v string) { global.SetDbSqliteSynchronous(v) }
// GetDbSqliteCacheSize safely fetches the Configuration value for state's 'DbSqliteCacheSize' field
func (st *ConfigState) GetDbSqliteCacheSize() (v bytesize.Size) {
st.mutex.Lock()
v = st.config.DbSqliteCacheSize
st.mutex.Unlock()
return
}
// SetDbSqliteCacheSize safely sets the Configuration value for state's 'DbSqliteCacheSize' field
func (st *ConfigState) SetDbSqliteCacheSize(v bytesize.Size) {
st.mutex.Lock()
defer st.mutex.Unlock()
st.config.DbSqliteCacheSize = v
st.reloadToViper()
}
// DbSqliteCacheSizeFlag returns the flag name for the 'DbSqliteCacheSize' field
func DbSqliteCacheSizeFlag() string { return "db-sqlite-cache-size" }
// GetDbSqliteCacheSize safely fetches the value for global configuration 'DbSqliteCacheSize' field
func GetDbSqliteCacheSize() bytesize.Size { return global.GetDbSqliteCacheSize() }
// SetDbSqliteCacheSize safely sets the value for global configuration 'DbSqliteCacheSize' field
func SetDbSqliteCacheSize(v bytesize.Size) { global.SetDbSqliteCacheSize(v) }
// GetDbSqliteBusyTimeout safely fetches the Configuration value for state's 'DbSqliteBusyTimeout' field
func (st *ConfigState) GetDbSqliteBusyTimeout() (v time.Duration) {
st.mutex.Lock()
v = st.config.DbSqliteBusyTimeout
st.mutex.Unlock()
return
}
// SetDbSqliteBusyTimeout safely sets the Configuration value for state's 'DbSqliteBusyTimeout' field
func (st *ConfigState) SetDbSqliteBusyTimeout(v time.Duration) {
st.mutex.Lock()
defer st.mutex.Unlock()
st.config.DbSqliteBusyTimeout = v
st.reloadToViper()
}
// DbSqliteBusyTimeoutFlag returns the flag name for the 'DbSqliteBusyTimeout' field
func DbSqliteBusyTimeoutFlag() string { return "db-sqlite-busy-timeout" }
// GetDbSqliteBusyTimeout safely fetches the value for global configuration 'DbSqliteBusyTimeout' field
func GetDbSqliteBusyTimeout() time.Duration { return global.GetDbSqliteBusyTimeout() }
// SetDbSqliteBusyTimeout safely sets the value for global configuration 'DbSqliteBusyTimeout' field
func SetDbSqliteBusyTimeout(v time.Duration) { global.SetDbSqliteBusyTimeout(v) }
// GetWebTemplateBaseDir safely fetches the Configuration value for state's 'WebTemplateBaseDir' field
func (st *ConfigState) GetWebTemplateBaseDir() (v string) {
st.mutex.Lock()
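Code elsewhere reads these settings through the package-level wrappers, which take the state mutex internally. A hypothetical call site (the function and fallback below are illustrative only):

```go
package example

import "github.com/superseriousbusiness/gotosocial/internal/config"

// journalModeOrDefault is a hypothetical call site for the accessors
// generated above. The getters take the config mutex internally, so
// they're safe to call from any goroutine.
func journalModeOrDefault() string {
	if mode := config.GetDbSqliteJournalMode(); mode != "" {
		return mode
	}
	return "WAL" // the documented default
}
```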


@@ -28,9 +28,11 @@
"fmt"
"os"
"runtime"
"strconv"
"strings" "strings"
"time" "time"
"codeberg.org/gruf/go-bytesize"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/stdlib" "github.com/jackc/pgx/v4/stdlib"
@@ -49,22 +51,6 @@
"modernc.org/sqlite"
)

-const (
-dbTypePostgres = "postgres"
-dbTypeSqlite = "sqlite"
-
-// dbTLSModeDisable does not attempt to make a TLS connection to the database.
-dbTLSModeDisable = "disable"
-// dbTLSModeEnable attempts to make a TLS connection to the database, but doesn't fail if
-// the certificate passed by the database isn't verified.
-dbTLSModeEnable = "enable"
-// dbTLSModeRequire attempts to make a TLS connection to the database, and requires
-// that the certificate presented by the database is valid.
-dbTLSModeRequire = "require"
-// dbTLSModeUnset means that the TLS mode has not been set.
-dbTLSModeUnset = ""
-)

var registerTables = []interface{}{
&gtsmodel.AccountToEmoji{},
&gtsmodel.StatusToEmoji{},
@@ -127,26 +113,34 @@ func doMigration(ctx context.Context, db *bun.DB) error {
func NewBunDBService(ctx context.Context, state *state.State) (db.DB, error) {
var conn *DBConn
var err error

-dbType := strings.ToLower(config.GetDbType())
t := strings.ToLower(config.GetDbType())

-switch dbType {
switch t {
-case dbTypePostgres:
case "postgres":
conn, err = pgConn(ctx)
if err != nil {
return nil, err
}
-case dbTypeSqlite:
case "sqlite":
conn, err = sqliteConn(ctx)
if err != nil {
return nil, err
}
default:
-return nil, fmt.Errorf("database type %s not supported for bundb", dbType)
return nil, fmt.Errorf("database type %s not supported for bundb", t)
}

// Add database query hook
conn.DB.AddQueryHook(queryHook{})
// execute sqlite pragmas *after* adding database hook;
// this allows the pragma queries to be logged
if t == "sqlite" {
if err := sqlitePragmas(ctx, conn); err != nil {
return nil, err
}
}
// table registration is needed for many-to-many, see:
// https://bun.uptrace.dev/orm/many-to-many-relation/
for _, t := range registerTables {
@@ -230,29 +224,29 @@ func NewBunDBService(ctx context.Context, state *state.State) (db.DB, error) {
func sqliteConn(ctx context.Context) (*DBConn, error) {
// validate db address has actually been set
-dbAddress := config.GetDbAddress()
address := config.GetDbAddress()
-if dbAddress == "" {
if address == "" {
return nil, fmt.Errorf("'%s' was not set when attempting to start sqlite", config.DbAddressFlag())
}

// Drop anything fancy from DB address
-dbAddress = strings.Split(dbAddress, "?")[0]
address = strings.Split(address, "?")[0]
-dbAddress = strings.TrimPrefix(dbAddress, "file:")
address = strings.TrimPrefix(address, "file:")

// Append our own SQLite preferences
-dbAddress = "file:" + dbAddress + "?cache=shared"
address = "file:" + address

var inMem bool

-if dbAddress == "file::memory:?cache=shared" {
if address == "file::memory:" {
-dbAddress = fmt.Sprintf("file:%s?mode=memory&cache=shared", uuid.NewString())
address = fmt.Sprintf("file:%s?mode=memory&cache=shared", uuid.NewString())
-log.Infof("using in-memory database address " + dbAddress)
log.Infof("using in-memory database address " + address)
log.Warn("sqlite in-memory database should only be used for debugging")
inMem = true
}

// Open new DB instance
-sqldb, err := sql.Open("sqlite", dbAddress)
sqldb, err := sql.Open("sqlite", address)
if err != nil {
if errWithCode, ok := err.(*sqlite.Error); ok {
err = errors.New(sqlite.ErrorCodeString[errWithCode.Code()])
@@ -260,8 +254,6 @@ func sqliteConn(ctx context.Context) (*DBConn, error) {
return nil, fmt.Errorf("could not open sqlite db: %s", err)
}

-tweakConnectionValues(sqldb)
-
if inMem {
// don't close connections on disconnect -- otherwise
// the SQLite database will be deleted when there
@@ -269,6 +261,7 @@ func sqliteConn(ctx context.Context) (*DBConn, error) {
sqldb.SetConnMaxLifetime(0)
}
// Wrap Bun database conn in our own wrapper
conn := WrapDBConn(bun.NewDB(sqldb, sqlitedialect.New()))

// ping to check the db is there and listening
@@ -278,11 +271,56 @@ func sqliteConn(ctx context.Context) (*DBConn, error) {
}
return nil, fmt.Errorf("sqlite ping: %s", err)
}

log.Info("connected to SQLITE database")
return conn, nil
}
func sqlitePragmas(ctx context.Context, conn *DBConn) error {
var pragmas [][]string
if mode := config.GetDbSqliteJournalMode(); mode != "" {
// Set the user provided SQLite journal mode
pragmas = append(pragmas, []string{"journal_mode", mode})
}
if mode := config.GetDbSqliteSynchronous(); mode != "" {
// Set the user provided SQLite synchronous mode
pragmas = append(pragmas, []string{"synchronous", mode})
}
if size := config.GetDbSqliteCacheSize(); size > 0 {
// Set the user provided SQLite cache size (in kibibytes)
// Prepend a '-' character to this to indicate to sqlite
// that we're giving kibibytes rather than num pages.
// https://www.sqlite.org/pragma.html#pragma_cache_size
s := "-" + strconv.FormatUint(uint64(size/bytesize.KiB), 10)
pragmas = append(pragmas, []string{"cache_size", s})
}
if timeout := config.GetDbSqliteBusyTimeout(); timeout > 0 {
t := strconv.FormatInt(timeout.Milliseconds(), 10)
pragmas = append(pragmas, []string{"busy_timeout", t})
}
for _, p := range pragmas {
pk := p[0]
pv := p[1]
if _, err := conn.DB.ExecContext(ctx, "PRAGMA ?=?", bun.Ident(pk), bun.Safe(pv)); err != nil {
return fmt.Errorf("error executing sqlite pragma %s: %w", pk, err)
}
var res string
if err := conn.DB.NewRaw("PRAGMA ?", bun.Ident(pk)).Scan(ctx, &res); err != nil {
return fmt.Errorf("error scanning sqlite pragma %s: %w", pv, err)
}
log.Infof("sqlite pragma %s set to %s", pk, res)
}
return nil
}
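A note on the `cache_size` conversion in this function: SQLite reads a negative `cache_size` as a budget in KiB rather than a page count, so the 64MiB default should render as `-65536`. A quick check of that arithmetic, using the same `go-bytesize` constants as the code above:

```go
package main

import (
	"fmt"
	"strconv"

	"codeberg.org/gruf/go-bytesize"
)

func main() {
	size := 64 * bytesize.MiB // 67108864 bytes, the configured default
	s := "-" + strconv.FormatUint(uint64(size/bytesize.KiB), 10)
	fmt.Println("PRAGMA cache_size=" + s) // PRAGMA cache_size=-65536
}
```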
func pgConn(ctx context.Context) (*DBConn, error) {
opts, err := deriveBunDBPGOptions() //nolint:contextcheck
if err != nil {
@@ -291,7 +329,10 @@ func pgConn(ctx context.Context) (*DBConn, error) {
sqldb := stdlib.OpenDB(*opts)

-tweakConnectionValues(sqldb)
// https://bun.uptrace.dev/postgres/running-bun-in-production.html#database-sql
maxOpenConns := 4 * runtime.GOMAXPROCS(0)
sqldb.SetMaxOpenConns(maxOpenConns)
sqldb.SetMaxIdleConns(maxOpenConns)
conn := WrapDBConn(bun.NewDB(sqldb, pgdialect.New()))
@@ -311,10 +352,6 @@ func pgConn(ctx context.Context) (*DBConn, error) {
// deriveBunDBPGOptions takes an application config and returns either a ready-to-use set of options
// with sensible defaults, or an error if it's not satisfied by the provided config.
func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
-if strings.ToUpper(config.GetDbType()) != db.DBTypePostgres {
-return nil, fmt.Errorf("expected db type of %s but got %s", db.DBTypePostgres, config.DbTypeFlag())
-}
-
// these are all optional, the db adapter figures out defaults
address := config.GetDbAddress()
@@ -326,14 +363,14 @@ func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
var tlsConfig *tls.Config
switch config.GetDbTLSMode() {
-case dbTLSModeDisable, dbTLSModeUnset:
case "", "disable":
break // nothing to do
-case dbTLSModeEnable:
case "enable":
/* #nosec G402 */
tlsConfig = &tls.Config{
InsecureSkipVerify: true,
}
-case dbTLSModeRequire:
case "require":
tlsConfig = &tls.Config{
InsecureSkipVerify: false,
ServerName: address,
@@ -397,13 +434,6 @@ func deriveBunDBPGOptions() (*pgx.ConnConfig, error) {
return cfg, nil
}

-// https://bun.uptrace.dev/postgres/running-bun-in-production.html#database-sql
-func tweakConnectionValues(sqldb *sql.DB) {
-maxOpenConns := 4 * runtime.GOMAXPROCS(0)
-sqldb.SetMaxOpenConns(maxOpenConns)
-sqldb.SetMaxIdleConns(maxOpenConns)
-}
-
/*
CONVERSION FUNCTIONS
*/


@@ -20,6 +20,7 @@
import (
"io"
"os"
)

// ReadFnCloser takes an io.Reader and wraps it to use the provided function to implement io.Closer.
@@ -157,3 +158,35 @@ func StreamWriteFunc(write func(io.Writer) error) io.Reader {
return pr
}
type tempFileSeeker struct {
io.Reader
io.Seeker
tmp *os.File
}
func (tfs *tempFileSeeker) Close() error {
tfs.tmp.Close()
return os.Remove(tfs.tmp.Name())
}
// TempFileSeeker converts the provided Reader into a ReadSeekCloser
// by using an underlying temporary file. Callers should call the Close
// function when they're done with the TempFileSeeker, to release +
// clean up the temporary file.
func TempFileSeeker(r io.Reader) (io.ReadSeekCloser, error) {
tmp, err := os.CreateTemp(os.TempDir(), "gotosocial-")
if err != nil {
return nil, err
}
if _, err := io.Copy(tmp, r); err != nil {
return nil, err
}
return &tempFileSeeker{
Reader: tmp,
Seeker: tmp,
tmp: tmp,
}, nil
}
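A sketch of how `TempFileSeeker` might be used, with a string reader standing in for a forward-only source such as an S3 object body. Note the internal copy leaves the file offset at end-of-file, so sequential readers must rewind first (seek-aware consumers like `http.ServeContent` do this themselves):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/superseriousbusiness/gotosocial/internal/iotools"
)

func main() {
	// Any forward-only reader will do; a string reader stands in here.
	var r io.Reader = strings.NewReader("hello, range requests")

	rsc, err := iotools.TempFileSeeker(r)
	if err != nil {
		panic(err)
	}
	defer rsc.Close() // also removes the backing temp file

	// The internal copy leaves the offset at EOF, so rewind before reading.
	if _, err := rsc.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}

	b, err := io.ReadAll(rsc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // hello, range requests
}
```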


@@ -414,9 +414,9 @@ func (suite *ManagerTestSuite) TestSlothVineProcessBlocking() {
suite.Equal(240, attachment.FileMeta.Original.Height)
suite.Equal(81120, attachment.FileMeta.Original.Size)
suite.EqualValues(1.4083333, attachment.FileMeta.Original.Aspect)
-suite.EqualValues(6.5862, *attachment.FileMeta.Original.Duration)
suite.EqualValues(6.640907, *attachment.FileMeta.Original.Duration)
suite.EqualValues(29.000029, *attachment.FileMeta.Original.Framerate)
-suite.EqualValues(0x3b3e1, *attachment.FileMeta.Original.Bitrate)
suite.EqualValues(0x59e74, *attachment.FileMeta.Original.Bitrate)
suite.EqualValues(gtsmodel.Small{
Width: 338, Height: 240, Size: 81120, Aspect: 1.4083333333333334,
}, attachment.FileMeta.Small)
@@ -531,6 +531,82 @@ func (suite *ManagerTestSuite) TestLongerMp4ProcessBlocking() {
suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes)
}
func (suite *ManagerTestSuite) TestBirdnestMp4ProcessBlocking() {
ctx := context.Background()
data := func(_ context.Context) (io.ReadCloser, int64, error) {
// load bytes from a test video
b, err := os.ReadFile("./test/birdnest-original.mp4")
if err != nil {
panic(err)
}
return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil
}
accountID := "01FS1X72SK9ZPW0J1QQ68BD264"
// process the media with no additional info provided
processingMedia, err := suite.manager.ProcessMedia(ctx, data, nil, accountID, nil)
suite.NoError(err)
// fetch the attachment id from the processing media
attachmentID := processingMedia.AttachmentID()
// do a blocking call to fetch the attachment
attachment, err := processingMedia.LoadAttachment(ctx)
suite.NoError(err)
suite.NotNil(attachment)
// make sure it's got the stuff set on it that we expect
// the attachment ID and accountID we expect
suite.Equal(attachmentID, attachment.ID)
suite.Equal(accountID, attachment.AccountID)
// file meta should be correctly derived from the video
suite.Equal(404, attachment.FileMeta.Original.Width)
suite.Equal(720, attachment.FileMeta.Original.Height)
suite.Equal(290880, attachment.FileMeta.Original.Size)
suite.EqualValues(0.5611111, attachment.FileMeta.Original.Aspect)
suite.EqualValues(9.822041, *attachment.FileMeta.Original.Duration)
suite.EqualValues(30, *attachment.FileMeta.Original.Framerate)
suite.EqualValues(0x117c79, *attachment.FileMeta.Original.Bitrate)
suite.EqualValues(gtsmodel.Small{
Width: 287, Height: 512, Size: 146944, Aspect: 0.5605469,
}, attachment.FileMeta.Small)
suite.Equal("video/mp4", attachment.File.ContentType)
suite.Equal("image/jpeg", attachment.Thumbnail.ContentType)
suite.Equal(1409577, attachment.File.FileSize)
suite.Equal("L00000fQfQfQfQfQfQfQfQfQfQfQ", attachment.Blurhash)
// now make sure the attachment is in the database
dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachmentID)
suite.NoError(err)
suite.NotNil(dbAttachment)
// make sure the processed file is in storage
processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path)
suite.NoError(err)
suite.NotEmpty(processedFullBytes)
// load the processed bytes from our test folder, to compare
processedFullBytesExpected, err := os.ReadFile("./test/birdnest-processed.mp4")
suite.NoError(err)
suite.NotEmpty(processedFullBytesExpected)
// the bytes in storage should be what we expected
suite.Equal(processedFullBytesExpected, processedFullBytes)
// now do the same for the thumbnail and make sure it's what we expected
processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path)
suite.NoError(err)
suite.NotEmpty(processedThumbnailBytes)
processedThumbnailBytesExpected, err := os.ReadFile("./test/birdnest-thumbnail.jpg")
suite.NoError(err)
suite.NotEmpty(processedThumbnailBytesExpected)
suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes)
}
func (suite *ManagerTestSuite) TestNotAnMp4ProcessBlocking() {
// try to load an 'mp4' that's actually an mkv in disguise
@@ -553,7 +629,7 @@ func (suite *ManagerTestSuite) TestNotAnMp4ProcessBlocking() {
// we should get an error while loading
attachment, err := processingMedia.LoadAttachment(ctx)
-suite.EqualError(err, "error decoding video: error determining video metadata: [width height duration framerate bitrate]")
suite.EqualError(err, "error decoding video: error determining video metadata: [width height framerate]")
suite.Nil(attachment)
}

Binary file not shown.

Binary file not shown.

Binary file not shown.



@@ -21,9 +21,10 @@
import (
"fmt"
"io"
-"os"

"github.com/abema/go-mp4"
"github.com/superseriousbusiness/gotosocial/internal/iotools"
"github.com/superseriousbusiness/gotosocial/internal/log"
)

type gtsVideo struct {
@@ -36,43 +37,48 @@ type gtsVideo struct {
// decodeVideoFrame decodes and returns an image from a single frame in the given video stream.
// (note: currently this only returns a blank image resized to fit video dimensions).
func decodeVideoFrame(r io.Reader) (*gtsVideo, error) {
-// We'll need a readseeker to decode the video. We can get a readseeker
-// without burning too much mem by first copying the reader into a temp file.
-// First create the file in the temporary directory...
-tmp, err := os.CreateTemp(os.TempDir(), "gotosocial-")
// we need a readseeker to decode the video...
tfs, err := iotools.TempFileSeeker(r)
if err != nil {
-return nil, err
return nil, fmt.Errorf("error creating temp file seeker: %w", err)
}
defer func() {
-tmp.Close()
-os.Remove(tmp.Name())
if err := tfs.Close(); err != nil {
log.Errorf("error closing temp file seeker: %s", err)
}
}()

-// Now copy the entire reader we've been provided into the
-// temporary file; we won't use the reader again after this.
-if _, err := io.Copy(tmp, r); err != nil {
-return nil, err
-}

// probe the video file to extract useful metadata from it; for methodology, see:
// https://github.com/abema/go-mp4/blob/7d8e5a7c5e644e0394261b0cf72fef79ce246d31/mp4tool/probe/probe.go#L85-L154
-info, err := mp4.Probe(tmp)
info, err := mp4.Probe(tfs)
if err != nil {
-return nil, fmt.Errorf("error probing tmp file %s: %w", tmp.Name(), err)
return nil, fmt.Errorf("error during mp4 probe: %w", err)
}

var (
width int
height int
-video gtsVideo
videoBitrate uint64
audioBitrate uint64
video gtsVideo
)

for _, tr := range info.Tracks {
if tr.AVC == nil {
// audio track
if br := tr.Samples.GetBitrate(tr.Timescale); br > audioBitrate {
audioBitrate = br
} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > audioBitrate {
audioBitrate = br
}

if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) {
video.duration = float32(d)
}

continue
}

// video track
if w := int(tr.AVC.Width); w > width {
width = w
}
@@ -81,10 +87,10 @@ func decodeVideoFrame(r io.Reader) (*gtsVideo, error) {
height = h
}

-if br := tr.Samples.GetBitrate(tr.Timescale); br > video.bitrate {
-video.bitrate = br
-} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > video.bitrate {
-video.bitrate = br
if br := tr.Samples.GetBitrate(tr.Timescale); br > videoBitrate {
videoBitrate = br
} else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > videoBitrate {
videoBitrate = br
}

if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) {
@@ -93,6 +99,10 @@ func decodeVideoFrame(r io.Reader) (*gtsVideo, error) {
}
}
// overall bitrate should be audio + video combined
// (since they're both playing at the same time)
video.bitrate = audioBitrate + videoBitrate
// Check for empty video metadata.
var empty []string
if width == 0 {
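To make the track arithmetic above concrete: duration is `Duration/Timescale`, and the attachment's overall bitrate is the audio rate plus the video rate, since both streams play at once. A worked example with invented numbers:

```go
package main

import "fmt"

func main() {
	// Invented sample values for one video track and one audio track.
	var (
		trackDuration uint64 = 98304  // in timescale units
		timescale     uint64 = 16000  // units per second
		videoBitrate  uint64 = 900000 // bits per second
		audioBitrate  uint64 = 128000 // bits per second
	)

	duration := float64(trackDuration) / float64(timescale)
	fmt.Printf("duration: %.3fs\n", duration) // duration: 6.144s

	// Both streams play simultaneously, so the overall rate is the sum.
	fmt.Printf("bitrate: %d bps\n", videoBitrate+audioBitrate) // 1028000 bps
}
```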


@@ -85,9 +85,6 @@ func (p *processor) GetFile(ctx context.Context, requestingAccount *gtsmodel.Acc
}

func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount *gtsmodel.Account, wantedMediaID string, owningAccountID string, mediaSize media.Size) (*apimodel.Content, gtserror.WithCode) {
-attachmentContent := &apimodel.Content{}
-var storagePath string
-
// retrieve attachment from the database and do basic checks on it
a, err := p.db.GetAttachmentByID(ctx, wantedMediaID)
if err != nil {
@@ -146,6 +143,13 @@ func (p *processor) getAttachmentContent(ctx context.Context, requestingAccount
}
}
var (
storagePath string
attachmentContent = &apimodel.Content{
ContentUpdated: a.UpdatedAt,
}
)
// get file information from the attachment depending on the requested media size
switch mediaSize {
case media.SizeOriginal:


@@ -284,19 +284,13 @@ func (c *converter) AttachmentToAPIAttachment(ctx context.Context, a *gtsmodel.M
Original: apimodel.MediaDimensions{
Width: a.FileMeta.Original.Width,
Height: a.FileMeta.Original.Height,
-Size: fmt.Sprintf("%dx%d", a.FileMeta.Original.Width, a.FileMeta.Original.Height),
-Aspect: float32(a.FileMeta.Original.Aspect),
},
Small: apimodel.MediaDimensions{
Width: a.FileMeta.Small.Width,
Height: a.FileMeta.Small.Height,
-Size: fmt.Sprintf("%dx%d", a.FileMeta.Small.Width, a.FileMeta.Small.Height),
Size: strconv.Itoa(a.FileMeta.Small.Width) + "x" + strconv.Itoa(a.FileMeta.Small.Height),
Aspect: float32(a.FileMeta.Small.Aspect),
},
-Focus: apimodel.MediaFocus{
-X: a.FileMeta.Focus.X,
-Y: a.FileMeta.Focus.Y,
-},
},
Blurhash: a.Blurhash,
}
@@ -318,20 +312,31 @@ func (c *converter) AttachmentToAPIAttachment(ctx context.Context, a *gtsmodel.M
apiAttachment.Description = &i
}

-if i := a.FileMeta.Original.Duration; i != nil {
-apiAttachment.Meta.Original.Duration = *i
-}
// type specific fields
switch a.Type {
case gtsmodel.FileTypeImage:
apiAttachment.Meta.Original.Size = strconv.Itoa(a.FileMeta.Original.Width) + "x" + strconv.Itoa(a.FileMeta.Original.Height)
apiAttachment.Meta.Original.Aspect = float32(a.FileMeta.Original.Aspect)
apiAttachment.Meta.Focus = &apimodel.MediaFocus{
X: a.FileMeta.Focus.X,
Y: a.FileMeta.Focus.Y,
}
case gtsmodel.FileTypeVideo:
if i := a.FileMeta.Original.Duration; i != nil {
apiAttachment.Meta.Original.Duration = *i
}
if i := a.FileMeta.Original.Framerate; i != nil {
// the masto api expects this as a string in
// the format `integer/1`, so 30fps is `30/1`
round := math.Round(float64(*i))
fr := strconv.FormatInt(int64(round), 10)
apiAttachment.Meta.Original.FrameRate = fr + "/1"
}
if i := a.FileMeta.Original.Bitrate; i != nil {
apiAttachment.Meta.Original.Bitrate = int(*i)
}
}

return apiAttachment, nil


@@ -441,19 +441,13 @@ func (suite *InternalToFrontendTestSuite) TestVideoAttachmentToFrontend() {
"height": 404,
"frame_rate": "30/1",
"duration": 15.033334,
-"bitrate": 1206522,
"bitrate": 1206522
-"size": "720x404",
-"aspect": 1.7821782
},
"small": {
"width": 720,
"height": 404,
"size": "720x404",
"aspect": 1.7821782
-},
-"focus": {
-"x": 0,
-"y": 0
}
},
"description": "A cow adorably licking another cow!"


@@ -2,7 +2,7 @@
set -eu
EXPECT='{"account-domain":"peepee","accounts-allow-custom-css":true,"accounts-approval-required":false,"accounts-reason-required":false,"accounts-registration-open":true,"advanced-cookies-samesite":"strict","advanced-rate-limit-requests":6969,"advanced-throttling-multiplier":-1,"application-name":"gts","bind-address":"127.0.0.1","cache":{"gts":{"account-max-size":99,"account-sweep-freq":1000000000,"account-ttl":10800000000000,"block-max-size":100,"block-sweep-freq":10000000000,"block-ttl":300000000000,"domain-block-max-size":1000,"domain-block-sweep-freq":60000000000,"domain-block-ttl":86400000000000,"emoji-category-max-size":100,"emoji-category-sweep-freq":10000000000,"emoji-category-ttl":300000000000,"emoji-max-size":500,"emoji-sweep-freq":10000000000,"emoji-ttl":300000000000,"mention-max-size":500,"mention-sweep-freq":10000000000,"mention-ttl":300000000000,"notification-max-size":500,"notification-sweep-freq":10000000000,"notification-ttl":300000000000,"report-max-size":100,"report-sweep-freq":10000000000,"report-ttl":300000000000,"status-max-size":500,"status-sweep-freq":10000000000,"status-ttl":300000000000,"tombstone-max-size":100,"tombstone-sweep-freq":10000000000,"tombstone-ttl":300000000000,"user-max-size":100,"user-sweep-freq":10000000000,"user-ttl":300000000000}},"config-path":"internal/config/testdata/test.yaml","db-address":":memory:","db-database":"gotosocial_prod","db-password":"hunter2","db-port":6969,"db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"sqlite","db-user":"sex-haver","dry-run":false,"email":"","host":"example.com","instance-deliver-to-shared-inboxes":false,"instance-expose-peers":true,"instance-expose-public-timeline":true,"instance-expose-suspended":true,"landing-page-user":"admin","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":true,"log-level":"info","media-description-max-chars":5000,"media-description-min-chars":69,"media-emoji-local-max-size":420,"media-emoji-remote-max-size":420,"media-image-max-size":420,"media-remote-cache-days":30,"media-video-max-size":420,"oidc-client-id":"1234","oidc-client-secret":"shhhh its a secret","oidc-enabled":true,"oidc-idp-name":"sex-haver","oidc-issuer":"whoknows","oidc-link-existing":true,"oidc-scopes":["read","write"],"oidc-skip-verification":true,"password":"","path":"","port":6969,"protocol":"http","smtp-from":"queen.rip.in.piss@terfisland.org","smtp-host":"example.com","smtp-password":"hunter2","smtp-port":4269,"smtp-username":"sex-haver","software-version":"","statuses-cw-max-chars":420,"statuses-max-chars":69,"statuses-media-max-files":1,"statuses-poll-max-options":1,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/root/store","storage-s3-access-key":"minio","storage-s3-bucket":"gts","storage-s3-endpoint":"localhost:9000","storage-s3-proxy":true,"storage-s3-secret-key":"miniostorage","storage-s3-use-ssl":false,"syslog-address":"127.0.0.1:6969","syslog-enabled":true,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","docker.host.local"],"username":"","web-asset-base-dir":"/root","web-template-base-dir":"/root"}' 
EXPECT='{"account-domain":"peepee","accounts-allow-custom-css":true,"accounts-approval-required":false,"accounts-reason-required":false,"accounts-registration-open":true,"advanced-cookies-samesite":"strict","advanced-rate-limit-requests":6969,"advanced-throttling-multiplier":-1,"application-name":"gts","bind-address":"127.0.0.1","cache":{"gts":{"account-max-size":99,"account-sweep-freq":1000000000,"account-ttl":10800000000000,"block-max-size":100,"block-sweep-freq":10000000000,"block-ttl":300000000000,"domain-block-max-size":1000,"domain-block-sweep-freq":60000000000,"domain-block-ttl":86400000000000,"emoji-category-max-size":100,"emoji-category-sweep-freq":10000000000,"emoji-category-ttl":300000000000,"emoji-max-size":500,"emoji-sweep-freq":10000000000,"emoji-ttl":300000000000,"mention-max-size":500,"mention-sweep-freq":10000000000,"mention-ttl":300000000000,"notification-max-size":500,"notification-sweep-freq":10000000000,"notification-ttl":300000000000,"report-max-size":100,"report-sweep-freq":10000000000,"report-ttl":300000000000,"status-max-size":500,"status-sweep-freq":10000000000,"status-ttl":300000000000,"tombstone-max-size":100,"tombstone-sweep-freq":10000000000,"tombstone-ttl":300000000000,"user-max-size":100,"user-sweep-freq":10000000000,"user-ttl":300000000000}},"config-path":"internal/config/testdata/test.yaml","db-address":":memory:","db-database":"gotosocial_prod","db-password":"hunter2","db-port":6969,"db-sqlite-busy-timeout":1000000000,"db-sqlite-cache-size":0,"db-sqlite-journal-mode":"DELETE","db-sqlite-synchronous":"FULL","db-tls-ca-cert":"","db-tls-mode":"disable","db-type":"sqlite","db-user":"sex-haver","dry-run":false,"email":"","host":"example.com","instance-deliver-to-shared-inboxes":false,"instance-expose-peers":true,"instance-expose-public-timeline":true,"instance-expose-suspended":true,"landing-page-user":"admin","letsencrypt-cert-dir":"/gotosocial/storage/certs","letsencrypt-email-address":"","letsencrypt-enabled":true,"letsencrypt-port":80,"log-db-queries":true,"log-level":"info","media-description-max-chars":5000,"media-description-min-chars":69,"media-emoji-local-max-size":420,"media-emoji-remote-max-size":420,"media-image-max-size":420,"media-remote-cache-days":30,"media-video-max-size":420,"oidc-client-id":"1234","oidc-client-secret":"shhhh its a secret","oidc-enabled":true,"oidc-idp-name":"sex-haver","oidc-issuer":"whoknows","oidc-link-existing":true,"oidc-scopes":["read","write"],"oidc-skip-verification":true,"password":"","path":"","port":6969,"protocol":"http","smtp-from":"queen.rip.in.piss@terfisland.org","smtp-host":"example.com","smtp-password":"hunter2","smtp-port":4269,"smtp-username":"sex-haver","software-version":"","statuses-cw-max-chars":420,"statuses-max-chars":69,"statuses-media-max-files":1,"statuses-poll-max-options":1,"statuses-poll-option-max-chars":50,"storage-backend":"local","storage-local-base-path":"/root/store","storage-s3-access-key":"minio","storage-s3-bucket":"gts","storage-s3-endpoint":"localhost:9000","storage-s3-proxy":true,"storage-s3-secret-key":"miniostorage","storage-s3-use-ssl":false,"syslog-address":"127.0.0.1:6969","syslog-enabled":true,"syslog-protocol":"udp","trusted-proxies":["127.0.0.1/32","docker.host.local"],"username":"","web-asset-base-dir":"/root","web-template-base-dir":"/root"}'
# Set all the environment variables to # Set all the environment variables to
# ensure that these are parsed without panic # ensure that these are parsed without panic
@ -22,6 +22,10 @@ GTS_DB_PORT=6969 \
GTS_DB_USER='sex-haver' \ GTS_DB_USER='sex-haver' \
GTS_DB_PASSWORD='hunter2' \ GTS_DB_PASSWORD='hunter2' \
GTS_DB_DATABASE='gotosocial_prod' \ GTS_DB_DATABASE='gotosocial_prod' \
GTS_DB_SQLITE_JOURNAL_MODE='DELETE' \
GTS_DB_SQLITE_SYNCHRONOUS='FULL' \
GTS_DB_SQLITE_CACHE_SIZE=0 \
GTS_DB_SQLITE_BUSY_TIMEOUT='1s' \
GTS_TLS_MODE='' \ GTS_TLS_MODE='' \
GTS_DB_TLS_CA_CERT='' \ GTS_DB_TLS_CA_CERT='' \
GTS_WEB_TEMPLATE_BASE_DIR='/root' \ GTS_WEB_TEMPLATE_BASE_DIR='/root' \
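
A note on the expected output above: duration and byte-size settings serialise to plain integers (nanoseconds and bytes respectively), which is why `GTS_DB_SQLITE_BUSY_TIMEOUT='1s'` shows up as `1000000000` and `GTS_DB_SQLITE_CACHE_SIZE=0` stays `0` in EXPECT. A minimal sketch of that correspondence, using the go-bytesize import taken from the testrig change below:

```
package main

import (
	"fmt"
	"time"

	"codeberg.org/gruf/go-bytesize"
)

func main() {
	// '1s' parsed as a time.Duration serialises to nanoseconds.
	fmt.Println(int64(time.Second)) // 1000000000
	// '64MiB' parsed as a bytesize value serialises to bytes.
	fmt.Println(uint64(64 * bytesize.MiB)) // 67108864
}
```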

View File

@ -19,6 +19,9 @@
package testrig package testrig
import ( import (
"time"
"codeberg.org/gruf/go-bytesize"
"github.com/coreos/go-oidc/v3/oidc" "github.com/coreos/go-oidc/v3/oidc"
"github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/config"
) )
@ -43,12 +46,16 @@ func InitTestConfig() {
Port: 8080, Port: 8080,
TrustedProxies: []string{"127.0.0.1/32", "::1"}, TrustedProxies: []string{"127.0.0.1/32", "::1"},
DbType: "sqlite", DbType: "sqlite",
DbAddress: ":memory:", DbAddress: ":memory:",
DbPort: 5432, DbPort: 5432,
DbUser: "postgres", DbUser: "postgres",
DbPassword: "postgres", DbPassword: "postgres",
DbDatabase: "postgres", DbDatabase: "postgres",
DbSqliteJournalMode: "WAL",
DbSqliteSynchronous: "NORMAL",
DbSqliteCacheSize: 64 * bytesize.MiB,
DbSqliteBusyTimeout: time.Second * 30,
WebTemplateBaseDir: "./web/template/", WebTemplateBaseDir: "./web/template/",
WebAssetBaseDir: "./web/assets/", WebAssetBaseDir: "./web/assets/",
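
The four new fields on config.Configuration mirror the db-sqlite-* settings: two plain strings plus a byte size and a duration. A minimal sketch of overriding them at runtime; the per-field setters here are an assumption, modelled on the config.SetDbAddress/SetDbType/SetDbPort helpers used in the next file:

```
package main

import (
	"time"

	"codeberg.org/gruf/go-bytesize"
	"github.com/superseriousbusiness/gotosocial/internal/config"
)

func main() {
	// Hypothetical: assumes generated setters exist for the new fields,
	// in the same style as config.SetDbAddress used further down.
	config.SetDbSqliteJournalMode("DELETE")
	config.SetDbSqliteSynchronous("FULL")
	config.SetDbSqliteCacheSize(32 * bytesize.MiB)
	config.SetDbSqliteBusyTimeout(30 * time.Second)
}
```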

View File

@ -73,15 +73,11 @@
// value as the port instead. // value as the port instead.
func NewTestDB() db.DB { func NewTestDB() db.DB {
if alternateAddress := os.Getenv("GTS_DB_ADDRESS"); alternateAddress != "" { if alternateAddress := os.Getenv("GTS_DB_ADDRESS"); alternateAddress != "" {
config.Config(func(cfg *config.Configuration) { config.SetDbAddress(alternateAddress)
cfg.DbAddress = alternateAddress
})
} }
if alternateDBType := os.Getenv("GTS_DB_TYPE"); alternateDBType != "" { if alternateDBType := os.Getenv("GTS_DB_TYPE"); alternateDBType != "" {
config.Config(func(cfg *config.Configuration) { config.SetDbType(alternateDBType)
cfg.DbType = alternateDBType
})
} }
if alternateDBPort := os.Getenv("GTS_DB_PORT"); alternateDBPort != "" { if alternateDBPort := os.Getenv("GTS_DB_PORT"); alternateDBPort != "" {
@ -89,9 +85,7 @@ func NewTestDB() db.DB {
if err != nil { if err != nil {
panic(err) panic(err)
} }
config.Config(func(cfg *config.Configuration) { config.SetDbPort(int(port))
cfg.DbPort = int(port)
})
} }
var state state.State var state state.State

View File

@ -40,33 +40,29 @@ func (f Callers) Frames() []runtime.Frame {
return frames return frames
} }
// MarshalJSON implements json.Marshaler to provide an easy, simply default. // MarshalJSON implements json.Marshaler to provide an easy, simple default.
func (f Callers) MarshalJSON() ([]byte, error) { func (f Callers) MarshalJSON() ([]byte, error) {
// JSON-able frame type // JSON-able frame type
type frame struct { type jsonFrame struct {
Func string `json:"func"` Func string `json:"func"`
File string `json:"file"` File string `json:"file"`
Line int `json:"line"` Line int `json:"line"`
} }
// Allocate expected frames slice // Convert to frames
frames := make([]frame, 0, len(f)) frames := f.Frames()
// Get frames iterator for PCs // Allocate expected size jsonFrame slice
iter := runtime.CallersFrames(f) jsonFrames := make([]jsonFrame, 0, len(f))
for { for i := 0; i < len(frames); i++ {
// Get next frame frame := frames[i]
f, ok := iter.Next()
if !ok {
break
}
// Append to frames slice // Convert each to jsonFrame object
frames = append(frames, frame{ jsonFrames = append(jsonFrames, jsonFrame{
Func: funcname(f.Function), Func: funcname(frame.Function),
File: f.File, File: frame.File,
Line: f.Line, Line: frame.Line,
}) })
} }
@ -86,8 +82,8 @@ func (f Callers) String() string {
frame := frames[i] frame := frames[i]
// Append formatted caller info // Append formatted caller info
funcname := funcname(frame.Function) fn := funcname(frame.Function)
buf = append(buf, funcname+"()\n\t"+frame.File+":"...) buf = append(buf, fn+"()\n\t"+frame.File+":"...)
buf = strconv.AppendInt(buf, int64(frame.Line), 10) buf = strconv.AppendInt(buf, int64(frame.Line), 10)
buf = append(buf, '\n') buf = append(buf, '\n')
} }

View File

@ -24,13 +24,13 @@ func Wrapf(err error, msgf string, args ...interface{}) error {
return create(fmt.Sprintf(msgf, args...), err) return create(fmt.Sprintf(msgf, args...), err)
} }
// Stacktrace fetches a stored stacktrace of callers from an error, or returns nil. // Stacktrace fetches first stored stacktrace of callers from error chain.
func Stacktrace(err error) Callers { func Stacktrace(err error) Callers {
var callers Callers var e interface {
if err, ok := err.(interface { //nolint
Stacktrace() Callers Stacktrace() Callers
}); ok {
callers = err.Stacktrace()
} }
return callers if !As(err, &e) {
return nil
}
return e.Stacktrace()
} }
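
The rewritten Stacktrace() walks the whole error chain with As() instead of type-asserting only the outermost error. A minimal sketch under that assumption; `wrapped` and `traced` are hypothetical error types for illustration, with the stacktrace provider one level down the unwrap chain:

```
package main

import (
	"fmt"
	"runtime"

	"codeberg.org/gruf/go-errors/v2"
)

// wrapped hides the stacktrace provider behind one Unwrap() hop.
type wrapped struct{ inner error }

func (w *wrapped) Error() string { return "wrapped: " + w.inner.Error() }
func (w *wrapped) Unwrap() error { return w.inner }

// traced exposes the Stacktrace() Callers interface the package looks for.
type traced struct{ pcs errors.Callers }

func (t *traced) Error() string              { return "traced" }
func (t *traced) Stacktrace() errors.Callers { return t.pcs }

func main() {
	pcs := make(errors.Callers, 8)
	n := runtime.Callers(1, pcs)

	err := &wrapped{inner: &traced{pcs: pcs[:n]}}

	// The old implementation only checked the top layer; the new one
	// finds the provider anywhere in the chain.
	fmt.Println(len(errors.Stacktrace(err)) > 0) // true
}
```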

View File

@ -3,6 +3,7 @@
import ( import (
"errors" "errors"
"reflect" "reflect"
_ "unsafe"
"codeberg.org/gruf/go-bitutil" "codeberg.org/gruf/go-bitutil"
) )
@ -18,7 +19,7 @@
func Is(err error, targets ...error) bool { func Is(err error, targets ...error) bool {
var flags bitutil.Flags64 var flags bitutil.Flags64
// Flags only has 64 bit slots // Flags only has 64 bit-slots
if len(targets) > 64 { if len(targets) > 64 {
panic("too many targets") panic("too many targets")
} }
@ -46,26 +47,30 @@ func Is(err error, targets ...error) bool {
} }
for err != nil { for err != nil {
var errorIs func(error) bool
// Check if this layer supports .Is interface // Check if this layer supports .Is interface
is, ok := err.(interface{ Is(error) bool }) is, ok := err.(interface{ Is(error) bool })
if ok {
errorIs = is.Is
} else {
errorIs = neveris
}
for i := 0; i < len(targets); i++ { if !ok {
// Try directly compare errors // Error does not support interface
if flags.Get(uint8(i)) && //
err == targets[i] { // Only try perform direct compare
return true for i := 0; i < len(targets); i++ {
// Try directly compare errors
if flags.Get(uint8(i)) &&
err == targets[i] {
return true
}
} }
} else {
// Try use .Is() interface // Error supports the .Is interface
if errorIs(targets[i]) { //
return true // Perform direct compare AND .Is()
for i := 0; i < len(targets); i++ {
if (flags.Get(uint8(i)) &&
err == targets[i]) ||
is.Is(targets[i]) {
return true
}
} }
} }
@ -92,15 +97,12 @@ func Is(err error, targets ...error) bool {
// //
// As panics if target is not a non-nil pointer to either a type that implements // As panics if target is not a non-nil pointer to either a type that implements
// error, or to any interface type. // error, or to any interface type.
func As(err error, target interface{}) bool { //
return errors.As(err, target) //go:linkname As errors.As
} func As(err error, target interface{}) bool
// Unwrap returns the result of calling the Unwrap method on err, if err's // Unwrap returns the result of calling the Unwrap method on err, if err's
// type contains an Unwrap method returning error. Otherwise, Unwrap returns nil. // type contains an Unwrap method returning error. Otherwise, Unwrap returns nil.
func Unwrap(err error) error { //
return errors.Unwrap(err) //go:linkname Unwrap errors.Unwrap
} func Unwrap(err error) error
// neveris fits the .Is(error) bool interface function always returning false.
func neveris(error) bool { return false }
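
With neveris gone, the direct-compare and .Is() paths are now split per layer, but the public behaviour of the variadic Is() is unchanged: it reports whether any of up to 64 targets matches somewhere in the chain, panicking above that (one flag bit per target). A small usage sketch:

```
package main

import (
	"database/sql"
	"fmt"
	"io"

	"codeberg.org/gruf/go-errors/v2"
)

func main() {
	err := fmt.Errorf("lookup: %w", sql.ErrNoRows)

	// Multiple targets in one call; matches sql.ErrNoRows after one unwrap.
	fmt.Println(errors.Is(err, io.EOF, sql.ErrNoRows)) // true
}
```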

vendor/codeberg.org/gruf/go-errors/v2/value.go (new vendored file, 54 lines)
View File

@ -0,0 +1,54 @@
package errors
// WithValue wraps err to store given key-value pair, accessible via Value() function.
func WithValue(err error, key any, value any) error {
if err == nil {
panic("nil error")
}
return &errWithValue{
err: err,
key: key,
val: value,
}
}
// Value searches for value stored under given key in error chain.
func Value(err error, key any) any {
var e *errWithValue
if !As(err, &e) {
return nil
}
return e.Value(key)
}
type errWithValue struct {
err error
key any
val any
}
func (e *errWithValue) Error() string {
return e.err.Error()
}
func (e *errWithValue) Is(target error) bool {
return e.err == target
}
func (e *errWithValue) Unwrap() error {
return Unwrap(e.err)
}
func (e *errWithValue) Value(key any) any {
for {
if key == e.key {
return e.val
}
if !As(e.err, &e) {
return nil
}
}
}
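
Usage of the new value-carrying wrapper is straightforward; this sketch uses only the WithValue and Value functions defined above, with a plain stdlib error as the base:

```
package main

import (
	"fmt"

	"codeberg.org/gruf/go-errors/v2"
)

func main() {
	base := fmt.Errorf("query failed")

	// Attach context to the error without changing its message.
	err := errors.WithValue(base, "table", "statuses")

	fmt.Println(err)                          // query failed
	fmt.Println(errors.Value(err, "table"))   // statuses
	fmt.Println(errors.Value(err, "missing")) // <nil>
}
```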

View File

@ -16,6 +16,8 @@ lint:
vet: vet:
@GO111MODULE=on go vet ./... @GO111MODULE=on go vet ./...
@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005"
test: test:
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...

View File

@ -21,7 +21,7 @@
"bytes" "bytes"
"context" "context"
"encoding/xml" "encoding/xml"
"io/ioutil" "io"
"net/http" "net/http"
"net/url" "net/url"
@ -143,5 +143,5 @@ func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]b
} }
} }
return ioutil.ReadAll(resp.Body) return io.ReadAll(resp.Body)
} }

View File

@ -18,7 +18,7 @@
import ( import (
"context" "context"
"io/ioutil" "io"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
@ -137,7 +137,7 @@ func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string
} }
} }
bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) bucketPolicyBuf, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@ -22,7 +22,7 @@
"context" "context"
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"io/ioutil" "io"
"net/http" "net/http"
"net/url" "net/url"
"time" "time"
@ -180,7 +180,7 @@ func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName str
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
return s, httpRespToErrorResponse(resp, bucketName, "") return s, httpRespToErrorResponse(resp, bucketName, "")
} }
respBytes, err := ioutil.ReadAll(resp.Body) respBytes, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return s, err return s, err
} }

View File

@ -22,7 +22,6 @@
"encoding/xml" "encoding/xml"
"errors" "errors"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
@ -58,7 +57,7 @@ func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags
return nil, httpRespToErrorResponse(resp, bucketName, "") return nil, httpRespToErrorResponse(resp, bucketName, "")
} }
defer io.Copy(ioutil.Discard, resp.Body) defer io.Copy(io.Discard, resp.Body)
return tags.ParseBucketXML(resp.Body) return tags.ParseBucketXML(resp.Body)
} }

View File

@ -21,7 +21,6 @@
"context" "context"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strconv" "strconv"
@ -516,7 +515,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
return UploadInfo{}, err return UploadInfo{}, err
} }
if dst.Progress != nil { if dst.Progress != nil {
io.CopyN(ioutil.Discard, dst.Progress, end-start+1) io.CopyN(io.Discard, dst.Progress, end-start+1)
} }
objParts = append(objParts, complPart) objParts = append(objParts, complPart)
partIndex++ partIndex++
@ -525,7 +524,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
// 4. Make final complete-multipart request. // 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
completeMultipartUpload{Parts: objParts}, PutObjectOptions{}) completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
if err != nil { if err != nil {
return UploadInfo{}, err return UploadInfo{}, err
} }

View File

@ -20,7 +20,6 @@
import ( import (
"context" "context"
"io" "io"
"io/ioutil"
"net/http" "net/http"
) )
@ -54,7 +53,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr
// Update the progress properly after successful copy. // Update the progress properly after successful copy.
if dst.Progress != nil { if dst.Progress != nil {
io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size)) io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
} }
cpObjRes := copyObjectResult{} cpObjRes := copyObjectResult{}

View File

@ -22,7 +22,6 @@
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
) )
@ -108,7 +107,7 @@ func (e ErrorResponse) Error() string {
func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
// read the whole body (up to 1MB) // read the whole body (up to 1MB)
const maxBodyLength = 1 << 20 const maxBodyLength = 1 << 20
body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -253,26 +252,6 @@ func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
} }
} }
// errInvalidBucketName - Invalid bucket name response.
func errInvalidBucketName(message string) error {
return ErrorResponse{
StatusCode: http.StatusBadRequest,
Code: "InvalidBucketName",
Message: message,
RequestID: "minio",
}
}
// errInvalidObjectName - Invalid object name response.
func errInvalidObjectName(message string) error {
return ErrorResponse{
StatusCode: http.StatusNotFound,
Code: "NoSuchKey",
Message: message,
RequestID: "minio",
}
}
// errInvalidArgument - Invalid argument response. // errInvalidArgument - Invalid argument response.
func errInvalidArgument(message string) error { func errInvalidArgument(message string) error {
return ErrorResponse{ return ErrorResponse{

View File

@ -897,6 +897,8 @@ func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyM
} }
// listObjectParts list all object parts recursively. // listObjectParts list all object parts recursively.
//
//lint:ignore U1000 Keep this around
func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
// Part number marker for the next batch of request. // Part number marker for the next batch of request.
var nextPartNumberMarker int var nextPartNumberMarker int

View File

@ -26,7 +26,6 @@
"fmt" "fmt"
"hash/crc32" "hash/crc32"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"sort" "sort"
@ -201,7 +200,9 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
// Sort all completed parts. // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{} opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 { if len(crcBytes) > 0 {
// Add hash of hashes. // Add hash of hashes.
crc.Reset() crc.Reset()
@ -412,7 +413,7 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
// Read resp.Body into a []bytes to parse for Error response inside the body // Read resp.Body into a []bytes to parse for Error response inside the body
var b []byte var b []byte
b, err = ioutil.ReadAll(resp.Body) b, err = io.ReadAll(resp.Body)
if err != nil { if err != nil {
return UploadInfo{}, err return UploadInfo{}, err
} }

View File

@ -28,6 +28,7 @@
"net/url" "net/url"
"sort" "sort"
"strings" "strings"
"sync"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/minio-go/v7/pkg/s3utils"
@ -44,7 +45,9 @@
func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
reader io.Reader, size int64, opts PutObjectOptions, reader io.Reader, size int64, opts PutObjectOptions,
) (info UploadInfo, err error) { ) (info UploadInfo, err error) {
if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
} else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
} else { } else {
@ -266,6 +269,9 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
// Sort all completed parts. // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if withChecksum { if withChecksum {
// Add hash of hashes. // Add hash of hashes.
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
@ -278,7 +284,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
} }
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil { if err != nil {
return UploadInfo{}, err return UploadInfo{}, err
} }
@ -425,6 +431,211 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
// Sort all completed parts. // Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts)) sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalUploadedSize
return uploadInfo, nil
}
// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel.
// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer.
func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string,
reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return UploadInfo{}, err
}
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return UploadInfo{}, err
}
if !opts.SendContentMd5 {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
}
// Cancel all when an error occurs.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
if err != nil {
return UploadInfo{}, err
}
// Initiates a new multipart request
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return UploadInfo{}, err
}
delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
// Aborts the multipart upload if the function returns
// any error, since we do not resume we should purge
// the parts which have been uploaded to relinquish
// storage space.
defer func() {
if err != nil {
c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
// Create checksums
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
md5Hash := c.md5Hasher()
defer md5Hash.Close()
// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
// Create a buffer.
nBuffers := int64(opts.NumThreads)
bufs := make(chan []byte, nBuffers)
all := make([]byte, nBuffers*partSize)
for i := int64(0); i < nBuffers; i++ {
bufs <- all[i*partSize : i*partSize+partSize]
}
var wg sync.WaitGroup
var mu sync.Mutex
errCh := make(chan error, opts.NumThreads)
reader = newHook(reader, opts.Progress)
// Part number always starts with '1'.
var partNumber int
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Proceed to upload the part.
var buf []byte
select {
case buf = <-bufs:
case err = <-errCh:
cancel()
wg.Wait()
return UploadInfo{}, err
}
if int64(len(buf)) != partSize {
return UploadInfo{}, fmt.Errorf("read buffer < %d than expected partSize: %d", len(buf), partSize)
}
length, rerr := readFull(reader, buf)
if rerr == io.EOF && partNumber > 1 {
// Done
break
}
if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
cancel()
wg.Wait()
return UploadInfo{}, rerr
}
// Calculate md5sum.
customHeader := make(http.Header)
if !opts.SendContentMd5 {
// Add CRC32C instead.
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
wg.Add(1)
go func(partNumber int) {
// Avoid declaring variables in the for loop
var md5Base64 string
if opts.SendContentMd5 {
md5Hash.Reset()
md5Hash.Write(buf[:length])
md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
}
defer wg.Done()
p := uploadPartParams{
bucketName: bucketName,
objectName: objectName,
uploadID: uploadID,
reader: bytes.NewReader(buf[:length]),
partNumber: partNumber,
md5Base64: md5Base64,
size: int64(length),
sse: opts.ServerSideEncryption,
streamSha256: !opts.DisableContentSha256,
customHeader: customHeader,
}
objPart, uerr := c.uploadPart(ctx, p)
if uerr != nil {
errCh <- uerr
}
// Save successfully uploaded part metadata.
mu.Lock()
partsInfo[partNumber] = objPart
mu.Unlock()
// Send buffer back so it can be reused.
bufs <- buf
}(partNumber)
// Save successfully uploaded size.
totalUploadedSize += int64(length)
}
wg.Wait()
// Collect any error
select {
case err = <-errCh:
return UploadInfo{}, err
default:
}
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
ChecksumCRC32: part.ChecksumCRC32,
ChecksumCRC32C: part.ChecksumCRC32C,
ChecksumSHA1: part.ChecksumSHA1,
ChecksumSHA256: part.ChecksumSHA256,
})
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{} opts = PutObjectOptions{}
if len(crcBytes) > 0 { if len(crcBytes) > 0 {
// Add hash of hashes. // Add hash of hashes.

View File

@ -87,7 +87,12 @@ type PutObjectOptions struct {
SendContentMd5 bool SendContentMd5 bool
DisableContentSha256 bool DisableContentSha256 bool
DisableMultipart bool DisableMultipart bool
Internal AdvancedPutOptions
// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
// fill them serially and upload them in parallel.
// This can be used for faster uploads on non-seekable or slow-to-seek input.
ConcurrentStreamParts bool
Internal AdvancedPutOptions
} }
// getNumThreads - gets the number of threads to be used in the multipart // getNumThreads - gets the number of threads to be used in the multipart
@ -272,6 +277,9 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
if opts.DisableMultipart { if opts.DisableMultipart {
return UploadInfo{}, errors.New("no length provided and multipart disabled") return UploadInfo{}, errors.New("no length provided and multipart disabled")
} }
if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
}
return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
} }
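
The new parallel path is opt-in: it only engages when ConcurrentStreamParts is set and NumThreads is greater than 1, and (per the comment on putObjectMultipartStreamParallel) it buffers roughly NumThreads * PartSize bytes, scaled by GOGC/100. A minimal caller sketch; the endpoint, credentials, and bucket are placeholders borrowed from the test config above:

```
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minio", "miniostorage", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Unknown size (-1) forces the multipart path; with the new option
	// set and NumThreads > 1, parts are filled serially from the reader
	// and uploaded in parallel.
	_, err = client.PutObject(context.Background(), "gts", "backup.tar",
		os.Stdin, -1, minio.PutObjectOptions{
			ConcurrentStreamParts: true,
			NumThreads:            4,
			PartSize:              16 << 20, // 16 MiB per buffered part
		})
	if err != nil {
		log.Fatal(err)
	}
}
```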

View File

@ -24,7 +24,6 @@
"context" "context"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"strings" "strings"
"sync" "sync"
@ -107,7 +106,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil
} }
} else { } else {
f, err := ioutil.TempFile("", "s3-putsnowballobjects-*") f, err := os.CreateTemp("", "s3-putsnowballobjects-*")
if err != nil { if err != nil {
return err return err
} }

View File

@ -316,8 +316,6 @@ type completeMultipartUploadResult struct {
// CompletePart sub container lists individual part numbers and their // CompletePart sub container lists individual part numbers and their
// md5sum, part of completeMultipartUpload. // md5sum, part of completeMultipartUpload.
type CompletePart struct { type CompletePart struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
// Part number identifies the part. // Part number identifies the part.
PartNumber int PartNumber int
ETag string ETag string

View File

@ -41,8 +41,8 @@
// Constants for file header info. // Constants for file header info.
const ( const (
CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
CSVFileHeaderInfoIgnore = "IGNORE" CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
CSVFileHeaderInfoUse = "USE" CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE"
) )
// SelectCompressionType - is the parameter for what type of compression is // SelectCompressionType - is the parameter for what type of compression is
@ -52,15 +52,15 @@
// Constants for compression types under select API. // Constants for compression types under select API.
const ( const (
SelectCompressionNONE SelectCompressionType = "NONE" SelectCompressionNONE SelectCompressionType = "NONE"
SelectCompressionGZIP = "GZIP" SelectCompressionGZIP SelectCompressionType = "GZIP"
SelectCompressionBZIP = "BZIP2" SelectCompressionBZIP SelectCompressionType = "BZIP2"
// Non-standard compression schemes, supported by MinIO hosts: // Non-standard compression schemes, supported by MinIO hosts:
SelectCompressionZSTD = "ZSTD" // Zstandard compression. SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression.
SelectCompressionLZ4 = "LZ4" // LZ4 Stream SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream
SelectCompressionS2 = "S2" // S2 Stream SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream
SelectCompressionSNAPPY = "SNAPPY" // Snappy stream SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
) )
// CSVQuoteFields - is the parameter for how CSV fields are quoted. // CSVQuoteFields - is the parameter for how CSV fields are quoted.
@ -69,7 +69,7 @@
// Constants for csv quote styles. // Constants for csv quote styles.
const ( const (
CSVQuoteFieldsAlways CSVQuoteFields = "Always" CSVQuoteFieldsAlways CSVQuoteFields = "Always"
CSVQuoteFieldsAsNeeded = "AsNeeded" CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
) )
// QueryExpressionType - is of what syntax the expression is, this should only // QueryExpressionType - is of what syntax the expression is, this should only
@ -87,7 +87,7 @@
// Constants for JSONTypes. // Constants for JSONTypes.
const ( const (
JSONDocumentType JSONType = "DOCUMENT" JSONDocumentType JSONType = "DOCUMENT"
JSONLinesType = "LINES" JSONLinesType JSONType = "LINES"
) )
// ParquetInputOptions parquet input specific options // ParquetInputOptions parquet input specific options
@ -378,8 +378,8 @@ func (o SelectObjectOptions) Header() http.Header {
// Constants for input data types. // Constants for input data types.
const ( const (
SelectObjectTypeCSV SelectObjectType = "CSV" SelectObjectTypeCSV SelectObjectType = "CSV"
SelectObjectTypeJSON = "JSON" SelectObjectTypeJSON SelectObjectType = "JSON"
SelectObjectTypeParquet = "Parquet" SelectObjectTypeParquet SelectObjectType = "Parquet"
) )
// preludeInfo is used for keeping track of necessary information from the // preludeInfo is used for keeping track of necessary information from the
@ -416,7 +416,7 @@ type StatsMessage struct {
const ( const (
errorMsg messageType = "error" errorMsg messageType = "error"
commonMsg = "event" commonMsg messageType = "event"
) )
// eventType represents the type of event. // eventType represents the type of event.
@ -425,9 +425,9 @@ type StatsMessage struct {
// list of event-types returned by Select API. // list of event-types returned by Select API.
const ( const (
endEvent eventType = "End" endEvent eventType = "End"
recordsEvent = "Records" recordsEvent eventType = "Records"
progressEvent = "Progress" progressEvent eventType = "Progress"
statsEvent = "Stats" statsEvent eventType = "Stats"
) )
// contentType represents content type of event. // contentType represents content type of event.
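
All of these constant blocks had the same latent issue: only the first constant in each group carried the named type, so the rest were untyped string constants. A small sketch of the difference, reusing the messageType pair from above:

```
package main

import "fmt"

type messageType string

const (
	errorMsg  messageType = "error"
	commonMsg             = "event" // untyped string constant, NOT messageType
)

func main() {
	// Untyped constants still convert implicitly at use sites, which is
	// why the old declarations compiled, but they lose type identity:
	fmt.Printf("%T %T\n", errorMsg, commonMsg) // main.messageType string
}
```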

View File

@ -25,7 +25,6 @@
"fmt" "fmt"
"hash/crc32" "hash/crc32"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"net" "net"
"net/http" "net/http"
@ -119,7 +118,7 @@ type Options struct {
// Global constants. // Global constants.
const ( const (
libraryName = "minio-go" libraryName = "minio-go"
libraryVersion = "v7.0.44" libraryVersion = "v7.0.47"
) )
// User Agent should always following the below style. // User Agent should always following the below style.
@ -635,7 +634,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
} }
// Read the body to be saved later. // Read the body to be saved later.
errBodyBytes, err := ioutil.ReadAll(res.Body) errBodyBytes, err := io.ReadAll(res.Body)
// res.Body should be closed // res.Body should be closed
closeResponse(res) closeResponse(res)
if err != nil { if err != nil {
@ -644,14 +643,14 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
// Save the body. // Save the body.
errBodySeeker := bytes.NewReader(errBodyBytes) errBodySeeker := bytes.NewReader(errBodyBytes)
res.Body = ioutil.NopCloser(errBodySeeker) res.Body = io.NopCloser(errBodySeeker)
// For errors verify if its retryable otherwise fail quickly. // For errors verify if its retryable otherwise fail quickly.
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
// Save the body back again. // Save the body back again.
errBodySeeker.Seek(0, 0) // Seek back to starting point. errBodySeeker.Seek(0, 0) // Seek back to starting point.
res.Body = ioutil.NopCloser(errBodySeeker) res.Body = io.NopCloser(errBodySeeker)
// Bucket region if set in error response and the error // Bucket region if set in error response and the error
// code dictates invalid region, we can retry the request // code dictates invalid region, we can retry the request
@ -814,7 +813,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
if metadata.contentLength == 0 { if metadata.contentLength == 0 {
req.Body = nil req.Body = nil
} else { } else {
req.Body = ioutil.NopCloser(metadata.contentBody) req.Body = io.NopCloser(metadata.contentBody)
} }
// Set incoming content-length. // Set incoming content-length.
@ -846,7 +845,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request
// Additionally, we also look if the initialized client is secure, // Additionally, we also look if the initialized client is secure,
// if yes then we don't need to perform streaming signature. // if yes then we don't need to perform streaming signature.
req = signer.StreamingSignV4(req, accessKeyID, req = signer.StreamingSignV4(req, accessKeyID,
secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher())
default: default:
// Set sha256 sum for signature calculation only with signature version '4'. // Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload shaHeader := unsignedPayload

View File

@ -31,7 +31,6 @@
"hash" "hash"
"hash/crc32" "hash/crc32"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
@ -346,7 +345,7 @@ func getDataReader(fileName string) io.ReadCloser {
if _, ok := dataFileCRC32[fileName]; !ok { if _, ok := dataFileCRC32[fileName]; !ok {
dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size))
} }
return ioutil.NopCloser(newRandomReader(size, size)) return io.NopCloser(newRandomReader(size, size))
} }
reader, _ := os.Open(getMintDataDirFilePath(fileName)) reader, _ := os.Open(getMintDataDirFilePath(fileName))
if _, ok := dataFileCRC32[fileName]; !ok { if _, ok := dataFileCRC32[fileName]; !ok {
@ -989,7 +988,7 @@ function := "GetObject()"
for _, testFile := range testFiles { for _, testFile := range testFiles {
r := getDataReader(testFile) r := getDataReader(testFile)
buf, err := ioutil.ReadAll(r) buf, err := io.ReadAll(r)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err) logError(testName, function, args, startTime, "", "unexpected failure", err)
return return
@ -1131,7 +1130,7 @@ function := "GetObject()"
var errs [n]error var errs [n]error
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
r := newRandomReader(int64((1<<20)*i+i), int64(i)) r := newRandomReader(int64((1<<20)*i+i), int64(i))
buf, err := ioutil.ReadAll(r) buf, err := io.ReadAll(r)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err) logError(testName, function, args, startTime, "", "unexpected failure", err)
return return
@ -1271,7 +1270,7 @@ function := "CopyObject()"
testFiles := []string{"datafile-1-b", "datafile-10-kB"} testFiles := []string{"datafile-1-b", "datafile-10-kB"}
for _, testFile := range testFiles { for _, testFile := range testFiles {
r := getDataReader(testFile) r := getDataReader(testFile)
buf, err := ioutil.ReadAll(r) buf, err := io.ReadAll(r)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err) logError(testName, function, args, startTime, "", "unexpected failure", err)
return return
@ -1304,7 +1303,7 @@ function := "CopyObject()"
return return
} }
oldestContent, err := ioutil.ReadAll(reader) oldestContent, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
return return
@ -1338,7 +1337,7 @@ function := "CopyObject()"
} }
defer readerCopy.Close() defer readerCopy.Close()
newestContent, err := ioutil.ReadAll(readerCopy) newestContent, err := io.ReadAll(readerCopy)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
return return
@ -1408,7 +1407,7 @@ function := "CopyObject()"
testFiles := []string{"datafile-10-kB"} testFiles := []string{"datafile-10-kB"}
for _, testFile := range testFiles { for _, testFile := range testFiles {
r := getDataReader(testFile) r := getDataReader(testFile)
buf, err := ioutil.ReadAll(r) buf, err := io.ReadAll(r)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err) logError(testName, function, args, startTime, "", "unexpected failure", err)
return return
@ -1441,7 +1440,7 @@ function := "CopyObject()"
return return
} }
oldestContent, err := ioutil.ReadAll(reader) oldestContent, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err)
return return
@ -1491,7 +1490,7 @@ function := "CopyObject()"
} }
defer readerCopy.Close() defer readerCopy.Close()
newestContent, err := ioutil.ReadAll(readerCopy) newestContent, err := io.ReadAll(readerCopy)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err)
return return
@ -1571,7 +1570,7 @@ function := "ComposeObject()"
for _, testFile := range testFiles { for _, testFile := range testFiles {
r := getDataReader(testFile) r := getDataReader(testFile)
buf, err := ioutil.ReadAll(r) buf, err := io.ReadAll(r)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "unexpected failure", err) logError(testName, function, args, startTime, "", "unexpected failure", err)
return return
@ -1633,7 +1632,7 @@ function := "ComposeObject()"
} }
defer readerCopy.Close() defer readerCopy.Close()
copyContentBytes, err := ioutil.ReadAll(readerCopy) copyContentBytes, err := io.ReadAll(readerCopy)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err)
return return
@ -1733,12 +1732,39 @@ function := "DeleteObject()"
logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err)
return return
} }
// test delete marker version id is non-null
err = c.RemoveBucket(context.Background(), bucketName) _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "CleanupBucket failed", err) logError(testName, function, args, startTime, "", "PutObject failed", err)
return return
} }
// create delete marker
err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
if err != nil {
logError(testName, function, args, startTime, "", "DeleteObject failed", err)
return
}
objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
idx := 0
for info := range objectsInfo {
if info.Err != nil {
logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err)
return
}
if idx == 0 {
if !info.IsDeleteMarker {
logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err)
return
}
if info.VersionID == "" {
logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err)
return
}
}
idx++
}
defer cleanupBucket(bucketName, c)
successLogger(testName, function, args, startTime).Info() successLogger(testName, function, args, startTime).Info()
} }
@ -2461,7 +2487,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -2982,7 +3008,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
fileName := getMintDataDirFilePath("datafile-129-MB") fileName := getMintDataDirFilePath("datafile-129-MB")
if fileName == "" { if fileName == "" {
// Make a temp file with minPartSize bytes of data. // Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err) logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return return
@ -3091,7 +3117,7 @@ function = "MakeBucket(bucketName, location)"
fName := getMintDataDirFilePath("datafile-129-MB") fName := getMintDataDirFilePath("datafile-129-MB")
if fName == "" { if fName == "" {
// Make a temp file with minPartSize bytes of data. // Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err) logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return return
@ -3257,7 +3283,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
fName := getMintDataDirFilePath("datafile-1-MB") fName := getMintDataDirFilePath("datafile-1-MB")
if fName == "" { if fName == "" {
// Make a temp file with 1 MiB bytes of data. // Make a temp file with 1 MiB bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err) logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return return
@ -3357,7 +3383,7 @@ function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)"
fName := getMintDataDirFilePath("datafile-1-MB") fName := getMintDataDirFilePath("datafile-1-MB")
if fName == "" { if fName == "" {
// Make a temp file with 1 MiB bytes of data. // Make a temp file with 1 MiB bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "Temp file creation failed", err) logError(testName, function, args, startTime, "", "Temp file creation failed", err)
return return
@ -3621,7 +3647,7 @@ function := "GetObject(bucketName, objectName)"
logError(testName, function, args, startTime, "", "file.Open failed", err) logError(testName, function, args, startTime, "", "file.Open failed", err)
return return
} }
want, err := ioutil.ReadAll(zfr) want, err := io.ReadAll(zfr)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "fzip file read failed", err) logError(testName, function, args, startTime, "", "fzip file read failed", err)
return return
@ -3638,7 +3664,7 @@ function := "GetObject(bucketName, objectName)"
} }
return return
} }
got, err := ioutil.ReadAll(r) got, err := io.ReadAll(r)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -3722,7 +3748,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -3885,7 +3911,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -4062,7 +4088,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -4181,7 +4207,7 @@ function := "PresignedPostPolicy(policy)"
metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -4245,7 +4271,7 @@ function := "PresignedPostPolicy(policy)"
filePath := getMintDataDirFilePath("datafile-33-kB") filePath := getMintDataDirFilePath("datafile-33-kB")
if filePath == "" { if filePath == "" {
// Make a temp file with 33 KB data. // Make a temp file with 33 KB data.
file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err) logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return return
@ -4588,7 +4614,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -4770,7 +4796,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -4944,7 +4970,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -5127,7 +5153,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -6138,7 +6164,7 @@ functionAll += ", " + function
return return
} }
newReadBytes, err := ioutil.ReadAll(newReader) newReadBytes, err := io.ReadAll(newReader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -6269,7 +6295,7 @@ functionAll += ", " + function
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
return return
} }
newPresignedBytes, err := ioutil.ReadAll(resp.Body) newPresignedBytes, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
return return
@ -6312,7 +6338,7 @@ functionAll += ", " + function
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
return return
} }
newPresignedBytes, err = ioutil.ReadAll(resp.Body) newPresignedBytes, err = io.ReadAll(resp.Body)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
return return
@ -6372,7 +6398,7 @@ functionAll += ", " + function
return return
} }
newReadBytes, err = ioutil.ReadAll(newReader) newReadBytes, err = io.ReadAll(newReader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
return return
@ -6428,7 +6454,7 @@ functionAll += ", " + function
return return
} }
newReadBytes, err = ioutil.ReadAll(newReader) newReadBytes, err = io.ReadAll(newReader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err)
return return
@ -6652,7 +6678,7 @@ function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
} }
args["fileToUpload"] = fileName args["fileToUpload"] = fileName
} else { } else {
tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "TempFile create failed", err) logError(testName, function, args, startTime, "", "TempFile create failed", err)
return return
@ -6916,7 +6942,7 @@ function := "FPutObject(bucketName, objectName, fileName, opts)"
defer cleanupBucket(bucketName, c) defer cleanupBucket(bucketName, c)
// Make a temp file with 11*1024*1024 bytes of data. // Make a temp file with 11*1024*1024 bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "TempFile creation failed", err) logError(testName, function, args, startTime, "", "TempFile creation failed", err)
return return
@ -7145,7 +7171,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -7299,7 +7325,7 @@ function := "GetObject(bucketName, objectName)"
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName args["objectName"] = objectName
buf, err := ioutil.ReadAll(reader) buf, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -7837,7 +7863,7 @@ function = "CopyObject(dst, src)"
} }
defer reader.Close() defer reader.Close()
decBytes, err := ioutil.ReadAll(reader) decBytes, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err)
return return
@ -7915,7 +7941,7 @@ function := "CopyObject(destination, source)"
return return
} }
decBytes, err := ioutil.ReadAll(reader) decBytes, err := io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -7955,7 +7981,7 @@ function := "CopyObject(destination, source)"
return return
} }
decBytes, err = ioutil.ReadAll(reader) decBytes, err = io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -7994,7 +8020,7 @@ function := "CopyObject(destination, source)"
} }
defer reader.Close() defer reader.Close()
decBytes, err = ioutil.ReadAll(reader) decBytes, err = io.ReadAll(reader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -11040,7 +11066,7 @@ functionAll += ", " + function
return return
} }
newReadBytes, err := ioutil.ReadAll(newReader) newReadBytes, err := io.ReadAll(newReader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -11146,7 +11172,7 @@ functionAll += ", " + function
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
return return
} }
newPresignedBytes, err := ioutil.ReadAll(resp.Body) newPresignedBytes, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -11185,7 +11211,7 @@ functionAll += ", " + function
logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
return return
} }
newPresignedBytes, err = ioutil.ReadAll(resp.Body) newPresignedBytes, err = io.ReadAll(resp.Body)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed", err) logError(testName, function, args, startTime, "", "ReadAll failed", err)
return return
@ -11239,7 +11265,7 @@ functionAll += ", " + function
return return
} }
newReadBytes, err = ioutil.ReadAll(newReader) newReadBytes, err = io.ReadAll(newReader)
if err != nil { if err != nil {
logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err)
return return
@ -11553,7 +11579,7 @@ function := "GetObject(ctx, bucketName, objectName, fileName)"
} }
for _, test := range tests { for _, test := range tests {
wantRC := getDataReader("datafile-129-MB") wantRC := getDataReader("datafile-129-MB")
io.CopyN(ioutil.Discard, wantRC, test.start) io.CopyN(io.Discard, wantRC, test.start)
want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
opts := minio.GetObjectOptions{} opts := minio.GetObjectOptions{}
opts.SetRange(test.start, test.end) opts.SetRange(test.start, test.end)
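For context on the pattern repeated throughout these hunks: Go 1.16 deprecated the io/ioutil package and moved its helpers into io and os. A minimal sketch of the mapping this diff applies everywhere (the reader and file name below are hypothetical, for demonstration only):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	r := bytes.NewReader([]byte("payload"))

	// ioutil.ReadAll(r)     -> io.ReadAll(r)
	buf, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}

	// ioutil.Discard        -> io.Discard
	io.Copy(io.Discard, bytes.NewReader(buf))

	// ioutil.NopCloser(r)   -> io.NopCloser(r)
	rc := io.NopCloser(bytes.NewReader(buf))
	rc.Close()

	// ioutil.ReadFile(name) -> os.ReadFile(name)
	if data, err := os.ReadFile("config.json"); err == nil {
		fmt.Println(len(data))
	}
	fmt.Println(len(buf))
}
```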

View File

@@ -24,7 +24,6 @@
 	"encoding/xml"
 	"errors"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -139,7 +138,7 @@ func closeResponse(resp *http.Response) {
 		// Without this closing connection would disallow re-using
 		// the same connection for future uses.
 		// - http://stackoverflow.com/a/17961593/4465767
-		io.Copy(ioutil.Discard, resp.Body)
+		io.Copy(io.Discard, resp.Body)
 		resp.Body.Close()
 	}
 }
@@ -191,7 +190,7 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume
 	defer closeResponse(resp)
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleResponse{}, err
 		}
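The closeResponse helper above relies on a standard net/http idiom: draining the body to EOF before closing it lets the Transport return the keep-alive connection to the pool instead of tearing it down. A minimal standalone sketch (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// closeResponse drains and closes a response body so the underlying
// keep-alive connection can be reused for the next request.
func closeResponse(resp *http.Response) {
	if resp != nil && resp.Body != nil {
		io.Copy(io.Discard, resp.Body) // read to EOF first
		resp.Body.Close()
	}
}

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	closeResponse(resp)
}
```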

View File

@@ -22,7 +22,6 @@
 	"encoding/xml"
 	"fmt"
 	"io"
-	"io/ioutil"
 )

 // ErrorResponse - Is the typed error returned.
@@ -88,7 +87,7 @@ func xmlDecoder(body io.Reader, v interface{}) error {
 func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
 	// read the whole body (up to 1MB)
 	const maxBodyLength = 1 << 20
-	body, err := ioutil.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+	body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
 	if err != nil {
 		return nil, err
 	}
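xmlDecodeAndBody caps how much of an untrusted response body it will buffer, and io.LimitReader is what enforces that 1 MiB ceiling. A self-contained sketch of the same bounded-read technique:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	const maxBodyLength = 1 << 20 // 1 MiB ceiling

	// A 5 MiB "body" standing in for an untrusted HTTP response.
	body := bytes.NewReader(make([]byte, 5<<20))

	data, err := io.ReadAll(io.LimitReader(body, maxBodyLength))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data)) // 1048576 -- bytes past the cap are never read
}
```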

View File

@@ -18,7 +18,6 @@
 package credentials

 import (
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -114,6 +113,7 @@ type hostConfig struct {
 type config struct {
 	Version string                `json:"version"`
 	Hosts   map[string]hostConfig `json:"hosts"`
+	Aliases map[string]hostConfig `json:"aliases"`
 }

 // loadAliass loads from the file pointed to by shared credentials filename for alias.
@@ -123,12 +123,17 @@ func loadAlias(filename, alias string) (hostConfig, error) {
 	cfg := &config{}
 	json := jsoniter.ConfigCompatibleWithStandardLibrary

-	configBytes, err := ioutil.ReadFile(filename)
+	configBytes, err := os.ReadFile(filename)
 	if err != nil {
 		return hostConfig{}, err
 	}
 	if err = json.Unmarshal(configBytes, cfg); err != nil {
 		return hostConfig{}, err
 	}
+	if cfg.Version == "10" {
+		return cfg.Aliases[alias], nil
+	}
 	return cfg.Hosts[alias], nil
 }
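The new Aliases field and version check mirror a layout change in mc's shared config file: version "10" files keep endpoints under an "aliases" key, while older versions use "hosts". A rough sketch of that lookup with trimmed-down types (the field set here is illustrative, not the full upstream struct):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type hostConfig struct {
	URL string `json:"url"`
}

type config struct {
	Version string                `json:"version"`
	Hosts   map[string]hostConfig `json:"hosts"`
	Aliases map[string]hostConfig `json:"aliases"`
}

// lookup returns the host entry for alias, honoring the v10 key rename.
func lookup(raw []byte, alias string) (hostConfig, error) {
	var cfg config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return hostConfig{}, err
	}
	if cfg.Version == "10" {
		return cfg.Aliases[alias], nil // new layout
	}
	return cfg.Hosts[alias], nil // legacy layout
}

func main() {
	legacy := []byte(`{"version":"9","hosts":{"play":{"url":"https://play.min.io"}}}`)
	v10 := []byte(`{"version":"10","aliases":{"play":{"url":"https://play.min.io"}}}`)

	a, _ := lookup(legacy, "play")
	b, _ := lookup(v10, "play")
	fmt.Println(a.URL, b.URL)
}
```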

View File

@@ -22,7 +22,7 @@
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"net/url"
@@ -106,7 +106,7 @@ func (m *IAM) Retrieve() (Value, error) {
 			Client:      m.Client,
 			STSEndpoint: endpoint,
 			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
-				token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
+				token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
 				if err != nil {
 					return nil, err
 				}
@@ -268,7 +268,7 @@ func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
 		return "", err
 	}
 	defer resp.Body.Close()
-	data, err := ioutil.ReadAll(resp.Body)
+	data, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return "", err
 	}
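The GetWebIDTokenExpiry callback above re-reads the JWT from the file named by AWS_WEB_IDENTITY_TOKEN_FILE on every credential refresh, so a rotated token file is picked up automatically. A minimal sketch of that callback shape (the temp-file setup is hypothetical, for demonstration):

```go
package main

import (
	"fmt"
	"os"
)

// tokenFromEnvFile re-reads the web identity JWT on every call, so a
// rotated token file is used at the next refresh.
func tokenFromEnvFile() (string, error) {
	token, err := os.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
	if err != nil {
		return "", err
	}
	return string(token), nil
}

func main() {
	// Hypothetical setup for demonstration only.
	tmp, _ := os.CreateTemp("", "jwt")
	tmp.WriteString("header.payload.signature")
	tmp.Close()
	os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", tmp.Name())

	tok, err := tokenFromEnvFile()
	fmt.Println(tok, err)
}
```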

View File

@@ -22,7 +22,7 @@
 	"encoding/xml"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -138,7 +138,7 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string,
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleWithClientGrantsResponse{}, err
 		}

View File

@@ -21,7 +21,7 @@
 	"bytes"
 	"encoding/xml"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -156,7 +156,7 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) {
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return value, err
 		}

View File

@@ -21,7 +21,6 @@
 	"encoding/xml"
 	"errors"
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
@@ -152,7 +151,7 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) {
 	}
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return Value{}, err
 		}

View File

@@ -22,7 +22,7 @@
 	"encoding/xml"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -155,7 +155,7 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession
 	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusOK {
 		var errResp ErrorResponse
-		buf, err := ioutil.ReadAll(resp.Body)
+		buf, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return AssumeRoleWithWebIdentityResponse{}, err
 		}

View File

@@ -34,19 +34,19 @@
 // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
 const (
 	ObjectCreatedAll EventType = "s3:ObjectCreated:*"
-	ObjectCreatedPut = "s3:ObjectCreated:Put"
-	ObjectCreatedPost = "s3:ObjectCreated:Post"
-	ObjectCreatedCopy = "s3:ObjectCreated:Copy"
-	ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
-	ObjectAccessedGet = "s3:ObjectAccessed:Get"
-	ObjectAccessedHead = "s3:ObjectAccessed:Head"
-	ObjectAccessedAll = "s3:ObjectAccessed:*"
-	ObjectRemovedAll = "s3:ObjectRemoved:*"
-	ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
-	ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
-	ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
-	BucketCreatedAll = "s3:BucketCreated:*"
-	BucketRemovedAll = "s3:BucketRemoved:*"
+	ObjectCreatedPut                     EventType = "s3:ObjectCreated:Put"
+	ObjectCreatedPost                    EventType = "s3:ObjectCreated:Post"
+	ObjectCreatedCopy                    EventType = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectAccessedGet                    EventType = "s3:ObjectAccessed:Get"
+	ObjectAccessedHead                   EventType = "s3:ObjectAccessed:Head"
+	ObjectAccessedAll                    EventType = "s3:ObjectAccessed:*"
+	ObjectRemovedAll                     EventType = "s3:ObjectRemoved:*"
+	ObjectRemovedDelete                  EventType = "s3:ObjectRemoved:Delete"
+	ObjectRemovedDeleteMarkerCreated     EventType = "s3:ObjectRemoved:DeleteMarkerCreated"
+	ObjectReducedRedundancyLostObject    EventType = "s3:ReducedRedundancyLostObject"
+	BucketCreatedAll                     EventType = "s3:BucketCreated:*"
+	BucketRemovedAll                     EventType = "s3:BucketRemoved:*"
 )

 // FilterRule - child of S3Key, a tag in the notification xml which
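The retyping above fixes a classic Go const-block pitfall: a constant with its own initializer and no explicit type is an untyped constant, not the type of the first entry in the block. A small sketch of the difference:

```go
package main

import "fmt"

type EventType string

const (
	ObjectCreatedAll  EventType = "s3:ObjectCreated:*"   // typed
	ObjectCreatedPut            = "s3:ObjectCreated:Put" // untyped: defaults to string
	ObjectCreatedPost EventType = "s3:ObjectCreated:Post" // typed, as after the fix
)

func main() {
	fmt.Printf("%T\n", ObjectCreatedAll)  // main.EventType
	fmt.Printf("%T\n", ObjectCreatedPut)  // string
	fmt.Printf("%T\n", ObjectCreatedPost) // main.EventType
}
```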

View File

@@ -700,6 +700,10 @@ type TargetMetrics struct {
 	PendingCount uint64 `json:"pendingReplicationCount"`
 	// Total number of failed operations including metadata updates
 	FailedCount uint64 `json:"failedReplicationCount"`
+	// Bandwidth limit in bytes/sec for this target
+	BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
+	// Current bandwidth used in bytes/sec for this target
+	CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
 }

 // Metrics represents inline replication metrics for a bucket.

View File

@@ -21,7 +21,6 @@
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"strconv"
 	"strings"
@@ -132,7 +131,7 @@ func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64,
 	prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime)

 	if req.Body == nil {
-		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+		req.Body = io.NopCloser(bytes.NewReader([]byte("")))
 	}

 	stReader := &StreamingUSReader{

View File

@@ -22,11 +22,12 @@
 	"encoding/hex"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
+
+	md5simd "github.com/minio/md5-simd"
 )

 // Reference for constants used below -
@@ -91,14 +92,14 @@ func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 {
 // buildChunkStringToSign - returns the string to sign given chunk data
 // and previous signature.
-func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
 	stringToSignParts := []string{
 		streamingPayloadHdr,
 		t.Format(iso8601DateFormat),
 		getScope(region, t, ServiceTypeS3),
 		previousSig,
 		emptySHA256,
-		hex.EncodeToString(sum256(chunkData)),
+		chunkChecksum,
 	}

 	return strings.Join(stringToSignParts, "\n")
@@ -106,13 +107,13 @@ func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData [
 // buildTrailerChunkStringToSign - returns the string to sign given chunk data
 // and previous signature.
-func buildTrailerChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string {
 	stringToSignParts := []string{
 		streamingTrailerHdr,
 		t.Format(iso8601DateFormat),
 		getScope(region, t, ServiceTypeS3),
 		previousSig,
-		hex.EncodeToString(sum256(chunkData)),
+		chunkChecksum,
 	}

 	return strings.Join(stringToSignParts, "\n")
@@ -149,21 +150,21 @@ func buildChunkHeader(chunkLen int64, signature string) []byte {
 }

 // buildChunkSignature - returns chunk signature for a given chunk and previous signature.
-func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region,
 	previousSignature, secretAccessKey string,
 ) string {
 	chunkStringToSign := buildChunkStringToSign(reqTime, region,
-		previousSignature, chunkData)
+		previousSignature, chunkCheckSum)
 	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
 	return getSignature(signingKey, chunkStringToSign)
 }

 // buildChunkSignature - returns chunk signature for a given chunk and previous signature.
-func buildTrailerChunkSignature(chunkData []byte, reqTime time.Time, region,
+func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region,
 	previousSignature, secretAccessKey string,
 ) string {
 	chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region,
-		previousSignature, chunkData)
+		previousSignature, chunkChecksum)
 	signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
 	return getSignature(signingKey, chunkStringToSign)
 }
@@ -203,12 +204,17 @@ type StreamingReader struct {
 	totalChunks   int
 	lastChunkSize int
 	trailer       http.Header
+	sh256         md5simd.Hasher
 }

 // signChunk - signs a chunk read from s.baseReader of chunkLen size.
 func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) {
 	// Compute chunk signature for next header
-	signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+	s.sh256.Reset()
+	s.sh256.Write(s.chunkBuf[:chunkLen])
+	chunckChecksum := hex.EncodeToString(s.sh256.Sum(nil))
+
+	signature := buildChunkSignature(chunckChecksum, s.reqTime,
 		s.region, s.prevSignature, s.secretAccessKey)

 	// For next chunk signature computation
@@ -240,8 +246,11 @@ func (s *StreamingReader) addSignedTrailer(h http.Header) {
 		s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
 	}

+	s.sh256.Reset()
+	s.sh256.Write(s.chunkBuf)
+	chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil))
 	// Compute chunk signature
-	signature := buildTrailerChunkSignature(s.chunkBuf, s.reqTime,
+	signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime,
 		s.region, s.prevSignature, s.secretAccessKey)

 	// For next chunk signature computation
@@ -274,13 +283,13 @@ func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
 // StreamingSignV4 - provides chunked upload signatureV4 support by
 // implementing io.Reader.
 func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
-	region string, dataLen int64, reqTime time.Time,
+	region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher,
 ) *http.Request {
 	// Set headers needed for streaming signature.
 	prepareStreamingRequest(req, sessionToken, dataLen, reqTime)

 	if req.Body == nil {
-		req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+		req.Body = io.NopCloser(bytes.NewReader([]byte("")))
 	}

 	stReader := &StreamingReader{
@@ -295,6 +304,7 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok
 		chunkNum:      1,
 		totalChunks:   int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
 		lastChunkSize: int(dataLen % payloadChunkSize),
+		sh256:         sh256,
 	}

 	if len(req.Trailer) > 0 {
 		stReader.trailer = req.Trailer
@@ -385,5 +395,9 @@ func (s *StreamingReader) Read(buf []byte) (int, error) {
 // Close - this method makes underlying io.ReadCloser's Close method available.
 func (s *StreamingReader) Close() error {
+	if s.sh256 != nil {
+		s.sh256.Close()
+		s.sh256 = nil
+	}
 	return s.baseReadCloser.Close()
 }
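The thread running through this refactor is that chunk checksums are now produced by a reusable hasher handed in by the caller (md5simd.Hasher in the diff) instead of a fresh sum256 allocation per chunk. A stdlib-only sketch of the same Reset/Write/Sum reuse pattern, which is an illustration rather than the upstream implementation:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
)

// chunkChecksum reuses one hash.Hash across chunks instead of
// allocating new SHA-256 state for every chunk.
func chunkChecksum(h hash.Hash, chunk []byte) string {
	h.Reset() // drop state left by the previous chunk
	h.Write(chunk)
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	h := sha256.New()
	for _, chunk := range [][]byte{[]byte("chunk-1"), []byte("chunk-2")} {
		fmt.Println(chunkChecksum(h, chunk))
	}
}
```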

View File

@@ -25,7 +25,7 @@
 )

 // expirationDateFormat date format for expiration key in json policy.
-const expirationDateFormat = "2006-01-02T15:04:05.999Z"
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"

 // policyCondition explanation:
 // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
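The one-character change above matters in Go time layouts: ".999" drops trailing zeros (and the dot entirely on whole seconds), while ".000" always emits three digits, which strict ISO 8601 policy parsers expect. A quick demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, 1, 17, 21, 42, 5, 0, time.UTC) // whole second, no fraction

	fmt.Println(t.Format("2006-01-02T15:04:05.999Z")) // 2023-01-17T21:42:05Z
	fmt.Println(t.Format("2006-01-02T15:04:05.000Z")) // 2023-01-17T21:42:05.000Z
}
```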

View File

@@ -23,7 +23,6 @@
 import (
 	"crypto/tls"
 	"crypto/x509"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"os"
@@ -73,7 +72,7 @@ func mustGetSystemCertPool() *x509.CertPool {
 	}
 	if f := os.Getenv("SSL_CERT_FILE"); f != "" {
 		rootCAs := mustGetSystemCertPool()
-		data, err := ioutil.ReadFile(f)
+		data, err := os.ReadFile(f)
 		if err == nil {
 			rootCAs.AppendCertsFromPEM(data)
 		}

View File

@@ -28,7 +28,6 @@
 	"fmt"
 	"hash"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"net"
 	"net/http"
@@ -142,7 +141,7 @@ func closeResponse(resp *http.Response) {
 		// Without this closing connection would disallow re-using
 		// the same connection for future uses.
 		// - http://stackoverflow.com/a/17961593/4465767
-		io.Copy(ioutil.Discard, resp.Body)
+		io.Copy(io.Discard, resp.Body)
 		resp.Body.Close()
 	}
 }

vendor/modules.txt
View File

@@ -21,8 +21,8 @@ codeberg.org/gruf/go-cache/v3/ttl
 # codeberg.org/gruf/go-debug v1.2.0
 ## explicit; go 1.16
 codeberg.org/gruf/go-debug
-# codeberg.org/gruf/go-errors/v2 v2.0.2
-## explicit; go 1.16
+# codeberg.org/gruf/go-errors/v2 v2.1.1
+## explicit; go 1.19
 codeberg.org/gruf/go-errors/v2
 # codeberg.org/gruf/go-fastcopy v1.1.2
 ## explicit; go 1.17
@@ -297,7 +297,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.44
+# github.com/minio/minio-go/v7 v7.0.47
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials
@@ -709,7 +709,7 @@ golang.org/x/net/internal/socket
 golang.org/x/net/ipv4
 golang.org/x/net/ipv6
 golang.org/x/net/publicsuffix
-# golang.org/x/oauth2 v0.3.0
+# golang.org/x/oauth2 v0.4.0
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal