Optimize the apiSubmit function to ensure that it doesn't lead to errors from large insertions

David Dworken 2022-11-26 10:31:43 -08:00
parent 8d87110405
commit c603537137
4 changed files with 28 additions and 20 deletions
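
In short: the server previously inserted every submitted history entry with a single GORM `Create` call, which fails once the generated INSERT statement needs more than 65535 bind parameters (the cap imposed by Postgres's extended query protocol). The fix below wraps the insert in a transaction and splits it into 1000-entry chunks via a new shared `Chunks` helper. For comparison, GORM v2 ships a built-in batching method, `CreateInBatches`, that does the same splitting internally; here is a minimal standalone sketch (the `HistoryEntry` type and DSN are placeholders, not hishtory's actual schema):

package main

import (
	"context"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// HistoryEntry is a placeholder standing in for hishtory's real entry schema.
type HistoryEntry struct {
	ID      uint
	Command string
}

func main() {
	db, err := gorm.Open(postgres.Open("host=localhost dbname=example"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	entries := make([]HistoryEntry, 100000)
	// CreateInBatches issues one INSERT per 1000-row batch, keeping each
	// statement safely under the 65535-bind-parameter cap.
	err = db.WithContext(context.Background()).Transaction(func(tx *gorm.DB) error {
		return tx.CreateInBatches(&entries, 1000).Error
	})
	if err != nil {
		panic(err)
	}
}

The commit opts for an explicit `Chunks` helper instead, which the client can also reuse for batching HTTP uploads in `Reupload`.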

View File

@@ -179,12 +179,20 @@ func apiSubmitHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
 		panic(fmt.Errorf("found no devices associated with user_id=%s, can't save history entry", entries[0].UserId))
 	}
 	fmt.Printf("apiSubmitHandler: Found %d devices\n", len(devices))
-	for _, device := range devices {
-		for _, entry := range entries {
-			entry.DeviceId = device.DeviceId
+	err = GLOBAL_DB.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
+		for _, device := range devices {
+			for _, entry := range entries {
+				entry.DeviceId = device.DeviceId
+			}
+			// Chunk the inserts to prevent the `extended protocol limited to 65535 parameters` error
+			for _, entriesChunk := range shared.Chunks(entries, 1000) {
+				checkGormResult(tx.Create(&entriesChunk))
+			}
 		}
-		// TODO(bug): This fails with gorm if entries is VERY large. Chunk it.
-		checkGormResult(GLOBAL_DB.WithContext(ctx).Create(&entries))
+		return nil
+	})
+	if err != nil {
+		panic(fmt.Errorf("failed to execute transaction to add entries to DB: %v", err))
 	}
 	GLOBAL_STATSD.Count("hishtory.submit", int64(len(devices)), []string{}, 1.0)
 }
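
For context on the chunk size: the extended protocol encodes a statement's bind-parameter count as a 16-bit value, and GORM's bulk `Create` binds one parameter per column per row, so the safe row count depends on how many columns the table has. A sketch of that arithmetic (the column count of 12 is illustrative, not taken from hishtory's schema):

package main

import "fmt"

// maxChunkSize returns the largest number of rows that fit in a single bulk
// INSERT without exceeding Postgres's 65535-bind-parameter limit.
func maxChunkSize(numColumns int) int {
	const maxParams = 65535 // the parameter count is a 16-bit value
	return maxParams / numColumns
}

func main() {
	fmt.Println(maxChunkSize(12)) // 5461, so chunks of 1000 leave ample headroom
}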

View File

@@ -1003,7 +1003,7 @@ func Reupload(ctx *context.Context) error {
 	if err != nil {
 		return fmt.Errorf("failed to reupload due to failed search: %v", err)
 	}
-	for _, chunk := range chunks(entries, 100) {
+	for _, chunk := range shared.Chunks(entries, 100) {
 		jsonValue, err := EncryptAndMarshal(config, chunk)
 		if err != nil {
 			return fmt.Errorf("failed to reupload due to failed encryption: %v", err)
@@ -1016,18 +1016,6 @@ func Reupload(ctx *context.Context) error {
 	return nil
 }
 
-func chunks[k any](slice []k, chunkSize int) [][]k {
-	var chunks [][]k
-	for i := 0; i < len(slice); i += chunkSize {
-		end := i + chunkSize
-		if end > len(slice) {
-			end = len(slice)
-		}
-		chunks = append(chunks, slice[i:end])
-	}
-	return chunks
-}
-
 func RetrieveAdditionalEntriesFromRemote(ctx *context.Context) error {
 	db := hctx.GetDb(ctx)
 	config := hctx.GetConf(ctx)

View File

@@ -11,6 +11,7 @@ import (
 	"github.com/ddworken/hishtory/client/data"
 	"github.com/ddworken/hishtory/client/hctx"
 	"github.com/ddworken/hishtory/shared"
+	"github.com/ddworken/hishtory/shared/testutils"
 )
@@ -360,13 +361,12 @@ func TestChunks(t *testing.T) {
 		{[]int{1, 2, 3, 4, 5}, 4, [][]int{{1, 2, 3, 4}, {5}}},
 	}
 	for _, tc := range testcases {
-		actual := chunks(tc.input, tc.chunkSize)
+		actual := shared.Chunks(tc.input, tc.chunkSize)
 		if !reflect.DeepEqual(actual, tc.output) {
 			t.Fatal("chunks failure")
 		}
 	}
 }
 
 func TestZshWeirdness(t *testing.T) {
 	testcases := []struct {
 		input string

View File

@@ -91,3 +91,15 @@ type Feedback struct {
 	Date     time.Time `json:"date" gorm:"not null"`
 	Feedback string    `json:"feedback"`
 }
+
+func Chunks[k any](slice []k, chunkSize int) [][]k {
+	var chunks [][]k
+	for i := 0; i < len(slice); i += chunkSize {
+		end := i + chunkSize
+		if end > len(slice) {
+			end = len(slice)
+		}
+		chunks = append(chunks, slice[i:end])
+	}
+	return chunks
+}
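
A quick usage sketch of the now-exported generic, mirroring the cases in TestChunks: the final chunk is simply shorter when the slice length isn't a multiple of the chunk size, and empty input yields a nil slice.

package main

import (
	"fmt"

	"github.com/ddworken/hishtory/shared"
)

func main() {
	fmt.Println(shared.Chunks([]int{1, 2, 3, 4, 5}, 2)) // [[1 2] [3 4] [5]]
	fmt.Println(shared.Chunks([]int{1, 2, 3, 4, 5}, 4)) // [[1 2 3 4] [5]]
	fmt.Println(shared.Chunks([]int{}, 3))              // [] (nil)
}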