perf: write to the idx cache (#2225)

parent 8b17690359
commit 0def653484
@@ -1,3 +1,4 @@
+use std::collections::HashMap;
 use std::fmt::Debug;
 use std::ops::Range;
 
@@ -519,6 +520,15 @@ impl Database for Postgres {
     async fn add_records(&self, user: &User, records: &[Record<EncryptedData>]) -> DbResult<()> {
         let mut tx = self.pool.begin().await.map_err(fix_error)?;
 
+        // We won't have uploaded this data if it wasn't the max. Therefore, we can deduce the max
+        // idx without having to make further database queries. Doing the query on this small
+        // amount of data should be much, much faster.
+        //
+        // Worst case, say we get this wrong. We end up caching data that isn't actually the max
+        // idx, so clients upload again. The cache logic can be verified with a sql query anyway :)
+
+        let mut heads = HashMap::<(HostId, &str), u64>::new();
+
         for i in records {
             let id = atuin_common::utils::uuid_v7();
 
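The new comment block encodes the key idea: within a single push, the largest `idx` seen per `(host, tag)` pair must be the new head, so the server can update the cache without querying the store again. A minimal standalone sketch of that fold, using plain strings in place of the real `HostId` type (an assumption made purely to keep the example self-contained):

use std::collections::HashMap;

// Toy records: (host, tag, idx). In the real code these are
// `Record<EncryptedData>` values taken from the push.
fn max_heads(records: &[(&str, &str, u64)]) -> HashMap<(String, String), u64> {
    let mut heads = HashMap::new();
    for (host, tag, idx) in records {
        heads
            .entry((host.to_string(), tag.to_string()))
            .and_modify(|e: &mut u64| {
                if *idx > *e {
                    *e = *idx;
                }
            })
            .or_insert(*idx);
    }
    heads
}

fn main() {
    let push = [("h1", "history", 3), ("h1", "history", 7), ("h1", "kv", 2)];
    let heads = max_heads(&push);
    // ("h1", "history") caches 7, the max idx in the batch: one cache row
    // per (host, tag) pair, however many records were pushed.
    assert_eq!(heads.get(&("h1".to_string(), "history".to_string())), Some(&7));
}

The map stays small because its size is bounded by the number of distinct (host, tag) pairs in the push, not by the number of records.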
@@ -542,6 +552,34 @@ impl Database for Postgres {
             .execute(&mut *tx)
             .await
             .map_err(fix_error)?;
 
+            // we're already iterating sooooo
+            heads
+                .entry((i.host.id, &i.tag))
+                .and_modify(|e| {
+                    if i.idx > *e {
+                        *e = i.idx
+                    }
+                })
+                .or_insert(i.idx);
         }
+
+        // we've built the map of heads for this push, so commit it to the database
+        for ((host, tag), idx) in heads {
+            sqlx::query(
+                "insert into store_idx_cache
+                    (user_id, host, tag, idx)
+                values ($1, $2, $3, $4)
+                on conflict(user_id, host, tag) do update set idx = $4
+                ",
+            )
+            .bind(user.id)
+            .bind(host)
+            .bind(tag)
+            .bind(idx as i64)
+            .execute(&mut *tx)
+            .await
+            .map_err(fix_error)?;
+        }
+
         tx.commit().await.map_err(fix_error)?;
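The comment above notes that the cache logic can be checked with a SQL query. A sketch of such a check, assuming the records themselves live in a `store` table keyed by the same `(user_id, host, tag)` columns (only `store_idx_cache` is visible in this diff, so the `store` schema here is an assumption):

-- Find cache rows whose idx disagrees with the true max in the store.
-- An empty result means the cache is consistent.
select c.user_id, c.host, c.tag, c.idx as cached_idx, s.max_idx
from store_idx_cache c
join (
    select user_id, host, tag, max(idx) as max_idx
    from store
    group by user_id, host, tag
) s
  on s.user_id = c.user_id
 and s.host = c.host
 and s.tag = c.tag
where c.idx <> s.max_idx;

As the diff's comment says, a stale row found this way is harmless: the client simply re-uploads.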
@@ -14,8 +14,10 @@ pub struct Mail {
 
     /// Configuration for the postmark api client
     /// This is what we use for Atuin Cloud, the forum, etc.
+    #[serde(default)]
     pub postmark: Postmark,
 
+    #[serde(default)]
     pub verification: MailVerification,
 }
 
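For reference, `#[serde(default)]` makes deserialization fall back to the field type's `Default` impl when the key is missing, so config files written before these mail sections existed still parse. A minimal sketch with invented field shapes (the real `Postmark` and `MailVerification` fields are not shown in this diff), using the `toml` crate purely for illustration:

use serde::Deserialize;

#[derive(Debug, Default, Deserialize)]
struct Postmark {
    token: String,
}

#[derive(Debug, Default, Deserialize)]
struct MailVerification {
    subject: String,
}

#[derive(Debug, Deserialize)]
struct Mail {
    #[serde(default)]
    postmark: Postmark,

    #[serde(default)]
    verification: MailVerification,
}

fn main() {
    // No mail keys at all: without #[serde(default)] this would fail with
    // "missing field `postmark`"; with it, both fields are Default::default().
    let mail: Mail = toml::from_str("").unwrap();
    println!("{mail:?}");
}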