Add new sync (#1093)
* Add record migration
* Add database functions for inserting history
No real tests yet :( I would like to avoid running postgres lol
* Add index handler, use UUIDs not strings
* Fix a bunch of tests, remove Option<Uuid>
* Add tests, all passing
* Working upload sync
* Record downloading works
* Sync download works
* Don't waste requests
* Use a page size for uploads, make it variable later
* Aaaaaand they're encrypted now too
* Add cek
* Allow reading tail across hosts
* Revert "Allow reading tail across hosts"
Not like that
This reverts commit 7b0c72e7e0.
* Handle multiple shards properly
* format
* Format and make clippy happy
* use some fancy types (#1098)
* use some fancy types
* fmt
* Goodbye horrible tuple
* Update atuin-server-postgres/migrations/20230623070418_records.sql
Co-authored-by: Conrad Ludgate <conradludgate@gmail.com>
* fmt
* Sort tests too because time sucks
* fix features
---------
Co-authored-by: Conrad Ludgate <conradludgate@gmail.com>
atuin-server-postgres/Cargo.toml
@@ -18,4 +18,5 @@ chrono = { workspace = true }
 serde = { workspace = true }
 sqlx = { workspace = true }
 async-trait = { workspace = true }
 uuid = { workspace = true }
+futures-util = "0.3"
atuin-server-postgres/build.rs (new file, +5)
@@ -0,0 +1,5 @@
+// generated by `sqlx migrate build-script`
+fn main() {
+    // trigger recompilation when a new migration is added
+    println!("cargo:rerun-if-changed=migrations");
+}
atuin-server-postgres/migrations/20230623070418_records.sql (new file, +15)
@@ -0,0 +1,15 @@
+-- Add migration script here
+create table records (
+    id uuid primary key, -- remember to use uuidv7 for happy indices <3
+    client_id uuid not null, -- I am too uncomfortable with the idea of a client-generated primary key
+    host uuid not null, -- a unique identifier for the host
+    parent uuid default null, -- the ID of the parent record, bearing in mind this is a linked list
+    timestamp bigint not null, -- not a timestamp type, as those do not have nanosecond precision
+    version text not null,
+    tag text not null, -- what is this? history, kv, whatever. Remember clients get a log per tag per host
+    data text not null, -- store the actual history data, encrypted. I don't wanna know!
+    cek text not null,
+
+    user_id bigint not null, -- allow multiple users
+    created_at timestamp not null default current_timestamp
+);
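The "uuidv7 for happy indices" comment is about B-tree locality: a random v4 id scatters inserts across the whole primary-key index, while a v7 id leads with a millisecond timestamp, so new rows land near the right-hand edge of the tree. A minimal sketch of what that buys, assuming the `uuid` crate (1.6+) with the `v7` feature enabled; atuin wraps this in `atuin_common::utils::uuid_v7()`:

use std::{thread, time::Duration};
use uuid::Uuid;

fn main() {
    let a = Uuid::now_v7();
    thread::sleep(Duration::from_millis(2)); // ensure a later millisecond
    let b = Uuid::now_v7();

    // The top 48 bits of a UUIDv7 encode a Unix millisecond timestamp,
    // so ids generated later compare greater, and primary-key inserts
    // stay append-mostly instead of touching random index pages.
    assert!(a < b);
    println!("{a}\n{b}");
}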
atuin-server-postgres/src/lib.rs
@@ -1,14 +1,14 @@
 use async_trait::async_trait;
+use atuin_common::record::{EncryptedData, HostId, Record, RecordId, RecordIndex};
 use atuin_server_database::models::{History, NewHistory, NewSession, NewUser, Session, User};
 use atuin_server_database::{Database, DbError, DbResult};
+use futures_util::TryStreamExt;
 use serde::{Deserialize, Serialize};
 use sqlx::postgres::PgPoolOptions;
 
 use sqlx::Row;
 
 use tracing::instrument;
-use wrappers::{DbHistory, DbSession, DbUser};
+use wrappers::{DbHistory, DbRecord, DbSession, DbUser};
 
 mod wrappers;
 
@@ -329,4 +329,102 @@ impl Database for Postgres {
         .map_err(fix_error)
         .map(|DbHistory(h)| h)
     }
+
+    #[instrument(skip_all)]
+    async fn add_records(&self, user: &User, records: &[Record<EncryptedData>]) -> DbResult<()> {
+        let mut tx = self.pool.begin().await.map_err(fix_error)?;
+
+        for i in records {
+            let id = atuin_common::utils::uuid_v7();
+
+            sqlx::query(
+                "insert into records
+                    (id, client_id, host, parent, timestamp, version, tag, data, cek, user_id)
+                values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+                on conflict do nothing
+                ",
+            )
+            .bind(id)
+            .bind(i.id)
+            .bind(i.host)
+            .bind(i.parent)
+            .bind(i.timestamp as i64) // throwing away some data, but i64 is still big in terms of time
+            .bind(&i.version)
+            .bind(&i.tag)
+            .bind(&i.data.data)
+            .bind(&i.data.content_encryption_key)
+            .bind(user.id)
+            .execute(&mut tx)
+            .await
+            .map_err(fix_error)?;
+        }
+
+        tx.commit().await.map_err(fix_error)?;
+
+        Ok(())
+    }
+
+    #[instrument(skip_all)]
+    async fn next_records(
+        &self,
+        user: &User,
+        host: HostId,
+        tag: String,
+        start: Option<RecordId>,
+        count: u64,
+    ) -> DbResult<Vec<Record<EncryptedData>>> {
+        tracing::debug!("{:?} - {:?} - {:?}", host, tag, start);
+        let mut ret = Vec::with_capacity(count as usize);
+        let mut parent = start;
+
+        // yeah let's do something better
+        for _ in 0..count {
+            // a very much not ideal query. but it's simple at least?
+            // we are basically using postgres as a kv store here, so... maybe consider using an actual
+            // kv store?
+            let record: Result<DbRecord, DbError> = sqlx::query_as(
+                "select client_id, host, parent, timestamp, version, tag, data, cek from records
+                    where user_id = $1
+                    and tag = $2
+                    and host = $3
+                    and parent is not distinct from $4",
+            )
+            .bind(user.id)
+            .bind(tag.clone())
+            .bind(host)
+            .bind(parent)
+            .fetch_one(&self.pool)
+            .await
+            .map_err(fix_error);
+
+            match record {
+                Ok(record) => {
+                    let record: Record<EncryptedData> = record.into();
+                    ret.push(record.clone());
+
+                    parent = Some(record.id);
+                }
+                Err(DbError::NotFound) => {
+                    tracing::debug!("hit tail of store: {:?}/{}", host, tag);
+                    return Ok(ret);
+                }
+                Err(e) => return Err(e),
+            }
+        }
+
+        Ok(ret)
+    }
+
+    async fn tail_records(&self, user: &User) -> DbResult<RecordIndex> {
+        const TAIL_RECORDS_SQL: &str = "select host, tag, client_id from records rp where (select count(1) from records where parent=rp.client_id and user_id = $1) = 0;";
+
+        let res = sqlx::query_as(TAIL_RECORDS_SQL)
+            .bind(user.id)
+            .fetch(&self.pool)
+            .try_collect()
+            .await
+            .map_err(fix_error)?;
+
+        Ok(res)
+    }
 }
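Per the schema above, the records for a given (user, host, tag) form a singly linked list through `parent`: the head of each log has a NULL parent, and every later record points at its predecessor. `next_records` walks that list one row per query, and `parent is not distinct from $4` is Postgres's NULL-safe equality: it lets the same query match the head (when `start` is None, `$4` binds NULL) as well as interior records, where a plain `parent = NULL` would never match anything. `tail_records` goes the other way: a record is a tail when no other record names it as `parent`, which yields one (host, tag, client_id) triple per log for the sync index exchange. A standalone sketch of the forward walk, using an in-memory slice in place of postgres (the names here are illustrative, not atuin's real types):

#[derive(Clone, Debug)]
struct Rec {
    id: u32,
    parent: Option<u32>,
}

/// Walk forward from `start` (None = head of the log), mirroring the
/// one-row-per-query loop in `next_records` above.
fn next_records(store: &[Rec], start: Option<u32>, count: usize) -> Vec<Rec> {
    let mut ret = Vec::with_capacity(count);
    let mut parent = start;

    for _ in 0..count {
        // one lookup per step; `r.parent == parent` is the in-memory
        // analogue of `parent is not distinct from $4`
        match store.iter().find(|r| r.parent == parent) {
            Some(r) => {
                ret.push(r.clone());
                parent = Some(r.id);
            }
            None => break, // hit the tail of the store
        }
    }
    ret
}

fn main() {
    let store = vec![
        Rec { id: 1, parent: None },
        Rec { id: 2, parent: Some(1) },
        Rec { id: 3, parent: Some(2) },
    ];
    // starting from the head yields 1 -> 2 -> 3
    let page = next_records(&store, None, 10);
    assert_eq!(page.iter().map(|r| r.id).collect::<Vec<_>>(), vec![1, 2, 3]);
    println!("{page:?}");
}

As the inline comments already concede, this costs one round trip per record; something like a recursive CTE could fetch a whole page in one query, at the cost of the simplicity the comment is defending.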
atuin-server-postgres/src/wrappers.rs
@@ -1,10 +1,12 @@
 use ::sqlx::{FromRow, Result};
+use atuin_common::record::{EncryptedData, Record};
 use atuin_server_database::models::{History, Session, User};
 use sqlx::{postgres::PgRow, Row};
 
 pub struct DbUser(pub User);
 pub struct DbSession(pub Session);
 pub struct DbHistory(pub History);
+pub struct DbRecord(pub Record<EncryptedData>);
 
 impl<'a> FromRow<'a, PgRow> for DbUser {
     fn from_row(row: &'a PgRow) -> Result<Self> {
@@ -40,3 +42,30 @@ impl<'a> ::sqlx::FromRow<'a, PgRow> for DbHistory {
         }))
     }
 }
+
+impl<'a> ::sqlx::FromRow<'a, PgRow> for DbRecord {
+    fn from_row(row: &'a PgRow) -> ::sqlx::Result<Self> {
+        let timestamp: i64 = row.try_get("timestamp")?;
+
+        let data = EncryptedData {
+            data: row.try_get("data")?,
+            content_encryption_key: row.try_get("cek")?,
+        };
+
+        Ok(Self(Record {
+            id: row.try_get("client_id")?,
+            host: row.try_get("host")?,
+            parent: row.try_get("parent")?,
+            timestamp: timestamp as u64,
+            version: row.try_get("version")?,
+            tag: row.try_get("tag")?,
+            data,
+        }))
+    }
+}
+
+impl From<DbRecord> for Record<EncryptedData> {
+    fn from(other: DbRecord) -> Record<EncryptedData> {
+        Record { ..other.0 }
+    }
+}
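The `Db*` tuple structs are the standard workaround for Rust's orphan rule: `FromRow` belongs to sqlx and `Record<EncryptedData>` belongs to atuin-common, so this crate may not implement the one for the other directly. Wrapping the foreign type in a local newtype makes the impl legal, and `From<DbRecord>` (or destructuring like `.map(|DbHistory(h)| h)` above) unwraps it again at the call site. A self-contained sketch of the pattern with stand-in names (`FromRowLike`, `Foreign`, `DbForeign` are illustrative, not real sqlx items):

struct Foreign(u64); // stands in for atuin-common's Record<EncryptedData>

trait FromRowLike: Sized {
    // stands in for sqlx::FromRow
    fn from_row(raw: u64) -> Self;
}

struct DbForeign(Foreign); // the local wrapper, like DbRecord

// Legal: the implementing type `DbForeign` is local to this crate,
// even though a direct `impl FromRowLike for Foreign` would not be
// if both trait and type lived in other crates.
impl FromRowLike for DbForeign {
    fn from_row(raw: u64) -> Self {
        DbForeign(Foreign(raw))
    }
}

// Unwrap at the call site, as `From<DbRecord>` does above.
impl From<DbForeign> for Foreign {
    fn from(w: DbForeign) -> Foreign {
        w.0
    }
}

fn main() {
    let rec: Foreign = DbForeign::from_row(42).into();
    println!("{}", rec.0);
}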