mirror of
https://github.com/atuinsh/atuin.git
synced 2025-08-09 07:15:12 +02:00
chore: move crates into crates/ dir (#1958)
I'd like to tidy up the root a little, and it's nice to have all the Rust crates in one place.
This commit is contained in:
@ -0,0 +1,11 @@
-- Server-side history storage: one row per encrypted history entry
-- uploaded by a client.
create table history (
    id bigserial primary key,
    client_id text not null unique,   -- ID generated by the client
    user_id bigserial not null,       -- owning user; many users share this table
    hostname text not null,           -- client-supplied host identifier (may be hashed, random, whatever)
    timestamp timestamp not null,     -- one of the few pieces of unencrypted metadata

    data varchar(8192) not null,      -- the encrypted history payload; the server never reads it

    created_at timestamp not null default current_timestamp
);
@ -0,0 +1,10 @@
-- User accounts. Case-insensitive uniqueness of username/email is enforced
-- by the functional indexes below; the inline `unique` constraints only
-- cover the case-sensitive form.
create table users (
    id bigserial primary key,              -- our own surrogate ID
    username varchar(32) not null unique,  -- being able to contact users is useful
    email varchar(128) not null unique,    -- being able to contact users is useful
    -- BUGFIX: the original declared `password` UNIQUE. A unique constraint on
    -- password hashes rejects two accounts that happen to share a hash and
    -- leaks, via the failed insert, that some other account uses the same
    -- password. Passwords must not be unique.
    password varchar(128) not null
);

-- the inline unique constraints above are case sensitive :(
-- enforce case-insensitive uniqueness with expression indexes
create unique index email_unique_idx on users (lower(email));
create unique index username_unique_idx on users (lower(username));
@ -0,0 +1,6 @@
-- Login sessions: one row per issued session token.
create table sessions (
    id bigserial primary key,
    user_id bigserial,                  -- references users(id); no FK declared in this schema
    token varchar(128) unique not null  -- the opaque session token handed to the client
);
@ -0,0 +1,51 @@
-- Previously the count endpoint ran COUNT(1) over all of history, which is
-- slow under real traffic. Maintain a per-user running count instead, so we
-- read ONE row rather than ALL the rows. A cache in front of postgres would
-- be a further optimisation.

create table total_history_count_user (
    id bigserial primary key,
    user_id bigserial,
    total integer  -- "total" rather than "count" to avoid using a keyword
);

create or replace function user_history_count()
returns trigger as
$func$
begin
    if (TG_OP = 'INSERT') then
        update total_history_count_user
            set total = total + 1
            where user_id = new.user_id;

        -- no counter row yet for this user: seed one from the real count
        if not found then
            insert into total_history_count_user(user_id, total)
            values (
                new.user_id,
                (select count(1) from history where user_id = new.user_id)
            );
        end if;

    elsif (TG_OP = 'DELETE') then
        -- BUGFIX: the original referenced new.user_id in this branch, but in a
        -- DELETE trigger NEW is null -- the deleted row is available as OLD.
        update total_history_count_user
            set total = total - 1
            where user_id = old.user_id;

        if not found then
            insert into total_history_count_user(user_id, total)
            values (
                old.user_id,
                (select count(1) from history where user_id = old.user_id)
            );
        end if;
    end if;

    return NEW; -- the return value is ignored for an AFTER trigger, but oh well
end;
$func$
language plpgsql volatile
cost 100; -- default value

create trigger tg_user_history_count
    after insert or delete on history
    for each row
    execute procedure user_history_count();
@ -0,0 +1,35 @@
-- Corrected version of user_history_count(): the previous migration's
-- function used NEW in the DELETE branch when it should use OLD.

create or replace function user_history_count()
returns trigger as
$func$
begin
    if (TG_OP = 'INSERT') then
        update total_history_count_user
            set total = total + 1
            where user_id = new.user_id;

        -- no counter row yet for this user: seed one from the real count
        if not found then
            insert into total_history_count_user(user_id, total)
            values (
                new.user_id,
                (select count(1) from history where user_id = new.user_id)
            );
        end if;

    elsif (TG_OP = 'DELETE') then
        -- NEW is null in DELETE triggers; the deleted row arrives as OLD
        update total_history_count_user
            set total = total - 1
            where user_id = old.user_id;

        if not found then
            insert into total_history_count_user(user_id, total)
            values (
                old.user_id,
                (select count(1) from history where user_id = old.user_id)
            );
        end if;
    end if;

    return NEW; -- ignored for an AFTER trigger, but oh well
end;
$func$
language plpgsql volatile
cost 100; -- the default value
|
@ -0,0 +1,3 @@
-- Quadruple the history payload cap. Most commands fit in 8192 bytes, but the
-- payload is base64 and SOME entries exceed that. Should be enough for now.
alter table history alter column data type varchar(32768);
@ -0,0 +1 @@
-- Record when each user account was created; existing rows get now().
alter table users add column created_at timestamp not null default now();
@ -0,0 +1,14 @@
-- Event log for history mutations ('create' / 'delete').
create type event_type as enum ('create', 'delete');

create table events (
    id bigserial primary key,
    client_id text not null unique,  -- ID generated by the client
    user_id bigserial not null,      -- owning user; many users share this table
    hostname text not null,          -- client-supplied host identifier (may be hashed, random, whatever)
    timestamp timestamp not null,    -- one of the few pieces of unencrypted metadata

    event_type event_type,           -- NOTE(review): column shares its name with the enum type
    data text not null,              -- the encrypted history payload; the server never reads it

    created_at timestamp not null default current_timestamp
);
@ -0,0 +1,2 @@
-- Lift the varchar length cap entirely: history payloads become unbounded text.
alter table history alter column data type text;
@ -0,0 +1,2 @@
-- Remove the unused events table.
-- `if exists` makes the migration idempotent and safe to re-run.
-- NOTE(review): the event_type enum created alongside events is left in place.
drop table if exists events;
@ -0,0 +1,5 @@
-- Support marking history entries as deleted.
alter table history add column if not exists deleted_at timestamp;

-- queries will all select the ids of a user's history that has been deleted
create index if not exists history_deleted_index on history(client_id, user_id, deleted_at);
@ -0,0 +1,30 @@
-- Stop firing the counter trigger on DELETE: history rows are only deleted
-- once the owning user is already gone, and the per-row trigger made bulk
-- deletes a good bit slower.

create or replace function user_history_count()
returns trigger as
$func$
begin
    if (TG_OP = 'INSERT') then
        update total_history_count_user
            set total = total + 1
            where user_id = new.user_id;

        -- no counter row yet for this user: seed one from the real count
        if not found then
            insert into total_history_count_user(user_id, total)
            values (
                new.user_id,
                (select count(1) from history where user_id = new.user_id)
            );
        end if;
    end if;

    return NEW; -- ignored for an AFTER trigger, but oh well
end;
$func$
language plpgsql volatile
cost 100; -- the default value

-- replace the old insert-or-delete trigger with an insert-only one
create or replace trigger tg_user_history_count
    after insert on history
    for each row
    execute procedure user_history_count();
@ -0,0 +1,15 @@
-- Encrypted record store, v1. Records form a linked list per (host, tag).
create table records (
    id uuid primary key,        -- remember to use uuidv7 for happy indices <3
    client_id uuid not null,    -- client-generated id, kept separate from the primary key
    host uuid not null,         -- a unique identifier for the host
    parent uuid default null,   -- previous record in the linked list; null for the head
    timestamp bigint not null,  -- bigint, not timestamp: nanosecond precision is needed
    version text not null,
    tag text not null,          -- record kind (history, kv, ...); one log per tag per host
    data text not null,         -- the encrypted payload; the server never reads it
    cek text not null,          -- NOTE(review): presumably the wrapped content-encryption key -- confirm

    user_id bigint not null,    -- owning user; many users share this table
    created_at timestamp not null default current_timestamp
);
@ -0,0 +1,15 @@
-- Encrypted record store, v2: replaces the linked-list `parent` pointer of
-- `records` with an explicit per-(host, tag) position.
create table store (
    id uuid primary key,        -- remember to use uuidv7 for happy indices <3
    client_id uuid not null,    -- client-generated id, kept separate from the primary key
    host uuid not null,         -- a unique identifier for the host
    idx bigint not null,        -- position of the record within its (host, tag) log
    timestamp bigint not null,  -- bigint, not timestamp: nanosecond precision is needed
    version text not null,
    tag text not null,          -- record kind (history, kv, ...); one log per tag per host
    data text not null,         -- the encrypted payload; the server never reads it
    cek text not null,          -- NOTE(review): presumably the wrapped content-encryption key -- confirm

    user_id bigint not null,    -- owning user; many users share this table
    created_at timestamp not null default current_timestamp
);
@ -0,0 +1,2 @@
-- One record per (user, host, tag, idx) slot in the store.
-- NOTE(review): the index name says "record" but it targets `store` (a
-- leftover from the records->store rename); renaming it would need to be
-- coordinated with anything that drops it by name.
create unique index record_uniq on store(user_id, host, tag, idx);
@ -0,0 +1,4 @@
-- These user_id columns were declared bigserial, which attached a sequence
-- default; drop the defaults so inserts must always supply a real user id.
alter table history alter column user_id drop default;
alter table sessions alter column user_id drop default;
alter table total_history_count_user alter column user_id drop default;
Reference in New Issue
Block a user