mirror of
https://github.com/atuinsh/atuin.git
synced 2024-11-22 08:13:57 +01:00
fix: add acquire timeout to sqlite database connection (#1590)
* fix: add acquire timeout to sqlite database connection. This should fix #1503. I wasn't able to trigger enough IO pressure for the SQL connection to be a problem. This adds `local_timeout` to the client config. This is a float, and represents the number of seconds (units in line with the other timeouts, though those are ints). Users may well want to reduce this if they regularly have issues, but by default I think 2s is fine and avoids a non-responsive system in bad situations. * tests
This commit is contained in:
parent
10f465da8f
commit
8899ce5089
@ -134,6 +134,12 @@ enter_accept = true
|
||||
## the specified one.
|
||||
# keymap_mode = "auto"
|
||||
|
||||
# network_connect_timeout = 5
|
||||
# network_timeout = 5
|
||||
|
||||
## Timeout (in seconds) for acquiring a local database connection (sqlite)
|
||||
# local_timeout = 5
|
||||
|
||||
#[stats]
|
||||
# Set commands where we should consider the subcommand for statistics. Eg, kubectl get vs just kubectl
|
||||
#common_subcommands = [
|
||||
|
@ -2,6 +2,7 @@ use std::{
|
||||
env,
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
@ -123,7 +124,7 @@ pub struct Sqlite {
|
||||
}
|
||||
|
||||
impl Sqlite {
|
||||
pub async fn new(path: impl AsRef<Path>) -> Result<Self> {
|
||||
pub async fn new(path: impl AsRef<Path>, timeout: f64) -> Result<Self> {
|
||||
let path = path.as_ref();
|
||||
debug!("opening sqlite database at {:?}", path);
|
||||
|
||||
@ -138,7 +139,10 @@ impl Sqlite {
|
||||
.journal_mode(SqliteJournalMode::Wal)
|
||||
.create_if_missing(true);
|
||||
|
||||
let pool = SqlitePoolOptions::new().connect_with(opts).await?;
|
||||
let pool = SqlitePoolOptions::new()
|
||||
.acquire_timeout(Duration::from_secs_f64(timeout))
|
||||
.connect_with(opts)
|
||||
.await?;
|
||||
|
||||
Self::setup_db(&pool).await?;
|
||||
|
||||
@ -786,7 +790,7 @@ mod test {
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_search_prefix() {
|
||||
let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
|
||||
let mut db = Sqlite::new("sqlite::memory:", 0.1).await.unwrap();
|
||||
new_history_item(&mut db, "ls /home/ellie").await.unwrap();
|
||||
|
||||
assert_search_eq(&db, SearchMode::Prefix, FilterMode::Global, "ls", 1)
|
||||
@ -802,7 +806,7 @@ mod test {
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_search_fulltext() {
|
||||
let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
|
||||
let mut db = Sqlite::new("sqlite::memory:", 0.1).await.unwrap();
|
||||
new_history_item(&mut db, "ls /home/ellie").await.unwrap();
|
||||
|
||||
assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "ls", 1)
|
||||
@ -818,7 +822,7 @@ mod test {
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_search_fuzzy() {
|
||||
let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
|
||||
let mut db = Sqlite::new("sqlite::memory:", 0.1).await.unwrap();
|
||||
new_history_item(&mut db, "ls /home/ellie").await.unwrap();
|
||||
new_history_item(&mut db, "ls /home/frank").await.unwrap();
|
||||
new_history_item(&mut db, "cd /home/Ellie").await.unwrap();
|
||||
@ -908,7 +912,7 @@ mod test {
|
||||
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
async fn test_search_reordered_fuzzy() {
|
||||
let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
|
||||
let mut db = Sqlite::new("sqlite::memory:", 0.1).await.unwrap();
|
||||
// test ordering of results: we should choose the first, even though it happened longer ago.
|
||||
|
||||
new_history_item(&mut db, "curl").await.unwrap();
|
||||
@ -942,7 +946,7 @@ mod test {
|
||||
git_root: None,
|
||||
};
|
||||
|
||||
let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
|
||||
let mut db = Sqlite::new("sqlite::memory:", 0.1).await.unwrap();
|
||||
for _i in 1..10000 {
|
||||
new_history_item(&mut db, "i am a duplicated command")
|
||||
.await
|
||||
|
@ -221,7 +221,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn build_kv() {
|
||||
let mut store = SqliteStore::new(":memory:").await.unwrap();
|
||||
let mut store = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
let kv = KvStore::new();
|
||||
let key: [u8; 32] = XSalsa20Poly1305::generate_key(&mut OsRng).into();
|
||||
let host_id = atuin_common::record::HostId(atuin_common::utils::uuid_v7());
|
||||
|
@ -2,8 +2,8 @@
|
||||
// Multiple stores of multiple types are all stored in one chonky table (for now), and we just index
|
||||
// by tag/host
|
||||
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::{path::Path, time::Duration};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use eyre::{eyre, Result};
|
||||
@ -27,7 +27,7 @@ pub struct SqliteStore {
|
||||
}
|
||||
|
||||
impl SqliteStore {
|
||||
pub async fn new(path: impl AsRef<Path>) -> Result<Self> {
|
||||
pub async fn new(path: impl AsRef<Path>, timeout: f64) -> Result<Self> {
|
||||
let path = path.as_ref();
|
||||
|
||||
debug!("opening sqlite database at {:?}", path);
|
||||
@ -44,7 +44,10 @@ impl SqliteStore {
|
||||
.foreign_keys(true)
|
||||
.create_if_missing(true);
|
||||
|
||||
let pool = SqlitePoolOptions::new().connect_with(opts).await?;
|
||||
let pool = SqlitePoolOptions::new()
|
||||
.acquire_timeout(Duration::from_secs_f64(timeout))
|
||||
.connect_with(opts)
|
||||
.await?;
|
||||
|
||||
Self::setup_db(&pool).await?;
|
||||
|
||||
@ -261,7 +264,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn create_db() {
|
||||
let db = SqliteStore::new(":memory:").await;
|
||||
let db = SqliteStore::new(":memory:", 0.1).await;
|
||||
|
||||
assert!(
|
||||
db.is_ok(),
|
||||
@ -272,7 +275,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn push_record() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
let record = test_record();
|
||||
|
||||
db.push(&record).await.expect("failed to insert record");
|
||||
@ -280,7 +283,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn get_record() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
let record = test_record();
|
||||
db.push(&record).await.unwrap();
|
||||
|
||||
@ -291,7 +294,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn last() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
let record = test_record();
|
||||
db.push(&record).await.unwrap();
|
||||
|
||||
@ -309,7 +312,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn first() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
let record = test_record();
|
||||
db.push(&record).await.unwrap();
|
||||
|
||||
@ -327,7 +330,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn len() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
let record = test_record();
|
||||
db.push(&record).await.unwrap();
|
||||
|
||||
@ -341,7 +344,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn len_different_tags() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
|
||||
// these have different tags, so the len should be the same
|
||||
// we model multiple stores within one database
|
||||
@ -361,7 +364,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn append_a_bunch() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
|
||||
let mut tail = test_record();
|
||||
db.push(&tail).await.expect("failed to push record");
|
||||
@ -380,7 +383,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn append_a_big_bunch() {
|
||||
let db = SqliteStore::new(":memory:").await.unwrap();
|
||||
let db = SqliteStore::new(":memory:", 0.1).await.unwrap();
|
||||
|
||||
let mut records: Vec<Record<EncryptedData>> = Vec::with_capacity(10000);
|
||||
|
||||
|
@ -317,10 +317,10 @@ mod tests {
|
||||
local_records: Vec<Record<EncryptedData>>,
|
||||
remote_records: Vec<Record<EncryptedData>>,
|
||||
) -> (SqliteStore, Vec<Diff>) {
|
||||
let local_store = SqliteStore::new(":memory:")
|
||||
let local_store = SqliteStore::new(":memory:", 0.1)
|
||||
.await
|
||||
.expect("failed to open in memory sqlite");
|
||||
let remote_store = SqliteStore::new(":memory:")
|
||||
let remote_store = SqliteStore::new(":memory:", 0.1)
|
||||
.await
|
||||
.expect("failed to open in memory sqlite"); // "remote"
|
||||
|
||||
|
@ -244,6 +244,7 @@ pub struct Settings {
|
||||
|
||||
pub network_connect_timeout: u64,
|
||||
pub network_timeout: u64,
|
||||
pub local_timeout: f64,
|
||||
pub enter_accept: bool,
|
||||
|
||||
#[serde(default)]
|
||||
@ -456,6 +457,7 @@ impl Settings {
|
||||
.set_default("secrets_filter", true)?
|
||||
.set_default("network_connect_timeout", 5)?
|
||||
.set_default("network_timeout", 30)?
|
||||
.set_default("local_timeout", 2.0)?
|
||||
// enter_accept defaults to false here, but true in the default config file. The dissonance is
|
||||
// intentional!
|
||||
// Existing users will get the default "False", so we don't mess with any potential
|
||||
|
@ -82,8 +82,8 @@ impl Cmd {
|
||||
let db_path = PathBuf::from(settings.db_path.as_str());
|
||||
let record_store_path = PathBuf::from(settings.record_store_path.as_str());
|
||||
|
||||
let db = Sqlite::new(db_path).await?;
|
||||
let sqlite_store = SqliteStore::new(record_store_path).await?;
|
||||
let db = Sqlite::new(db_path, settings.local_timeout).await?;
|
||||
let sqlite_store = SqliteStore::new(record_store_path, settings.local_timeout).await?;
|
||||
|
||||
match self {
|
||||
Self::History(history) => history.run(&settings, &db, sqlite_store).await,
|
||||
|
Loading…
Reference in New Issue
Block a user