Initial website work

This commit is contained in:
Ellie Huxtable
2022-05-31 09:59:46 +01:00
parent 9ac0c60cc4
commit 956e4fe0a3
138 changed files with 13266 additions and 11038 deletions

11
.editorconfig Normal file
View File

@@ -0,0 +1,11 @@
# editorconfig.org
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

4
.eslintignore Normal file
View File

@@ -0,0 +1,4 @@
assets/js/index.js
assets/js/katex.js
assets/js/vendor
node_modules

31
.eslintrc.json Normal file
View File

@@ -0,0 +1,31 @@
{
"env": {
"browser": true,
"commonjs": true,
"es6": true,
"node": true
},
"extends": "eslint:recommended",
"globals": {
"Atomics": "readonly",
"SharedArrayBuffer": "readonly"
},
"parserOptions": {
"ecmaVersion": 2018,
"sourceType": "module"
},
"rules": {
"no-console": 0,
"quotes": ["error", "single"],
"comma-dangle": [
"error",
{
"arrays": "always-multiline",
"objects": "always-multiline",
"imports": "always-multiline",
"exports": "always-multiline",
"functions": "ignore"
}
]
}
}

10
.gitignore vendored
View File

@@ -1,5 +1,5 @@
/target
*/target
.env
.idea/
.vscode/
node_modules
public
resources
.netlify
.hugo_build.lock

11
.markdownlint-cli2.jsonc Normal file
View File

@@ -0,0 +1,11 @@
{
"config": {
"default": true,
"MD013": false,
"MD024": false,
"MD026": false,
"MD033": false,
"MD034": false
},
"ignores": ["node_modules", "CHANGELOG.md", "README.md"]
}

3
.stylelintignore Normal file
View File

@@ -0,0 +1,3 @@
assets/scss/components/_syntax.scss
assets/scss/vendor
node_modules

48
.stylelintrc.json Normal file
View File

@@ -0,0 +1,48 @@
{
"extends": "stylelint-config-standard-scss",
"rules": {
"no-empty-source": null,
"string-quotes": "double",
"scss/comment-no-empty": null,
"max-line-length": null,
"scss/at-extend-no-missing-placeholder": null,
"scss/dollar-variable-colon-space-after": null,
"scss/dollar-variable-empty-line-before": null,
"color-function-notation": null,
"alpha-value-notation": null,
"selector-id-pattern": null,
"selector-class-pattern": null,
"scss/no-global-function-names": null,
"number-max-precision": null,
"hue-degree-notation": null,
"value-no-vendor-prefix": null,
"property-no-vendor-prefix": null,
"at-rule-no-unknown": [
true,
{
"ignoreAtRules": [
"extend",
"at-root",
"debug",
"warn",
"error",
"if",
"else",
"for",
"each",
"while",
"mixin",
"include",
"content",
"return",
"function",
"tailwind",
"apply",
"responsive",
"variants",
"screen"
]
}
]
}
}

View File

@@ -1,141 +1,103 @@
# Changelog
### Changelog
All notable changes to this project will be documented in this file.
All notable changes to this project will be documented in this file. Dates are displayed in UTC.
## [Unreleased]
Generated by [`auto-changelog`](https://github.com/CookPete/auto-changelog).
## [0.8.1] - 2022-04-12
#### [v0.4.3](https://github.com/h-enk/doks-child-theme/compare/v0.4.2...v0.4.3)
f861893 Update to clap 3.1.x (#289)
e8f7aac Add compact mode (#288)
1e04c4c Add rust-version to Cargo.toml (#287)
222e52b Update Dockerfile
fae118a Improve fuzzy search (#279)
7cde55a Add code of conduct (#281)
d270798 Update config-rs (#280)
3248883 Update README.md
7f58741 Fix `history list --cwd` errors (#278)
e117b62 Update fish bindings. (#265)
4223ac6 Restore bash 4.2 compatibility, only add hook once (#271)
7651f89 Add support for blesh (#267)
c2dd332 fix: get install.sh working on UbuntuWSL (#260)
84403a3 Bump reqwest from 0.11.7 to 0.11.9 (#261)
5005cf7 Bump serde_json from 1.0.73 to 1.0.75 (#262)
7fa3e1c Do not crash if the history timestamp is in the future (#250)
8d21506 use sqlite grouping rather than subquery (#181)
d36ff13 Replace dpkg with apt (#248)
- deps: bump versions to latest [`8ee2cb6`](https://github.com/h-enk/doks-child-theme/commit/8ee2cb6b8f2a7b38562f266c7db9c028a24c4523)
## [0.8.0] - 2021-12-17
#### [v0.4.2](https://github.com/h-enk/doks-child-theme/compare/v0.4.1...v0.4.2)
1339711 Add Alt+backspace and Ctrl+u keybinds for deleting by word and by line, respectively (#243)
059e858 remove unused environment var loading entire history into an env var (#242)
079d803 Enable help messages for command line arguments (#239)
87df7d8 Fish importing (#234)
6daaeb2 Bump serde_json from 1.0.64 to 1.0.72 (#219)
2df7428 Bump itertools from 0.10.1 to 0.10.3 (#236)
d81e452 Bump tui from 0.15.0 to 0.16.0 (#225)
0abd063 Support generating shell completions (#235)
28f78ba Update messages in install.sh about the AUR packages (#231)
b549095 Update install.sh to use `pacman` on Arch Linux (#229)
e242f89 Update installation instructions for Arch Linux (#228)
cd3af87 Bump sqlx from 0.5.5 to 0.5.7 (#210)
c8f60b2 fix: resolve some issues with install.sh (#188)
4bdf4c4 feat: login/register no longer blocking (#216)
9e6746a Remove dev dep with wildcard (#224)
> 1 April 2022
## [0.7.2] - 2021-12-08
- deps: bump @hyas/doks from 0.4.1 to 0.4.2 [`eda0e36`](https://github.com/h-enk/doks-child-theme/commit/eda0e360262707c7baa1af2b7d373865d5e534fd)
- meta: update changelog [`4e8f1a8`](https://github.com/h-enk/doks-child-theme/commit/4e8f1a8e51292c4010ee48a1f94b22db441d0998)
- meta: update sponsors + backers sections [`788e1e3`](https://github.com/h-enk/doks-child-theme/commit/788e1e3ab5b5447b4464018a0959e3cd1338a486)
6e8ec868 chore: improve build times (#213)
f2c1922e Bump itertools from 0.10.0 to 0.10.1 (#146)
e2c06052 Bump rmp-serde from 0.15.4 to 0.15.5 (#149)
d579b55d Bump rand from 0.8.3 to 0.8.4 (#152)
f539f60a chore: add more eyre contexts (#200)
2e59d6a5 Bump reqwest from 0.11.3 to 0.11.6 (#192)
e89de3f7 chore: supply pre-build docker image (#199)
07c06825 Bump tokio from 1.6.1 to 1.14.0 (#205)
46a1dab1 fix: dockerfile with correct glibc (#198)
8f91b141 chore: some new linting (#201)
27d3d81a feat: allow input of credentials from stdin (#185)
446ffb88 Resolve clippy warnings (#187)
2024884f Reordered fuzzy search (#179)
1babb41e Update README.md
0b9dc669 Add fuzzy text search mode (#142)
f0130571 Bump indicatif from 0.16.0 to 0.16.2 (#140)
cc7ce093 Bump sqlx from 0.5.2 to 0.5.5 (#139)
f8c80429 Bump tokio from 1.6.0 to 1.6.1 (#141)
802a2258 Bump tokio from 1.5.0 to 1.6.0 (#132)
4d52c5e8 Bump urlencoding from 1.3.1 to 1.3.3 (#133)
87c9f61e Bump serde from 1.0.125 to 1.0.126 (#124)
9303f482 Bump urlencoding from 1.1.1 to 1.3.1 (#125)
cb7d656c instructions to install without tap (#127)
f55d5cf0 Ignore commands beginning with a space, resolve #114 (#123)
a127408e run shellcheck (#97)
f041d7fe Adding plugin for zsh (#117)
fd90bd34 Fix doc links in sync.md (#115)
477c6852 Elementary Linux add as supported (#113)
#### [v0.4.1](https://github.com/h-enk/doks-child-theme/compare/v0.4.0...v0.4.1)
## [0.7.1] - 2021-05-10
> 31 March 2022
Very minor patch release
- deps: bump versions to latest [`a85f35d`](https://github.com/h-enk/doks-child-theme/commit/a85f35dc3fe0ca5a3080d4fe7d567f7e8863566e)
- ops: add continuous integration + stale workflows [`8134f51`](https://github.com/h-enk/doks-child-theme/commit/8134f518b7873be39ff2cd6eb3472bf681fa3390)
- feat: add files for custom scss + js [`12bf689`](https://github.com/h-enk/doks-child-theme/commit/12bf689da853e90ed51f25b8103e7beca68c23b0)
### Added
#### [v0.4.0](https://github.com/h-enk/doks-child-theme/compare/v0.3.5...v0.4.0)
### Changed
> 4 February 2022
### Deprecated
- deps: bump versions to latest [`fe5bf72`](https://github.com/h-enk/doks-child-theme/commit/fe5bf729e6e6754f3b6957b5d6c657c84326d070)
- fix: comment out mount content [`477ed68`](https://github.com/h-enk/doks-child-theme/commit/477ed686336826296a8b8e83c90af45b7a904bf4)
- config: update for doks v0.4.0 [`d00d595`](https://github.com/h-enk/doks-child-theme/commit/d00d595bc6add83780cf577d58099bbcecfbbe40)
### Removed
#### [v0.3.5](https://github.com/h-enk/doks-child-theme/compare/v0.3.4...v0.3.5)
### Fixed
> 5 October 2021
- Fix the atuin-common build (#107)
- chore(deps-dev): bump bootstrap from 5.1.0 to 5.1.1 [`#64`](https://github.com/h-enk/doks-child-theme/pull/64)
- chore(deps-dev): bump @babel/preset-env from 7.15.4 to 7.15.6 [`#65`](https://github.com/h-enk/doks-child-theme/pull/65)
- chore(deps-dev): bump autoprefixer from 10.3.4 to 10.3.7 [`#71`](https://github.com/h-enk/doks-child-theme/pull/71)
- feat: update for doks 0.3.5 [`3782cf5`](https://github.com/h-enk/doks-child-theme/commit/3782cf57ed43acadc426305dc64764048b78138a)
- meta: update changelog [`36436ff`](https://github.com/h-enk/doks-child-theme/commit/36436fff6fd1ba412fdc991f4acca35a2835d09c)
- meta: bump version doks-child-theme to 0.3.5 [`57eae76`](https://github.com/h-enk/doks-child-theme/commit/57eae76694755ab1b06691a3c5427f224b806661)
### Security
#### [v0.3.4](https://github.com/h-enk/doks-child-theme/compare/v0.3.3...v0.3.4)
## [0.7.0] - 2021-05-10
> 7 September 2021
Thank you so much to everyone that started contributing to Atuin for this release!
- feat: update for doks 0.3.4 [`b08cb80`](https://github.com/h-enk/doks-child-theme/commit/b08cb80fadc09f7ad7e7e960d09dc482666fa108)
- deps: bump versions to latest [`a790bbe`](https://github.com/h-enk/doks-child-theme/commit/a790bbe7fa9ac52d15270339bf1ec24be385a2a8)
- Create FUNDING.yml [`8613d4c`](https://github.com/h-enk/doks-child-theme/commit/8613d4caad869c0f1f80b5610f6e49b766935541)
- [@yuvipanda](https://github.com/yuvipanda)
- [@Sciencentistguy](https://github.com/Sciencentistguy)
- [@bl-ue](https://github.com/bl-ue)
- [@ElvishJerricco](https://github.com/ElvishJerricco)
- [@avinassh](https://github.com/avinassh)
- [@ismith](https://github.com/ismith)
- [@thedrow](https://github.com/thedrow)
#### [v0.3.3](https://github.com/h-enk/doks-child-theme/compare/v0.3.0...v0.3.3)
And a special thank you to [@conradludgate](https://github.com/conradludgate) for his ongoing contributions :)
> 5 July 2021
### Added
- feat: update for doks v0.3.3 [`f7b5720`](https://github.com/h-enk/doks-child-theme/commit/f7b57204c9de70f14e17337a8baa44815beb2b50)
- content: update readme [`14f1a3f`](https://github.com/h-enk/doks-child-theme/commit/14f1a3fede35e7f4a88d8ea063416958c2900a56)
- Update for doks v0.3.3 [`babdd77`](https://github.com/h-enk/doks-child-theme/commit/babdd7791e6002fd2272b1bc3c95c58654933454)
- Ctrl-C to exit (#53)
- Ctrl-D to exit (#65)
- Add option to not automatically bind keys (#62)
- Add importer for Resh history (#69)
- Retain the query entered if no results are found (#76)
- Support full-text querying (#75)
- Allow listing or searching with only the command as output (#89)
- Emacs-style ctrl-g, ctrl-n, ctrl-p (#77)
- `atuin logout` (#91)
- "quick access" to earlier commands via <kbd>Alt-N</kbd> (#79)
#### [v0.3.0](https://github.com/h-enk/doks-child-theme/compare/v0.2.3...v0.3.0)
### Changed
> 25 June 2021
- CI build caching (#49)
- Use an enum for dialect (#80)
- Generic importer trait (#71)
- Increased optimisation for release builds (#101)
- Shellcheck fixes for bash file (#81)
- Some general cleanup, bugfixes, and refactoring (#83, #90, #48)
- feat: update for doks v0.3.0 [`630c2a2`](https://github.com/h-enk/doks-child-theme/commit/630c2a2edd246f3fc26fbb799d2debb77857882c)
- fix: add data directory [`69cdc3c`](https://github.com/h-enk/doks-child-theme/commit/69cdc3cccea6a976552e654a86a85475a19ef448)
### Deprecated
#### [v0.2.3](https://github.com/h-enk/doks-child-theme/compare/v0.2.2...v0.2.3)
### Removed
> 2 April 2021
### Fixed
- feat: update for doks v0.2.3 [`e517462`](https://github.com/h-enk/doks-child-theme/commit/e517462127252a50d78cf34180ea10f898993585)
- chore(release): 0.2.3 [`1fcbc4e`](https://github.com/h-enk/doks-child-theme/commit/1fcbc4e2cb297f780004582612d904920044b181)
- Ubuntu install (#46)
- Bash integration (#88)
- Newline when editing shell RC files (#60)
#### v0.2.2
### Security
> 26 March 2021
- feat: add doks as a node module [`cfed05e`](https://github.com/h-enk/doks-child-theme/commit/cfed05efaf7b4191b2bdca4c91405c6cabc8396c)
- deps: bump versions to latest [`997a7dd`](https://github.com/h-enk/doks-child-theme/commit/997a7dd7250b3dc0fe23c92ebf83ed21c9ba2d6b)
- config: update for doks v0.2.2 [`03f51d5`](https://github.com/h-enk/doks-child-theme/commit/03f51d5fd1f66f7afa0957d92adf779d438a3946)
<!-- auto-changelog-above -->
### [0.2.3](https://github.com/h-enk/doks/compare/v0.2.2...v0.2.3) (2021-04-02)
### Features
* update for doks v0.2.3 ([e517462](https://github.com/h-enk/doks/commit/e517462127252a50d78cf34180ea10f898993585))
### 0.2.2 (2021-03-26)
### Features
* add doks as a node module ([cfed05e](https://github.com/h-enk/doks/commit/cfed05efaf7b4191b2bdca4c91405c6cabc8396c))
### Dependencies
* bump versions to latest ([997a7dd](https://github.com/h-enk/doks/commit/997a7dd7250b3dc0fe23c92ebf83ed21c9ba2d6b))

View File

@@ -2,127 +2,75 @@
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
Examples of behavior that contributes to creating a positive environment
include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior include:
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
professional setting
## Enforcement Responsibilities
## Our Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
ellie@elliehuxtable.com.
All complaints will be reviewed and investigated promptly and fairly.
reported by contacting the project team at hello@getdoks.org. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

2567
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,80 +0,0 @@
[package]
name = "atuin"
version = "0.9.1"
authors = ["Ellie Huxtable <ellie@elliehuxtable.com>"]
edition = "2018"
rust-version = "1.59"
license = "MIT"
description = "atuin - magical shell history"
homepage = "https://atuin.sh"
repository = "https://github.com/ellie/atuin"
readme = "README.md"
[package.metadata.deb]
maintainer = "Ellie Huxtable <ellie@elliehuxtable.com>"
copyright = "2021, Ellie Huxtable <ellie@elliehuxtable.com>"
license-file = ["LICENSE"]
depends = "$auto"
section = "utility"
[package.metadata.rpm]
package = "atuin"
[package.metadata.rpm.cargo]
buildflags = ["--release"]
[package.metadata.rpm.targets]
atuin = { path = "/usr/bin/atuin" }
[workspace]
members = ["./atuin-client", "./atuin-server", "./atuin-common"]
[features]
# TODO(conradludgate)
# Currently, this keeps the same default built behaviour for v0.8
# We should rethink this by the time we hit a new breaking change
default = ["client", "sync", "server"]
client = ["atuin-client"]
sync = ["atuin-client/sync"]
server = ["atuin-server", "tracing-subscriber"]
[dependencies]
atuin-server = { path = "atuin-server", version = "0.9.1", optional = true }
atuin-client = { path = "atuin-client", version = "0.9.1", optional = true, default-features = false }
atuin-common = { path = "atuin-common", version = "0.9.1" }
log = "0.4"
pretty_env_logger = "0.4"
chrono = { version = "0.4", features = ["serde"] }
eyre = "0.6"
directories = "4"
indicatif = "0.16.2"
serde = { version = "1.0.137", features = ["derive"] }
serde_json = "1.0.81"
tui = { version = "0.18", default-features = false, features = ["termion"] }
termion = "1.5"
unicode-width = "0.1"
itertools = "0.10.3"
tokio = { version = "1", features = ["full"] }
async-trait = "0.1.49"
chrono-english = "0.1.4"
cli-table = { version = "0.4", default-features = false }
base64 = "0.13.0"
humantime = "2.1.0"
crossbeam-channel = "0.5.1"
clap = { version = "3.1.18", features = ["derive"] }
clap_complete = "3.1.4"
fs-err = "2.7"
whoami = "1.1.2"
rpassword = "6.0"
[dependencies.tracing-subscriber]
version = "0.3"
default-features = false
features = [
"ansi",
"fmt",
"registry",
"env-filter",
]
optional = true

View File

@@ -1,27 +0,0 @@
FROM lukemathwalker/cargo-chef:latest-rust-1.59 AS chef
WORKDIR app
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
# Ensure working C compile setup (not installed by default in arm64 images)
RUN apt update && apt install build-essential -y
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
RUN cargo build --release --bin atuin
FROM debian:bullseye-20211011-slim AS runtime
WORKDIR app
ENV TZ=Etc/UTC
ENV RUST_LOG=atuin::api=info
ENV ATUIN_CONFIG_DIR=/config
COPY --from=builder /app/target/release/atuin /usr/local/bin
ENTRYPOINT ["/usr/local/bin/atuin"]

View File

@@ -1,6 +1,7 @@
MIT License
Copyright (c) 2021 Ellie Huxtable
Copyright (c) 2018-present, Gridsome
Copyright (c) 2020-present, Henk Verlinde
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +19,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
SOFTWARE.

289
README.md
View File

@@ -1,223 +1,140 @@
<p align="center">
<img height="250" src="https://user-images.githubusercontent.com/53315310/167610618-284491ac-c5d3-4957-9e4b-604bb97e23e6.png"/>
<a href="https://getdoks.org/">
<img alt="Doks" src="https://doks.netlify.app/logo-doks.svg" width="60">
</a>
</p>
<h1 align="center">
Doks
</h1>
<h3 align="center">
Doks Child Theme
</h3>
<p align="center">
Doks is a Hugo theme for building secure, fast, and SEO-ready documentation websites, which you can easily update and customize.
</p>
<p align="center">
<em>magical shell history</em>
<a href="https://github.com/h-enk/doks-child-theme/blob/master/LICENSE">
<img src="https://img.shields.io/github/license/h-enk/doks-child-theme?style=flat-square" alt="GitHub">
</a>
<a href="https://github.com/h-enk/doks-child-theme/releases">
<img src="https://img.shields.io/github/v/release/h-enk/doks-child-theme?include_prereleases&style=flat-square"alt="GitHub release (latest SemVer including pre-releases)">
</a>
<a href="https://github.com/h-enk/doks-child-theme/actions/workflows/codeql-analysis.yml">
<img src="https://img.shields.io/github/workflow/status/h-enk/doks-child-theme/CodeQL/master?style=flat-square" alt="GitHub Workflow Status (branch)">
</a>
<a href="https://app.netlify.com/sites/hyas-child-theme/deploys">
<img src="https://img.shields.io/netlify/75395a37-8537-4410-a8c3-d56bf27ec963?style=flat-square" alt="Netlify">
</a>
</p>
<hr/>
![Doks — Modern Documentation Theme](https://raw.githubusercontent.com/h-enk/doks/master/images/tn.png)
<p align="center">
<a href="https://github.com/ellie/atuin/actions?query=workflow%3ARust"><img src="https://img.shields.io/github/workflow/status/ellie/atuin/Rust?style=flat-square" /></a>
<a href="https://crates.io/crates/atuin"><img src="https://img.shields.io/crates/v/atuin.svg?style=flat-square" /></a>
<a href="https://crates.io/crates/atuin"><img src="https://img.shields.io/crates/d/atuin.svg?style=flat-square" /></a>
<a href="https://github.com/ellie/atuin/blob/main/LICENSE"><img src="https://img.shields.io/crates/l/atuin.svg?style=flat-square" /></a>
<a href="https://discord.gg/Fq8bJSKPHh"><img src="https://img.shields.io/discord/954121165239115808" /></a>
</p>
## Demo
- [doks-child-theme.netlify.app](https://doks-child-theme.netlify.app/)
[English] | [简体中文]
## Why Doks?
Nine main reasons why you should use Doks:
Atuin replaces your existing shell history with a SQLite database, and records
additional context for your commands. Additionally, it provides optional and
_fully encrypted_ synchronisation of your history between machines, via an Atuin
server.
1. __Security aware__. Get A+ scores on [Mozilla Observatory](https://observatory.mozilla.org/analyze/doks.netlify.app) out of the box. Easily change the default Security Headers to suit your needs.
2. __Fast by default__. Get 100 scores on [Google Lighthouse](https://googlechrome.github.io/lighthouse/viewer/?gist=7731347bb8ce999eff7428a8e763b637) by default. Doks removes unused css, prefetches links, and lazy loads images.
3. __SEO-ready__. Use sensible defaults for structured data, open graph, and Twitter cards. Or easily change the SEO settings to your liking.
4. __Development tools__. Code with confidence. Check styles, scripts, and markdown for errors and fix automatically or manually.
<p align="center">
<img src="demo.gif" alt="animated" width="80%" />
</p>
5. __Bootstrap framework__. Build robust, flexible, and intuitive websites with Bootstrap 5. Easily customize your Doks site with the source Sass files.
<p align="center">
<em>exit code, duration, time and command shown</em>
</p>
6. __Netlify-ready__. Deploy to Netlify with sensible defaults. Easily use Netlify Functions, Netlify Redirects, and Netlify Headers.
7. __Full text search__. Search your Doks site with FlexSearch. Easily customize index settings and search options to your liking.
8. __Page layouts__. Build pages with a landing page, blog, or documentation layout. Add custom sections and components to suit your needs.
9. __Dark mode__. Switch to a low-light UI with the click of a button. Change colors with variables to match your branding.
### Other features
As well as the search UI, it can do things like this:
- __Multilingual and i18n__ support
- __Versioning__ documentation support
- __KaTeX__ math typesetting
- __Mermaid__ diagrams and visualization
- __highlight.js__ syntax highlighting
```
# search for all successful `make` commands, recorded after 3pm yesterday
atuin search --exit 0 --after "yesterday 3pm" make
## Requirements
Doks uses npm to centralize dependency management, making it [easy to update](https://getdoks.org/docs/help/how-to-update/) resources, build tooling, plugins, and build scripts:
- Download and install [Node.js](https://nodejs.org/) (it includes npm) for your platform.
## Get started
Start a new Doks project in three steps:
### 1. Create a new site
Doks is available as a child theme, and a starter theme:
- Use the Doks child theme, if you do __not__ plan to customize a lot, and/or need future Doks updates.
- Use the Doks starter theme, if you plan to customize a lot, and/or do __not__ need future Doks updates.
Not quite sure? Use the Doks child theme.
#### Doks child theme
```bash
git clone https://github.com/h-enk/doks-child-theme.git my-doks-site && cd my-doks-site
```
You may use either the server I host, or host your own! Or just don't use sync
at all. As all history sync is encrypted, I couldn't access your data even if
I wanted to. And I **really** don't want to.
#### Doks starter theme
## Features
```bash
git clone https://github.com/h-enk/doks.git my-doks-site && cd my-doks-site
```
- rebind `up` and `ctrl-r` with a full screen history search UI
- store shell history in a sqlite database
- backup and sync **encrypted** shell history
- the same history across terminals, across sessions, and across machines
- log exit code, cwd, hostname, session, command duration, etc
- calculate statistics such as "most used command"
- old history file is not replaced
- quick-jump to previous items with <kbd>Alt-\<num\></kbd>
- switch filter modes via ctrl-r; search history just from the current session, directory, or globally
### 2. Install dependencies
```bash
npm install
```
### 3. Start development server
```bash
npm run start
```
## Other commands
Doks comes with [commands](https://getdoks.org/docs/prologue/commands/) for common tasks.
## Documentation
- [Quickstart](#quickstart)
- [Install](#install)
- [Import](docs/import.md)
- [Configuration](docs/config.md)
- [Searching history](docs/search.md)
- [Cloud history sync](docs/sync.md)
- [History stats](docs/stats.md)
- [Running your own server](docs/server.md)
- [Key binding](docs/key-binding.md)
- [Shell completions](docs/shell-completions.md)
- [Netlify](https://docs.netlify.com/)
- [Hugo](https://gohugo.io/documentation/)
- [Doks](https://getdoks.org/)
## Supported Shells
## Communities
- zsh
- bash
- fish
## Community
- [Netlify Community](https://community.netlify.com/)
- [Hugo Forums](https://discourse.gohugo.io/)
- [Doks Discussions](https://github.com/h-enk/doks/discussions)
Atuin has a community Discord, available [here](https://discord.gg/Fq8bJSKPHh)
## Sponsors
# Quickstart
## With the default sync server
This will sign you up for the default sync server, hosted by me. Everything is end-to-end encrypted, so your secrets are safe!
Read more below for offline-only usage, or for hosting your own server.
Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
```
bash <(curl https://raw.githubusercontent.com/ellie/atuin/main/install.sh)
[![OC sponsor 0](https://opencollective.com/doks/tiers/sponsor/0/avatar.svg)](https://opencollective.com/doks/tiers/sponsor/0/website)
[![OC sponsor 1](https://opencollective.com/doks/tiers/sponsor/1/avatar.svg)](https://opencollective.com/doks/tiers/sponsor/1/website)
atuin register -u <USERNAME> -e <EMAIL> -p <PASSWORD>
atuin import auto
atuin sync
```
## Offline only (no sync)
```
bash <(curl https://raw.githubusercontent.com/ellie/atuin/main/install.sh)
atuin import auto
```
## Backers
## Install
Support this project by becoming a backer. Your avatar will show up here.
### Script (recommended)
The install script will help you through the setup, ensuring your shell is
properly configured. It will also use one of the below methods, preferring the
system package manager where possible (pacman, homebrew, etc etc).
```
# do not run this as root, root will be asked for if required
bash <(curl https://raw.githubusercontent.com/ellie/atuin/main/install.sh)
```
### With cargo
It's best to use [rustup](https://rustup.rs/) to get setup with a Rust
toolchain, then you can run:
```
cargo install atuin
```
And then follow [the shell setup](#shell-plugin)
### Homebrew
```
brew install atuin
```
And then follow [the shell setup](#shell-plugin)
### MacPorts
Atuin is also available in [MacPorts](https://ports.macports.org/port/atuin/)
```
sudo port install atuin
```
And then follow [the shell setup](#shell-plugin)
### Pacman
Atuin is available in the Arch Linux [community repository](https://archlinux.org/packages/community/x86_64/atuin/):
```
pacman -S atuin
```
And then follow [the shell setup](#shell-plugin)
### From source
```
git clone https://github.com/ellie/atuin.git
cd atuin
cargo install --path .
```
And then follow [the shell setup](#shell-plugin)
## Shell plugin
Once the binary is installed, the shell plugin requires installing. If you use
the install script, this should all be done for you!
### zsh
```
echo 'eval "$(atuin init zsh)"' >> ~/.zshrc
```
Or using a plugin manager:
```
zinit load ellie/atuin
```
### bash
We need to setup some hooks, so first install bash-preexec:
```
curl https://raw.githubusercontent.com/rcaloras/bash-preexec/master/bash-preexec.sh -o ~/.bash-preexec.sh
echo '[[ -f ~/.bash-preexec.sh ]] && source ~/.bash-preexec.sh' >> ~/.bashrc
```
Then setup Atuin
```
echo 'eval "$(atuin init bash)"' >> ~/.bashrc
```
### fish
Add
```
atuin init fish | source
```
to your `is-interactive` block in your `~/.config/fish/config.fish` file
## ...what's with the name?
Atuin is named after "The Great A'Tuin", a giant turtle from Terry Pratchett's
Discworld series of books.
[English]: ./README.md
[简体中文]: ./docs/zh-CN/README.md
[![Backers](https://opencollective.com/doks/tiers/backer.svg)](https://opencollective.com/doks)

7
SECURITY.md Normal file
View File

@@ -0,0 +1,7 @@
# Reporting Security Issues
The Doks team and community take security issues in Doks seriously. We appreciate your efforts to responsibly disclose your findings, and will make every effort to acknowledge your contributions.
To report a security issue, email [security@getdoks.org](mailto:security@getdoks.org) and include the word "SECURITY" in the subject line.
We'll endeavor to respond quickly, and will keep you updated throughout the process.

1
assets/js/app.js Normal file
View File

@@ -0,0 +1 @@
/** Custom scripts */

38
assets/scss/app.scss Normal file
View File

@@ -0,0 +1,38 @@
/** Import Bootstrap functions */
@import "bootstrap/scss/functions";
/** Import theme variables */
@import "common/variables";
/** Import Bootstrap */
@import "bootstrap/scss/bootstrap";
/** Import highlight.js */
// @import "highlight.js/scss/github-dark-dimmed";
/** Import KaTeX */
@import "katex/dist/katex";
/** Import theme styles */
@import "common/fonts";
@import "common/global";
@import "common/dark";
@import "components/alerts";
@import "components/buttons";
@import "components/code";
@import "components/details";
@import "components/syntax";
@import "components/comments";
@import "components/forms";
@import "components/images";
@import "components/mermaid";
@import "components/search";
@import "components/tables";
@import "layouts/footer";
@import "layouts/header";
@import "layouts/pages";
@import "layouts/posts";
@import "layouts/sidebar";
/** Import custom styles */
@import "common/custom";

View File

@@ -0,0 +1 @@
/** Custom styles */

View File

@@ -1,68 +0,0 @@
[package]
name = "atuin-client"
version = "0.9.1"
authors = ["Ellie Huxtable <ellie@elliehuxtable.com>"]
edition = "2018"
license = "MIT"
description = "client library for atuin"
homepage = "https://atuin.sh"
repository = "https://github.com/ellie/atuin"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["sync"]
sync = [
"urlencoding",
"sodiumoxide",
"reqwest",
"sha2",
"hex",
"rmp-serde",
"base64",
]
[dependencies]
atuin-common = { path = "../atuin-common", version = "0.9.1" }
log = "0.4"
chrono = { version = "0.4", features = ["serde"] }
eyre = "0.6"
directories = "4"
uuid = { version = "1.0", features = ["v4"] }
whoami = "1.1.2"
chrono-english = "0.1.4"
config = { version = "0.13", default-features = false, features = ["toml"] }
serde = { version = "1.0.137", features = ["derive"] }
serde_json = "1.0.81"
parse_duration = "2.1.1"
async-trait = "0.1.49"
itertools = "0.10.3"
shellexpand = "2"
sqlx = { version = "0.5", features = [
"runtime-tokio-rustls",
"chrono",
"sqlite",
] }
minspan = "0.1.1"
regex = "1.5.4"
fs-err = "2.7"
sql-builder = "3"
lazy_static = "1"
memchr = "2.5"
# sync
urlencoding = { version = "2.1.0", optional = true }
sodiumoxide = { version = "0.2.6", optional = true }
reqwest = { version = "0.11", features = [
"json",
"rustls-tls",
], default-features = false, optional = true }
hex = { version = "0.4", optional = true }
sha2 = { version = "0.10", optional = true }
rmp-serde = { version = "1.0.0", optional = true }
base64 = { version = "0.13.0", optional = true }
tokio = { version = "1", features = ["full"] }
[dev-dependencies]
tokio = { version = "1", features = ["full"] }

View File

@@ -1,28 +0,0 @@
## where to store your database, default is your system data directory
## mac: ~/Library/Application Support/com.elliehuxtable.atuin/history.db
## linux: ~/.local/share/atuin/history.db
# db_path = "~/.history.db"
## where to store your encryption key, default is your system data directory
# key_path = "~/.key"
## where to store your auth session token, default is your system data directory
# session_path = "~/.session"
## date format used, either "us" or "uk"
# dialect = "uk"
## enable or disable automatic sync
# auto_sync = true
## how often to sync history. note that this is only triggered when a command
## is run, so sync intervals may well be longer
## set it to 0 to sync after every command
# sync_frequency = "5m"
## address of the sync server
# sync_address = "https://api.atuin.sh"
## which search mode to use
## possible values: prefix, fulltext, fuzzy
# search_mode = "prefix"

View File

@@ -1,16 +0,0 @@
-- Add migration script here
-- Initial schema: one row per recorded shell command.
create table if not exists history (
    id text primary key,
    timestamp integer not null, -- nanoseconds since the epoch (see save_raw)
    duration integer not null,  -- -1 marks unknown (e.g. imported entries)
    exit integer not null,      -- -1 marks unknown
    command text not null,
    cwd text not null,
    session text not null,
    hostname text not null,

    unique(timestamp, cwd, command)
);

-- speed up time-range listings and command searches
create index if not exists idx_history_timestamp on history(timestamp);
create index if not exists idx_history_command on history(command);

View File

@@ -1,157 +0,0 @@
use std::collections::HashMap;
use chrono::Utc;
use eyre::{bail, Result};
use reqwest::{
header::{HeaderMap, AUTHORIZATION, USER_AGENT},
StatusCode, Url,
};
use sodiumoxide::crypto::secretbox;
use atuin_common::api::{
AddHistoryRequest, CountResponse, LoginRequest, LoginResponse, RegisterResponse,
SyncHistoryResponse,
};
use crate::{
encryption::{decode_key, decrypt},
history::History,
sync::hash_str,
};
/// User-Agent header sent with every request, e.g. "atuin/0.9.1".
static APP_USER_AGENT: &str = concat!("atuin/", env!("CARGO_PKG_VERSION"),);

// TODO: remove all references to the encryption key from this
// It should be handled *elsewhere*

/// HTTP client for talking to an Atuin sync server.
pub struct Client<'a> {
    // base address of the sync server
    sync_addr: &'a str,
    // secret key used to decrypt downloaded history
    key: secretbox::Key,
    // pre-configured with the auth token and user agent (see Client::new)
    client: reqwest::Client,
}
/// Register a new account on the server at `address`.
///
/// First checks that the username is free, then posts the credentials to
/// `/register`. Returns the session produced by the server.
pub async fn register(
    address: &str,
    username: &str,
    email: &str,
    password: &str,
) -> Result<RegisterResponse> {
    // the server answers /user/{name} with success when the name is taken
    let check = reqwest::get(format!("{}/user/{}", address, username)).await?;
    if check.status().is_success() {
        bail!("username already in use");
    }

    let body = HashMap::from([
        ("username", username),
        ("email", email),
        ("password", password),
    ]);

    let response = reqwest::Client::new()
        .post(format!("{}/register", address))
        .header(USER_AGENT, APP_USER_AGENT)
        .json(&body)
        .send()
        .await?;

    if !response.status().is_success() {
        bail!("failed to register user");
    }

    Ok(response.json::<RegisterResponse>().await?)
}
/// Log in to the server at `address`, returning the session token response.
pub async fn login(address: &str, req: LoginRequest) -> Result<LoginResponse> {
    let response = reqwest::Client::new()
        .post(format!("{}/login", address))
        .header(USER_AGENT, APP_USER_AGENT)
        .json(&req)
        .send()
        .await?;

    if response.status() != reqwest::StatusCode::OK {
        bail!("invalid login details");
    }

    Ok(response.json::<LoginResponse>().await?)
}
impl<'a> Client<'a> {
    /// Build a client for `sync_addr`, authenticating every request with
    /// `session_token` and decrypting downloaded history with `key`
    /// (the base64+msgpack encoded form; see `decode_key`).
    pub fn new(sync_addr: &'a str, session_token: &'a str, key: String) -> Result<Self> {
        let mut headers = HeaderMap::new();
        headers.insert(AUTHORIZATION, format!("Token {}", session_token).parse()?);

        Ok(Client {
            sync_addr,
            key: decode_key(key)?,
            client: reqwest::Client::builder()
                .user_agent(APP_USER_AGENT)
                .default_headers(headers)
                .build()?,
        })
    }

    /// Number of history entries the server stores for this user.
    pub async fn count(&self) -> Result<i64> {
        let url = format!("{}/sync/count", self.sync_addr);
        let url = Url::parse(url.as_str())?;

        let resp = self.client.get(url).send().await?;

        if resp.status() != StatusCode::OK {
            bail!("failed to get count (are you logged in?)");
        }

        let count = resp.json::<CountResponse>().await?;

        Ok(count.count)
    }

    /// Download and decrypt history from the server.
    ///
    /// `host` defaults to a hash of "<hostname>:<username>" when not given.
    pub async fn get_history(
        &self,
        sync_ts: chrono::DateTime<Utc>,
        history_ts: chrono::DateTime<Utc>,
        host: Option<String>,
    ) -> Result<Vec<History>> {
        let host = match host {
            None => hash_str(&format!("{}:{}", whoami::hostname(), whoami::username())),
            Some(h) => h,
        };

        let url = format!(
            "{}/sync/history?sync_ts={}&history_ts={}&host={}",
            self.sync_addr,
            urlencoding::encode(sync_ts.to_rfc3339().as_str()),
            urlencoding::encode(history_ts.to_rfc3339().as_str()),
            host,
        );

        let resp = self.client.get(url).send().await?;

        // fail early on auth/server errors rather than surfacing a confusing
        // JSON deserialization error below
        if resp.status() != StatusCode::OK {
            bail!("failed to fetch history (are you logged in?)");
        }

        let history = resp.json::<SyncHistoryResponse>().await?;
        let history = history
            .history
            .iter()
            // each entry is a JSON-encoded EncryptedHistory, not raw base64 —
            // the old "invalid base64" message here was misleading
            .map(|h| serde_json::from_str(h).expect("invalid JSON (encrypted history)"))
            .map(|h| decrypt(&h, &self.key).expect("failed to decrypt history! check your key"))
            .collect();

        Ok(history)
    }

    /// Upload (already-encrypted) history entries to the server.
    pub async fn post_history(&self, history: &[AddHistoryRequest]) -> Result<()> {
        let url = format!("{}/history", self.sync_addr);
        let url = Url::parse(url.as_str())?;

        // previously a failed upload was silently ignored; surface it so the
        // caller does not record entries as synced when they are not
        self.client
            .post(url)
            .json(history)
            .send()
            .await?
            .error_for_status()?;

        Ok(())
    }
}

View File

@@ -1,678 +0,0 @@
use std::{env, path::Path, str::FromStr};
use async_trait::async_trait;
use chrono::{prelude::*, Utc};
use fs_err as fs;
use itertools::Itertools;
use lazy_static::lazy_static;
use regex::Regex;
use sql_builder::{esc, quote, SqlBuilder, SqlName};
use sqlx::{
sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePool, SqlitePoolOptions, SqliteRow},
Result, Row,
};
use super::{
history::History,
ordering,
settings::{FilterMode, SearchMode},
};
/// Ambient information about the current shell invocation, used to scope
/// listing/searching by session, directory, or host (see FilterMode).
pub struct Context {
    // value of $ATUIN_SESSION
    session: String,
    // current working directory ("" if it cannot be determined)
    cwd: String,
    // "<hostname>:<username>"
    hostname: String,
}
/// Capture the current shell context.
///
/// Panics if $ATUIN_SESSION is unset (the shell plugin exports it); an
/// unreadable working directory is recorded as an empty string.
pub fn current_context() -> Context {
    let session =
        env::var("ATUIN_SESSION").expect("failed to find ATUIN_SESSION - check your shell setup");
    let hostname = format!("{}:{}", whoami::hostname(), whoami::username());
    let cwd = env::current_dir()
        .map(|dir| dir.display().to_string())
        .unwrap_or_default();

    Context {
        session,
        hostname,
        cwd,
    }
}
#[async_trait]
/// Storage backend for shell history.
pub trait Database: Send + Sync {
    /// Insert a single entry; duplicates (by id) are ignored.
    async fn save(&mut self, h: &History) -> Result<()>;
    /// Insert many entries inside one transaction.
    async fn save_bulk(&mut self, h: &[History]) -> Result<()>;

    /// Fetch a single entry by id.
    async fn load(&self, id: &str) -> Result<History>;
    /// List entries, newest first; `unique` keeps only the newest row per command.
    async fn list(
        &self,
        filter: FilterMode,
        context: &Context,
        max: Option<usize>,
        unique: bool,
    ) -> Result<Vec<History>>;
    /// Entries with `from <= timestamp <= to`, oldest first.
    async fn range(
        &self,
        from: chrono::DateTime<Utc>,
        to: chrono::DateTime<Utc>,
    ) -> Result<Vec<History>>;

    /// Overwrite an existing entry, matched by id.
    async fn update(&self, h: &History) -> Result<()>;
    /// Total number of stored entries.
    async fn history_count(&self) -> Result<i64>;

    /// Oldest completed entry (duration >= 0).
    async fn first(&self) -> Result<History>;
    /// Newest completed entry (duration >= 0).
    async fn last(&self) -> Result<History>;
    /// Up to `count` entries strictly before `timestamp`, newest first.
    async fn before(&self, timestamp: chrono::DateTime<Utc>, count: i64) -> Result<Vec<History>>;

    /// Search history; see the Sqlite implementation for mode semantics.
    async fn search(
        &self,
        limit: Option<i64>,
        search_mode: SearchMode,
        filter: FilterMode,
        context: &Context,
        query: &str,
    ) -> Result<Vec<History>>;

    /// Run a raw SQL query that selects history rows.
    async fn query_history(&self, query: &str) -> Result<Vec<History>>;
}
// Intended for use on a developer machine and not a sync server.
// TODO: implement IntoIterator
/// Local history store backed by a sqlite database file.
pub struct Sqlite {
    pool: SqlitePool,
}
impl Sqlite {
    /// Open (or create) the sqlite database at `path`, enable WAL journaling,
    /// and apply any pending migrations.
    pub async fn new(path: impl AsRef<Path>) -> Result<Self> {
        let path = path.as_ref();
        debug!("opening sqlite database at {:?}", path);

        let create = !path.exists();
        if create {
            // make sure the parent directory exists before sqlite creates the file
            if let Some(dir) = path.parent() {
                fs::create_dir_all(dir)?;
            }
        }

        let opts = SqliteConnectOptions::from_str(path.as_os_str().to_str().unwrap())?
            .journal_mode(SqliteJournalMode::Wal)
            .create_if_missing(true);

        let pool = SqlitePoolOptions::new().connect_with(opts).await?;

        Self::setup_db(&pool).await?;

        Ok(Self { pool })
    }

    /// Run the embedded migrations from ./migrations.
    async fn setup_db(pool: &SqlitePool) -> Result<()> {
        debug!("running sqlite database setup");

        sqlx::migrate!("./migrations").run(pool).await?;

        Ok(())
    }

    /// Insert one row inside an existing transaction; rows with a duplicate id
    /// are silently ignored ("insert or ignore").
    async fn save_raw(tx: &mut sqlx::Transaction<'_, sqlx::Sqlite>, h: &History) -> Result<()> {
        sqlx::query(
            "insert or ignore into history(id, timestamp, duration, exit, command, cwd, session, hostname)
                values(?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
        )
        .bind(h.id.as_str())
        .bind(h.timestamp.timestamp_nanos())
        .bind(h.duration)
        .bind(h.exit)
        .bind(h.command.as_str())
        .bind(h.cwd.as_str())
        .bind(h.session.as_str())
        .bind(h.hostname.as_str())
        .execute(tx)
        .await?;

        Ok(())
    }

    /// Map a database row back into a History value.
    fn query_history(row: SqliteRow) -> History {
        History {
            id: row.get("id"),
            timestamp: Utc.timestamp_nanos(row.get("timestamp")),
            duration: row.get("duration"),
            exit: row.get("exit"),
            command: row.get("command"),
            cwd: row.get("cwd"),
            session: row.get("session"),
            hostname: row.get("hostname"),
        }
    }
}
#[async_trait]
impl Database for Sqlite {
    async fn save(&mut self, h: &History) -> Result<()> {
        debug!("saving history to sqlite");

        // single-row saves still go through a transaction so that save and
        // save_bulk share save_raw
        let mut tx = self.pool.begin().await?;
        Self::save_raw(&mut tx, h).await?;
        tx.commit().await?;

        Ok(())
    }

    async fn save_bulk(&mut self, h: &[History]) -> Result<()> {
        debug!("saving history to sqlite");

        // one transaction for the whole batch
        let mut tx = self.pool.begin().await?;

        for i in h {
            Self::save_raw(&mut tx, i).await?
        }

        tx.commit().await?;

        Ok(())
    }

    async fn load(&self, id: &str) -> Result<History> {
        debug!("loading history item {}", id);

        let res = sqlx::query("select * from history where id = ?1")
            .bind(id)
            .map(Self::query_history)
            .fetch_one(&self.pool)
            .await?;

        Ok(res)
    }

    async fn update(&self, h: &History) -> Result<()> {
        debug!("updating sqlite history");

        sqlx::query(
            "update history
                set timestamp = ?2, duration = ?3, exit = ?4, command = ?5, cwd = ?6, session = ?7, hostname = ?8
                where id = ?1",
        )
        .bind(h.id.as_str())
        .bind(h.timestamp.timestamp_nanos())
        .bind(h.duration)
        .bind(h.exit)
        .bind(h.command.as_str())
        .bind(h.cwd.as_str())
        .bind(h.session.as_str())
        .bind(h.hostname.as_str())
        .execute(&self.pool)
        .await?;

        Ok(())
    }

    // make a unique list, that only shows the *newest* version of things
    async fn list(
        &self,
        filter: FilterMode,
        context: &Context,
        max: Option<usize>,
        unique: bool,
    ) -> Result<Vec<History>> {
        debug!("listing history");

        let mut query = SqlBuilder::select_from(SqlName::new("history").alias("h").baquoted());
        query.field("*").order_desc("timestamp");

        // scope the listing to the current host/session/directory if requested
        match filter {
            FilterMode::Global => &mut query,
            FilterMode::Host => query.and_where_eq("hostname", quote(&context.hostname)),
            FilterMode::Session => query.and_where_eq("session", quote(&context.session)),
            FilterMode::Directory => query.and_where_eq("cwd", quote(&context.cwd)),
        };

        if unique {
            // keep only the newest row for each distinct command
            query.and_where_eq(
                "timestamp",
                "(select max(timestamp) from history where h.command = history.command)",
            );
        }

        if let Some(max) = max {
            query.limit(max);
        }

        let query = query.sql().expect("bug in list query. please report");

        let res = sqlx::query(&query)
            .map(Self::query_history)
            .fetch_all(&self.pool)
            .await?;

        Ok(res)
    }

    async fn range(
        &self,
        from: chrono::DateTime<Utc>,
        to: chrono::DateTime<Utc>,
    ) -> Result<Vec<History>> {
        debug!("listing history from {:?} to {:?}", from, to);

        let res = sqlx::query(
            "select * from history where timestamp >= ?1 and timestamp <= ?2 order by timestamp asc",
        )
        .bind(from.timestamp_nanos())
        .bind(to.timestamp_nanos())
        .map(Self::query_history)
        .fetch_all(&self.pool)
        .await?;

        Ok(res)
    }

    async fn first(&self) -> Result<History> {
        // duration >= 0 skips entries whose runtime was never recorded
        let res =
            sqlx::query("select * from history where duration >= 0 order by timestamp asc limit 1")
                .map(Self::query_history)
                .fetch_one(&self.pool)
                .await?;

        Ok(res)
    }

    async fn last(&self) -> Result<History> {
        let res = sqlx::query(
            "select * from history where duration >= 0 order by timestamp desc limit 1",
        )
        .map(Self::query_history)
        .fetch_one(&self.pool)
        .await?;

        Ok(res)
    }

    async fn before(&self, timestamp: chrono::DateTime<Utc>, count: i64) -> Result<Vec<History>> {
        let res = sqlx::query(
            "select * from history where timestamp < ?1 order by timestamp desc limit ?2",
        )
        .bind(timestamp.timestamp_nanos())
        .bind(count)
        .map(Self::query_history)
        .fetch_all(&self.pool)
        .await?;

        Ok(res)
    }

    async fn history_count(&self) -> Result<i64> {
        let res: (i64,) = sqlx::query_as("select count(1) from history")
            .fetch_one(&self.pool)
            .await?;

        Ok(res.0)
    }

    async fn search(
        &self,
        limit: Option<i64>,
        search_mode: SearchMode,
        filter: FilterMode,
        context: &Context,
        query: &str,
    ) -> Result<Vec<History>> {
        // results are grouped by command so each distinct command appears once
        let mut sql = SqlBuilder::select_from("history");

        sql.group_by("command")
            .having("max(timestamp)")
            .order_desc("timestamp");

        if let Some(limit) = limit {
            sql.limit(limit);
        }

        match filter {
            FilterMode::Global => &mut sql,
            FilterMode::Host => sql.and_where_eq("hostname", quote(&context.hostname)),
            FilterMode::Session => sql.and_where_eq("session", quote(&context.session)),
            FilterMode::Directory => sql.and_where_eq("cwd", quote(&context.cwd)),
        };

        let orig_query = query;
        let query = query.replace('*', "%"); // allow wildcard char

        match search_mode {
            SearchMode::Prefix => sql.and_where_like_left("command", query),
            SearchMode::FullText => sql.and_where_like_any("command", query),
            SearchMode::Fuzzy => {
                // fuzzy mode: each space-separated part of the query becomes
                // one LIKE/GLOB condition. supported operators per part:
                //   ^term  anchor at start       term$  anchor at end
                //   'term  plain substring       !term  inverse match
                //   |      OR the adjacent parts together
                // a part containing an uppercase letter is matched with GLOB
                // instead of LIKE (smart case; see the TODO below)

                // don't recompile the regex on successive calls!
                lazy_static! {
                    static ref SPLIT_REGEX: Regex = Regex::new(r" +").unwrap();
                }

                let mut is_or = false;
                for query_part in SPLIT_REGEX.split(query.as_str()) {
                    // TODO smart case mode could be made configurable like in fzf
                    let (is_glob, glob) = if query_part.contains(char::is_uppercase) {
                        (true, "*")
                    } else {
                        (false, "%")
                    };

                    let (is_inverse, query_part) = match query_part.strip_prefix('!') {
                        Some(stripped) => (true, stripped),
                        None => (false, query_part),
                    };

                    let param = if query_part == "|" {
                        if !is_or {
                            // remember the OR and apply it to the next part
                            is_or = true;
                            continue;
                        } else {
                            // a literal "|" (two in a row): match it as text
                            format!("{glob}|{glob}")
                        }
                    } else if let Some(term) = query_part.strip_prefix('^') {
                        format!("{term}{glob}")
                    } else if let Some(term) = query_part.strip_suffix('$') {
                        format!("{glob}{term}")
                    } else if let Some(term) = query_part.strip_prefix('\'') {
                        format!("{glob}{term}{glob}")
                    } else if is_inverse {
                        format!("{glob}{term}{glob}", term = query_part)
                    } else {
                        // plain part: match its characters in order with
                        // anything allowed in between
                        query_part.split("").join(glob)
                    };

                    sql.fuzzy_condition("command", param, is_inverse, is_glob, is_or);
                    is_or = false;
                }

                &mut sql
            }
        };

        let query = sql.sql().expect("bug in search query. please report");

        let res = sqlx::query(&query)
            .map(Self::query_history)
            .fetch_all(&self.pool)
            .await?;

        // fuzzy results are re-ranked by match quality, not just recency
        Ok(ordering::reorder_fuzzy(search_mode, orig_query, res))
    }

    async fn query_history(&self, query: &str) -> Result<Vec<History>> {
        let res = sqlx::query(query)
            .map(Self::query_history)
            .fetch_all(&self.pool)
            .await?;

        Ok(res)
    }
}
#[cfg(test)]
mod test {
    use super::*;

    use std::time::{Duration, Instant};

    // Run a search against an in-memory db and assert the number of results.
    async fn assert_search_eq<'a>(
        db: &impl Database,
        mode: SearchMode,
        filter_mode: FilterMode,
        query: &str,
        expected: usize,
    ) -> Result<Vec<History>> {
        let context = Context {
            hostname: "test:host".to_string(),
            session: "beepboopiamasession".to_string(),
            cwd: "/home/ellie".to_string(),
        };
        let results = db.search(None, mode, filter_mode, &context, query).await?;

        assert_eq!(
            results.len(),
            expected,
            "query \"{}\", commands: {:?}",
            query,
            results.iter().map(|a| &a.command).collect::<Vec<&String>>()
        );
        Ok(results)
    }

    // Run a search and assert the exact commands returned, in order.
    async fn assert_search_commands(
        db: &impl Database,
        mode: SearchMode,
        filter_mode: FilterMode,
        query: &str,
        expected_commands: Vec<&str>,
    ) {
        let results = assert_search_eq(db, mode, filter_mode, query, expected_commands.len())
            .await
            .unwrap();
        let commands: Vec<&str> = results.iter().map(|a| a.command.as_str()).collect();
        assert_eq!(commands, expected_commands);
    }

    // Save a minimal history entry for `cmd` with fixed session/hostname.
    async fn new_history_item(db: &mut impl Database, cmd: &str) -> Result<()> {
        let history = History::new(
            chrono::Utc::now(),
            cmd.to_string(),
            "/home/ellie".to_string(),
            0,
            1,
            Some("beep boop".to_string()),
            Some("booop".to_string()),
        );
        return db.save(&history).await;
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_search_prefix() {
        let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
        new_history_item(&mut db, "ls /home/ellie").await.unwrap();

        assert_search_eq(&db, SearchMode::Prefix, FilterMode::Global, "ls", 1)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Prefix, FilterMode::Global, "/home", 0)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Prefix, FilterMode::Global, "ls ", 0)
            .await
            .unwrap();
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_search_fulltext() {
        let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
        new_history_item(&mut db, "ls /home/ellie").await.unwrap();

        assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "ls", 1)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "/home", 1)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::FullText, FilterMode::Global, "ls ", 0)
            .await
            .unwrap();
    }

    // Exercises the fuzzy query syntax: subsequence matching, ^/$/'/! and |
    // operators, and smart-case (uppercase switches to GLOB).
    #[tokio::test(flavor = "multi_thread")]
    async fn test_search_fuzzy() {
        let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
        new_history_item(&mut db, "ls /home/ellie").await.unwrap();
        new_history_item(&mut db, "ls /home/frank").await.unwrap();
        new_history_item(&mut db, "cd /home/Ellie").await.unwrap();
        new_history_item(&mut db, "/home/ellie/.bin/rustup")
            .await
            .unwrap();

        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "ls /", 3)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "ls/", 2)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "l/h/", 2)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "/h/e", 3)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "/hmoe/", 0)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "ellie/home", 0)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "lsellie", 1)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, " ", 4)
            .await
            .unwrap();

        // single term operators
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "^ls", 2)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "'ls", 2)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "ellie$", 2)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "!^ls", 2)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "!ellie", 1)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "!ellie$", 2)
            .await
            .unwrap();

        // multiple terms
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "ls !ellie", 1)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "^ls !e$", 1)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "home !^ls", 2)
            .await
            .unwrap();
        assert_search_eq(
            &db,
            SearchMode::Fuzzy,
            FilterMode::Global,
            "'frank | 'rustup",
            2,
        )
        .await
        .unwrap();
        assert_search_eq(
            &db,
            SearchMode::Fuzzy,
            FilterMode::Global,
            "'frank | 'rustup 'ls",
            1,
        )
        .await
        .unwrap();

        // case matching
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "Ellie", 1)
            .await
            .unwrap();
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_search_reordered_fuzzy() {
        let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
        // test ordering of results: we should choose the first, even though it happened longer ago.

        new_history_item(&mut db, "curl").await.unwrap();
        new_history_item(&mut db, "corburl").await.unwrap();

        // if fuzzy reordering is on, it should come back in a more sensible order
        assert_search_commands(
            &db,
            SearchMode::Fuzzy,
            FilterMode::Global,
            "curl",
            vec!["curl", "corburl"],
        )
        .await;

        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "xxxx", 0)
            .await
            .unwrap();
        assert_search_eq(&db, SearchMode::Fuzzy, FilterMode::Global, "", 2)
            .await
            .unwrap();
    }

    // Sanity-check that fuzzy search over many duplicate rows stays fast.
    #[tokio::test(flavor = "multi_thread")]
    async fn test_search_bench_dupes() {
        let context = Context {
            hostname: "test:host".to_string(),
            session: "beepboopiamasession".to_string(),
            cwd: "/home/ellie".to_string(),
        };

        let mut db = Sqlite::new("sqlite::memory:").await.unwrap();
        for _i in 1..10000 {
            new_history_item(&mut db, "i am a duplicated command")
                .await
                .unwrap();
        }
        let start = Instant::now();
        let _results = db
            .search(None, SearchMode::Fuzzy, FilterMode::Global, &context, "")
            .await
            .unwrap();
        let duration = start.elapsed();

        assert!(duration < Duration::from_secs(15));
    }
}
/// Extension used by fuzzy search to emit custom LIKE/GLOB conditions
/// that sql-builder does not provide directly.
trait SqlBuilderExt {
    /// Append a (possibly negated) LIKE or GLOB condition on `field`,
    /// combined with the existing WHERE clause via AND or OR.
    fn fuzzy_condition<S: ToString, T: ToString>(
        &mut self,
        field: S,
        mask: T,
        inverse: bool,
        glob: bool,
        is_or: bool,
    ) -> &mut Self;
}
impl SqlBuilderExt for SqlBuilder {
    /// adapted from the sql-builder *like functions
    fn fuzzy_condition<S: ToString, T: ToString>(
        &mut self,
        field: S,
        mask: T,
        inverse: bool,
        glob: bool,
        is_or: bool,
    ) -> &mut Self {
        // pick the comparison operator up front instead of appending piecemeal
        let operator = match (inverse, glob) {
            (true, true) => " NOT GLOB '",
            (true, false) => " NOT LIKE '",
            (false, true) => " GLOB '",
            (false, false) => " LIKE '",
        };

        let condition = format!(
            "{}{}{}'",
            field.to_string(),
            operator,
            esc(&mask.to_string())
        );

        if is_or {
            self.or_where(condition)
        } else {
            self.and_where(condition)
        }
    }
}

View File

@@ -1,143 +0,0 @@
// The general idea is that we NEVER send cleartext history to the server
// This way the odds of anything private ending up where it should not are
// very low
// The server authenticates via the usual username and password. This has
// nothing to do with the encryption, and is purely authentication! The client
// generates its own secret key, and encrypts all shell history with libsodium's
// secretbox. The data is then sent to the server, where it is stored. All
// clients must share the secret in order to be able to sync, as it is needed
// to decrypt
use std::{io::prelude::*, path::PathBuf};
use eyre::{eyre, Context, Result};
use fs_err as fs;
use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::secretbox;
use crate::{history::History, settings::Settings};
/// A single history entry sealed with libsodium's secretbox.
/// The nonce is stored alongside the ciphertext; it is not secret.
#[derive(Debug, Serialize, Deserialize)]
pub struct EncryptedHistory {
    pub ciphertext: Vec<u8>,
    pub nonce: secretbox::Nonce,
}
/// Generate a fresh secret key, persist its encoded form to
/// `settings.key_path`, and return the key.
pub fn new_key(settings: &Settings) -> Result<secretbox::Key> {
    let key = secretbox::gen_key();
    let encoded = encode_key(key.clone())?;

    fs::write(settings.key_path.as_str(), encoded.as_bytes())?;

    Ok(key)
}
// Loads the secret key, will create + save if it doesn't exist
pub fn load_key(settings: &Settings) -> Result<secretbox::Key> {
let path = settings.key_path.as_str();
let key = if PathBuf::from(path).exists() {
let key = fs_err::read_to_string(path)?;
decode_key(key)?
} else {
new_key(settings)?
};
Ok(key)
}
pub fn load_encoded_key(settings: &Settings) -> Result<String> {
let path = settings.key_path.as_str();
if PathBuf::from(path).exists() {
let key = fs::read_to_string(path)?;
Ok(key)
} else {
let key = secretbox::gen_key();
let encoded = encode_key(key)?;
let mut file = fs::File::create(path)?;
file.write_all(encoded.as_bytes())?;
Ok(encoded)
}
}
/// Encode a key as msgpack, then base64, for storage on disk.
pub fn encode_key(key: secretbox::Key) -> Result<String> {
    let buf = rmp_serde::to_vec(&key).wrap_err("could not encode key to message pack")?;
    let buf = base64::encode(buf);

    Ok(buf)
}
/// Inverse of `encode_key`: base64 decode, then msgpack decode.
pub fn decode_key(key: String) -> Result<secretbox::Key> {
    let buf = base64::decode(key).wrap_err("encryption key is not a valid base64 encoding")?;

    let buf: secretbox::Key = rmp_serde::from_slice(&buf)
        .wrap_err("encryption key is not a valid message pack encoding")?;

    Ok(buf)
}
/// Serialize `history` with msgpack and seal it with `key` under a
/// freshly generated nonce.
pub fn encrypt(history: &History, key: &secretbox::Key) -> Result<EncryptedHistory> {
    // serialize with msgpack
    let buf = rmp_serde::to_vec(history)?;

    let nonce = secretbox::gen_nonce();

    let ciphertext = secretbox::seal(&buf, &nonce, key);

    Ok(EncryptedHistory { ciphertext, nonce })
}
/// Open the secretbox and deserialize the contained History.
/// Fails if `key` does not match the key used to encrypt.
pub fn decrypt(encrypted_history: &EncryptedHistory, key: &secretbox::Key) -> Result<History> {
    let plaintext = secretbox::open(&encrypted_history.ciphertext, &encrypted_history.nonce, key)
        .map_err(|_| eyre!("failed to open secretbox - invalid key?"))?;

    let history = rmp_serde::from_slice(&plaintext)?;

    Ok(history)
}
#[cfg(test)]
mod test {
    use sodiumoxide::crypto::secretbox;

    use crate::history::History;

    use super::{decrypt, encrypt};

    // Round-trips one entry and checks that a wrong key fails to decrypt.
    #[test]
    fn test_encrypt_decrypt() {
        let key1 = secretbox::gen_key();
        let key2 = secretbox::gen_key();

        let history = History::new(
            chrono::Utc::now(),
            "ls".to_string(),
            "/home/ellie".to_string(),
            0,
            1,
            Some("beep boop".to_string()),
            Some("booop".to_string()),
        );

        let e1 = encrypt(&history, &key1).unwrap();
        let e2 = encrypt(&history, &key2).unwrap();

        // each call draws a fresh nonce, so even same-key output differs
        assert_ne!(e1.ciphertext, e2.ciphertext);
        assert_ne!(e1.nonce, e2.nonce);

        // test decryption works
        // this should pass
        match decrypt(&e1, &key1) {
            Err(e) => panic!("failed to decrypt, got {}", e),
            Ok(h) => assert_eq!(h, history),
        };

        // this should err
        let _ = decrypt(&e2, &key1).expect_err("expected an error decrypting with invalid key");
    }
}

View File

@@ -1,52 +0,0 @@
use std::env;
use chrono::Utc;
use serde::{Deserialize, Serialize};
use atuin_common::utils::uuid_v4;
// Any new fields MUST be Optional<>!
/// A single shell history entry.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, sqlx::FromRow)]
pub struct History {
    // uuid, generated in History::new
    pub id: String,
    pub timestamp: chrono::DateTime<Utc>,
    // -1 marks entries whose runtime is unknown (see success())
    pub duration: i64,
    // exit status; imports use -1 for unknown
    pub exit: i64,
    pub command: String,
    // working directory the command ran in
    pub cwd: String,
    // shell session id, defaulting to $ATUIN_SESSION
    pub session: String,
    // "<hostname>:<username>" by default
    pub hostname: String,
}
impl History {
    /// Construct an entry with a freshly generated uuid.
    ///
    /// `session` falls back to $ATUIN_SESSION and then a random uuid;
    /// `hostname` falls back to "<hostname>:<username>".
    pub fn new(
        timestamp: chrono::DateTime<Utc>,
        command: String,
        cwd: String,
        exit: i64,
        duration: i64,
        session: Option<String>,
        hostname: Option<String>,
    ) -> Self {
        let session = match session.or_else(|| env::var("ATUIN_SESSION").ok()) {
            Some(s) => s,
            None => uuid_v4(),
        };

        let hostname = match hostname {
            Some(h) => h,
            None => format!("{}:{}", whoami::hostname(), whoami::username()),
        };

        Self {
            id: uuid_v4(),
            timestamp,
            command,
            cwd,
            exit,
            duration,
            session,
            hostname,
        }
    }

    /// A command is considered successful if it exited 0, or if its exit
    /// status was never recorded (duration == -1, e.g. imported entries).
    pub fn success(&self) -> bool {
        self.exit == 0 || self.duration == -1
    }
}

View File

@@ -1,106 +0,0 @@
use std::{fs::File, io::Read, path::PathBuf};
use async_trait::async_trait;
use directories::UserDirs;
use eyre::{eyre, Result};
use super::{get_histpath, unix_byte_lines, Importer, Loader};
use crate::history::History;
/// Importer for bash's plain-text history file (~/.bash_history).
#[derive(Debug)]
pub struct Bash {
    // raw history file contents
    bytes: Vec<u8>,
}
/// Default bash history location: ~/.bash_history.
fn default_histpath() -> Result<PathBuf> {
    UserDirs::new()
        .ok_or_else(|| eyre!("could not find user directories"))
        .map(|dirs| dirs.home_dir().join(".bash_history"))
}
#[async_trait]
impl Importer for Bash {
    const NAME: &'static str = "bash";

    /// Read the whole history file into memory.
    async fn new() -> Result<Self> {
        let mut bytes = Vec::new();
        let path = get_histpath(default_histpath)?;
        let mut f = File::open(path)?;
        f.read_to_end(&mut bytes)?;
        Ok(Self { bytes })
    }

    async fn entries(&mut self) -> Result<usize> {
        Ok(super::count_lines(&self.bytes))
    }

    /// Push one History entry per logical command, joining lines that end
    /// with a backslash continuation into a single command.
    async fn load(self, h: &mut impl Loader) -> Result<()> {
        let now = chrono::Utc::now();
        let mut line = String::new();

        for (i, b) in unix_byte_lines(&self.bytes).enumerate() {
            let s = match std::str::from_utf8(b) {
                Ok(s) => s,
                Err(_) => continue, // we can skip past things like invalid utf8
            };

            if let Some(s) = s.strip_suffix('\\') {
                // continuation line: keep accumulating into `line`
                line.push_str(s);
                line.push_str("\\\n");
            } else {
                line.push_str(s);
                let command = std::mem::take(&mut line);

                // bash history carries no timestamps here, so entries are
                // offset from `now` by their line index
                let offset = chrono::Duration::seconds(i as i64);
                h.push(History::new(
                    now - offset, // preserve ordering
                    command,
                    String::from("unknown"),
                    -1,
                    -1,
                    None,
                    None,
                ))
                .await?;
            }
        }

        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use itertools::assert_equal;

    use crate::import::{tests::TestLoader, Importer};

    use super::Bash;

    // Covers plain commands, backslash continuations, and non-ASCII bytes.
    #[tokio::test]
    async fn test_parse_file() {
        let bytes = r"cargo install atuin
cargo install atuin; \
cargo update
cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷
"
        .as_bytes()
        .to_owned();

        let mut bash = Bash { bytes };
        assert_eq!(bash.entries().await.unwrap(), 4);

        let mut loader = TestLoader::default();
        bash.load(&mut loader).await.unwrap();

        assert_equal(
            loader.buf.iter().map(|h| h.command.as_str()),
            [
                "cargo install atuin",
                "cargo install atuin; \\\ncargo update",
                "cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷",
            ],
        );
    }
}

View File

@@ -1,196 +0,0 @@
// import old shell history!
// automatically hoover up all that we can find
use std::{fs::File, io::Read, path::PathBuf};
use async_trait::async_trait;
use chrono::{prelude::*, Utc};
use directories::BaseDirs;
use eyre::{eyre, Result};
use super::{get_histpath, unix_byte_lines, Importer, Loader};
use crate::history::History;
/// Importer for fish's history file.
#[derive(Debug)]
pub struct Fish {
    // raw history file contents
    bytes: Vec<u8>,
}
/// see https://fishshell.com/docs/current/interactive.html#searchable-command-history
fn default_histpath() -> Result<PathBuf> {
    let base = BaseDirs::new().ok_or_else(|| eyre!("could not determine data directory"))?;

    // fish supports multiple history sessions via $fish_history;
    // a missing var or the value "default" both mean the `fish` session
    let session = match std::env::var("fish_history") {
        Ok(s) if s != "default" => s,
        _ => String::from("fish"),
    };

    let histpath = base
        .data_local_dir()
        .join("fish")
        .join(format!("{}_history", session));

    if histpath.exists() {
        Ok(histpath)
    } else {
        Err(eyre!("Could not find history file. Try setting $HISTFILE"))
    }
}
#[async_trait]
impl Importer for Fish {
    const NAME: &'static str = "fish";

    /// Read the whole fish history file into memory.
    async fn new() -> Result<Self> {
        let mut bytes = Vec::new();
        let path = get_histpath(default_histpath)?;
        let mut f = File::open(path)?;
        f.read_to_end(&mut bytes)?;
        Ok(Self { bytes })
    }

    async fn entries(&mut self) -> Result<usize> {
        Ok(super::count_lines(&self.bytes))
    }

    /// Parse fish's YAML-ish history: entries start with "- cmd: ",
    /// optionally followed by a "when: <unix ts>" line; other lines
    /// (e.g. "paths:") are ignored. An entry without a timestamp gets `now`.
    async fn load(self, loader: &mut impl Loader) -> Result<()> {
        let now = Utc::now();
        let mut time: Option<DateTime<Utc>> = None;
        let mut cmd: Option<String> = None;

        for b in unix_byte_lines(&self.bytes) {
            let s = match std::str::from_utf8(b) {
                Ok(s) => s,
                Err(_) => continue, // we can skip past things like invalid utf8
            };

            if let Some(c) = s.strip_prefix("- cmd: ") {
                // first, we must deal with the prev cmd
                if let Some(cmd) = cmd.take() {
                    let time = time.unwrap_or(now);

                    loader
                        .push(History::new(
                            time,
                            cmd,
                            "unknown".into(),
                            -1,
                            -1,
                            None,
                            None,
                        ))
                        .await?;
                }

                // using raw strings to avoid needing escaping.
                // replaces double backslashes with single backslashes
                let c = c.replace(r"\\", r"\");
                // replaces escaped newlines
                let c = c.replace(r"\n", "\n");
                // TODO: any other escape characters?

                cmd = Some(c);
            } else if let Some(t) = s.strip_prefix(" when: ") {
                // if t is not an int, just ignore this line
                if let Ok(t) = t.parse::<i64>() {
                    time = Some(Utc.timestamp(t, 0));
                }
            } else {
                // ... ignore paths lines
            }
        }

        // we might have a trailing cmd
        if let Some(cmd) = cmd.take() {
            let time = time.unwrap_or(now);

            loader
                .push(History::new(
                    time,
                    cmd,
                    "unknown".into(),
                    -1,
                    -1,
                    None,
                    None,
                ))
                .await?;
        }

        Ok(())
    }
}
#[cfg(test)]
mod test {
    use crate::import::{tests::TestLoader, Importer};

    use super::Fish;

    #[tokio::test]
    async fn parse_complex() {
        // complicated input with varying contents and escaped strings,
        // including a deliberately corrupted entry that must be skipped.
        let bytes = r#"- cmd: history --help
when: 1639162832
- cmd: cat ~/.bash_history
when: 1639162851
paths:
- ~/.bash_history
- cmd: ls ~/.local/share/fish/fish_history
when: 1639162890
paths:
- ~/.local/share/fish/fish_history
- cmd: cat ~/.local/share/fish/fish_history
when: 1639162893
paths:
- ~/.local/share/fish/fish_history
ERROR
- CORRUPTED: ENTRY
CONTINUE:
- AS
- NORMAL
- cmd: echo "foo" \\\n'bar' baz
when: 1639162933
- cmd: cat ~/.local/share/fish/fish_history
when: 1639162939
paths:
- ~/.local/share/fish/fish_history
- cmd: echo "\\"" \\\\ "\\\\"
when: 1639163063
- cmd: cat ~/.local/share/fish/fish_history
when: 1639163066
paths:
- ~/.local/share/fish/fish_history
"#
        .as_bytes()
        .to_owned();
        let fish = Fish { bytes };

        let mut loader = TestLoader::default();
        fish.load(&mut loader).await.unwrap();
        let mut history = loader.buf.into_iter();

        // simple wrapper for fish history entry: asserts the next entry's
        // command text and unix timestamp in one statement.
        macro_rules! fishtory {
            ($timestamp:expr, $command:expr) => {
                let h = history.next().expect("missing entry in history");
                assert_eq!(h.command.as_str(), $command);
                assert_eq!(h.timestamp.timestamp(), $timestamp);
            };
        }

        fishtory!(1639162832, "history --help");
        fishtory!(1639162851, "cat ~/.bash_history");
        fishtory!(1639162890, "ls ~/.local/share/fish/fish_history");
        fishtory!(1639162893, "cat ~/.local/share/fish/fish_history");
        // escaped newline and backslashes are decoded by the importer
        fishtory!(1639162933, "echo \"foo\" \\\n'bar' baz");
        fishtory!(1639162939, "cat ~/.local/share/fish/fish_history");
        fishtory!(1639163063, r#"echo "\"" \\ "\\""#);
        fishtory!(1639163066, "cat ~/.local/share/fish/fish_history");
    }
}

View File

@@ -1,99 +0,0 @@
use std::path::PathBuf;
use async_trait::async_trait;
use eyre::{bail, Result};
use memchr::Memchr;
use crate::history::History;
pub mod bash;
pub mod fish;
pub mod resh;
pub mod zsh;
pub mod zsh_histdb;
/// A source of old shell history that can be converted into atuin entries.
#[async_trait]
pub trait Importer: Sized {
    // Name of the shell/format this importer handles.
    const NAME: &'static str;

    // Construct the importer, locating and reading its history source.
    async fn new() -> Result<Self>;
    // Count of entries; for line-based formats this counts physical lines
    // and so may over-count logical commands (see the bash/zsh importers).
    async fn entries(&mut self) -> Result<usize>;
    // Parse everything and push each entry into `loader`.
    async fn load(self, loader: &mut impl Loader) -> Result<()>;
}
/// Sink that receives parsed history entries from an `Importer`.
#[async_trait]
pub trait Loader: Sync + Send {
    // Accept one parsed history entry.
    async fn push(&mut self, hist: History) -> eyre::Result<()>;
}
/// Iterate over the `\n`-terminated lines of `input` as byte slices
/// (newline excluded). Bytes after the final newline are never yielded.
fn unix_byte_lines(input: &[u8]) -> impl Iterator<Item = &[u8]> {
    let newlines = memchr::memchr_iter(b'\n', input);
    UnixByteLines {
        bytes: input,
        iter: newlines,
        i: 0,
    }
}
/// Iterator state for `unix_byte_lines`: `iter` locates the next newline,
/// `i` is the index just past the previously-yielded one.
struct UnixByteLines<'a> {
    iter: Memchr<'a>,
    bytes: &'a [u8],
    i: usize,
}
impl<'a> Iterator for UnixByteLines<'a> {
    type Item = &'a [u8];

    /// Yield the bytes between the previous newline and the next one
    /// (exclusive). Returns `None` once no further newline exists, so a
    /// trailing unterminated line is dropped.
    fn next(&mut self) -> Option<Self::Item> {
        let j = self.iter.next()?;
        let out = &self.bytes[self.i..j];
        self.i = j + 1;
        Some(out)
    }

    /// Fast-path override: the number of remaining lines equals the number
    /// of remaining newlines, so delegate to the newline finder without
    /// materializing each slice.
    fn count(self) -> usize
    where
        Self: Sized,
    {
        self.iter.count()
    }
}
/// Number of `\n` bytes in `input`, i.e. the number of terminated lines.
fn count_lines(input: &[u8]) -> usize {
    input.iter().filter(|&&b| b == b'\n').count()
}
/// Resolve the history file path: $HISTFILE wins if set, otherwise the
/// shell-specific default `def` is used. Either way the path must exist.
fn get_histpath<D>(def: D) -> Result<PathBuf>
where
    D: FnOnce() -> Result<PathBuf>,
{
    match std::env::var("HISTFILE") {
        Ok(p) => is_file(PathBuf::from(p)),
        Err(_) => is_file(def()?),
    }
}
/// Pass `p` through if it is an existing regular file, otherwise error.
fn is_file(p: PathBuf) -> Result<PathBuf> {
    if !p.is_file() {
        bail!("Could not find history file {:?}. Try setting $HISTFILE", p)
    }
    Ok(p)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Test double that records every pushed `History` so importer tests
    /// can inspect exactly what was produced.
    #[derive(Default)]
    pub struct TestLoader {
        pub buf: Vec<History>,
    }

    #[async_trait]
    impl Loader for TestLoader {
        async fn push(&mut self, hist: History) -> Result<()> {
            self.buf.push(hist);
            Ok(())
        }
    }
}

View File

@@ -1,140 +0,0 @@
use std::{fs::File, io::Read, path::PathBuf};
use async_trait::async_trait;
use chrono::{TimeZone, Utc};
use directories::UserDirs;
use eyre::{eyre, Result};
use serde::Deserialize;
use atuin_common::utils::uuid_v4;
use super::{get_histpath, unix_byte_lines, Importer, Loader};
use crate::history::History;
/// One record from `.resh_history.json` (one JSON document per line).
/// Field names map to resh's camelCase JSON keys via serde renaming.
/// Only a handful of these fields are consumed by the importer; the rest
/// exist so deserialization of full records succeeds.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ReshEntry {
    pub cmd_line: String,
    pub exit_code: i64,
    pub shell: String,
    pub uname: String,
    pub session_id: String,
    pub home: String,
    pub lang: String,
    pub lc_all: String,
    pub login: String,
    pub pwd: String,
    pub pwd_after: String,
    pub shell_env: String,
    pub term: String,
    pub real_pwd: String,
    pub real_pwd_after: String,
    pub pid: i64,
    pub session_pid: i64,
    pub host: String,
    pub hosttype: String,
    pub ostype: String,
    pub machtype: String,
    pub shlvl: i64,
    pub timezone_before: String,
    pub timezone_after: String,
    // unix timestamps with fractional seconds
    pub realtime_before: f64,
    pub realtime_after: f64,
    pub realtime_before_local: f64,
    pub realtime_after_local: f64,
    pub realtime_duration: f64,
    pub realtime_since_session_start: f64,
    pub realtime_since_boot: f64,
    pub git_dir: String,
    pub git_real_dir: String,
    pub git_origin_remote: String,
    pub git_dir_after: String,
    pub git_real_dir_after: String,
    pub git_origin_remote_after: String,
    pub machine_id: String,
    pub os_release_id: String,
    pub os_release_version_id: String,
    pub os_release_id_like: String,
    pub os_release_name: String,
    pub os_release_pretty_name: String,
    pub resh_uuid: String,
    pub resh_version: String,
    pub resh_revision: String,
    pub parts_merged: bool,
    pub recalled: bool,
    pub recall_last_cmd_line: String,
    pub cols: String,
    pub lines: String,
}
/// Importer for resh history, held in memory as the raw JSON-lines bytes.
#[derive(Debug)]
pub struct Resh {
    bytes: Vec<u8>,
}
/// Default resh history location: `~/.resh_history.json`.
fn default_histpath() -> Result<PathBuf> {
    UserDirs::new()
        .map(|dirs| dirs.home_dir().join(".resh_history.json"))
        .ok_or_else(|| eyre!("could not find user directories"))
}
#[async_trait]
impl Importer for Resh {
const NAME: &'static str = "resh";
async fn new() -> Result<Self> {
let mut bytes = Vec::new();
let path = get_histpath(default_histpath)?;
let mut f = File::open(path)?;
f.read_to_end(&mut bytes)?;
Ok(Self { bytes })
}
async fn entries(&mut self) -> Result<usize> {
Ok(super::count_lines(&self.bytes))
}
async fn load(self, h: &mut impl Loader) -> Result<()> {
for b in unix_byte_lines(&self.bytes) {
let s = match std::str::from_utf8(b) {
Ok(s) => s,
Err(_) => continue, // we can skip past things like invalid utf8
};
let entry = match serde_json::from_str::<ReshEntry>(s) {
Ok(e) => e,
Err(_) => continue, // skip invalid json :shrug:
};
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
let timestamp = {
let secs = entry.realtime_before.floor() as i64;
let nanosecs = (entry.realtime_before.fract() * 1_000_000_000_f64).round() as u32;
Utc.timestamp(secs, nanosecs)
};
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
let duration = {
let secs = entry.realtime_after.floor() as i64;
let nanosecs = (entry.realtime_after.fract() * 1_000_000_000_f64).round() as u32;
let difference = Utc.timestamp(secs, nanosecs) - timestamp;
difference.num_nanoseconds().unwrap_or(0)
};
h.push(History {
id: uuid_v4(),
timestamp,
duration,
exit: entry.exit_code,
command: entry.cmd_line,
cwd: entry.pwd,
session: uuid_v4(),
hostname: entry.host,
})
.await?;
}
Ok(())
}
}

View File

@@ -1,187 +0,0 @@
// import old shell history!
// automatically hoover up all that we can find
use std::{fs::File, io::Read, path::PathBuf};
use async_trait::async_trait;
use chrono::{prelude::*, Utc};
use directories::UserDirs;
use eyre::{eyre, Result};
use super::{get_histpath, unix_byte_lines, Importer, Loader};
use crate::history::History;
/// Importer for zsh history, held in memory as the raw file bytes.
#[derive(Debug)]
pub struct Zsh {
    bytes: Vec<u8>,
}
/// Locate the default zsh history file under the home directory.
///
/// oh-my-zsh sets HISTFILE=~/.zhistory; zsh itself has no default value for
/// this var, but uses ~/.zhistory.
/// we could maybe be smarter about this in the future :)
fn default_histpath() -> Result<PathBuf> {
    let user_dirs = UserDirs::new().ok_or_else(|| eyre!("could not find user directories"))?;
    let home_dir = user_dirs.home_dir();

    // Idiomatic for-loop instead of the previous manual
    // `loop { match candidates.next() { .. } }` iterator drive.
    for candidate in &[".zhistory", ".zsh_history"] {
        let histpath = home_dir.join(candidate);
        if histpath.exists() {
            return Ok(histpath);
        }
    }

    Err(eyre!("Could not find history file. Try setting $HISTFILE"))
}
#[async_trait]
impl Importer for Zsh {
    // Fixed: this previously reported "bash" — a copy-paste error; this
    // importer handles zsh history.
    const NAME: &'static str = "zsh";

    /// Locate (honouring $HISTFILE) and read the whole zsh history file.
    async fn new() -> Result<Self> {
        let mut bytes = Vec::new();
        let path = get_histpath(default_histpath)?;
        let mut f = File::open(path)?;
        f.read_to_end(&mut bytes)?;
        Ok(Self { bytes })
    }

    /// Upper bound on entries: one per physical line. Backslash
    /// continuations mean this can over-count logical commands.
    async fn entries(&mut self) -> Result<usize> {
        Ok(super::count_lines(&self.bytes))
    }

    /// Parse both plain and extended (`: <ts>:<dur>;<cmd>`) zsh history.
    /// Lines ending in `\` are joined with the following line. Plain
    /// entries carry no timestamp, so each gets `now - counter` seconds to
    /// preserve relative ordering.
    async fn load(self, h: &mut impl Loader) -> Result<()> {
        let now = chrono::Utc::now();
        let mut line = String::new();

        let mut counter = 0;
        for b in unix_byte_lines(&self.bytes) {
            let s = match std::str::from_utf8(b) {
                Ok(s) => s,
                Err(_) => continue, // we can skip past things like invalid utf8
            };

            if let Some(s) = s.strip_suffix('\\') {
                // continuation: accumulate and keep the escaped newline
                line.push_str(s);
                line.push_str("\\\n");
            } else {
                line.push_str(s);
                let command = std::mem::take(&mut line);

                if let Some(command) = command.strip_prefix(": ") {
                    // extended history format carries its own timestamp
                    counter += 1;
                    h.push(parse_extended(command, counter)).await?;
                } else {
                    let offset = chrono::Duration::seconds(counter);
                    counter += 1;
                    h.push(History::new(
                        now - offset, // preserve ordering
                        command.trim_end().to_string(),
                        String::from("unknown"),
                        -1,
                        -1,
                        None,
                        None,
                    ))
                    .await?;
                }
            }
        }

        Ok(())
    }
}
/// Parse one zsh extended-history entry of the form
/// `<timestamp>:<duration>;<command>` (the leading `: ` has already been
/// stripped by the caller). `counter` is added as milliseconds so entries
/// sharing a timestamp keep their file order.
fn parse_extended(line: &str, counter: i64) -> History {
    // split "<time>:<rest>" then "<duration>;<command>"
    let (timestamp_str, rest) = line.split_once(':').unwrap();
    let (duration_str, command) = rest.split_once(';').unwrap();

    // fall back to "now" if the timestamp field is not a valid integer
    let seconds = timestamp_str
        .parse::<i64>()
        .unwrap_or_else(|_| chrono::Utc::now().timestamp());
    let timestamp = Utc.timestamp(seconds, 0) + chrono::Duration::milliseconds(counter);

    // use nanos, because why the hell not? we won't display them.
    let duration_ns = duration_str.parse::<i64>().map_or(-1, |t| t * 1_000_000_000);

    History::new(
        timestamp,
        command.trim_end().to_string(),
        String::from("unknown"),
        0, // assume 0, we have no way of knowing :(
        duration_ns,
        None,
        None,
    )
}
#[cfg(test)]
mod test {
    use chrono::prelude::*;
    use chrono::Utc;
    use itertools::assert_equal;

    use crate::import::tests::TestLoader;

    use super::*;

    #[test]
    fn test_parse_extended_simple() {
        // plain extended entry
        let parsed = parse_extended("1613322469:0;cargo install atuin", 0);
        assert_eq!(parsed.command, "cargo install atuin");
        assert_eq!(parsed.duration, 0);
        assert_eq!(parsed.timestamp, Utc.timestamp(1_613_322_469, 0));

        // only the first `;` separates duration from command
        let parsed = parse_extended("1613322469:10;cargo install atuin;cargo update", 0);
        assert_eq!(parsed.command, "cargo install atuin;cargo update");
        assert_eq!(parsed.duration, 10_000_000_000);
        assert_eq!(parsed.timestamp, Utc.timestamp(1_613_322_469, 0));

        // non-ASCII commands survive parsing intact
        let parsed = parse_extended("1613322469:10;cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷", 0);
        assert_eq!(parsed.command, "cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷");
        assert_eq!(parsed.duration, 10_000_000_000);
        assert_eq!(parsed.timestamp, Utc.timestamp(1_613_322_469, 0));

        // trailing whitespace/newline is trimmed from the command
        let parsed = parse_extended("1613322469:10;cargo install \\n atuin\n", 0);
        assert_eq!(parsed.command, "cargo install \\n atuin");
        assert_eq!(parsed.duration, 10_000_000_000);
        assert_eq!(parsed.timestamp, Utc.timestamp(1_613_322_469, 0));
    }

    #[tokio::test]
    async fn test_parse_file() {
        // entries() counts physical lines (4); the `\` continuation adds a
        // line but the loader yields only 3 logical commands.
        let bytes = r": 1613322469:0;cargo install atuin
: 1613322469:10;cargo install atuin; \
cargo update
: 1613322469:10;cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷
"
        .as_bytes()
        .to_owned();

        let mut zsh = Zsh { bytes };
        assert_eq!(zsh.entries().await.unwrap(), 4);

        let mut loader = TestLoader::default();
        zsh.load(&mut loader).await.unwrap();

        assert_equal(
            loader.buf.iter().map(|h| h.command.as_str()),
            [
                "cargo install atuin",
                "cargo install atuin; \\\ncargo update",
                "cargo :b̷i̶t̴r̵o̴t̴ ̵i̷s̴ ̷r̶e̵a̸l̷",
            ],
        );
    }
}

View File

@@ -1,219 +0,0 @@
// import old shell history from zsh-histdb!
// automatically hoover up all that we can find
// As far as i can tell there are no version numbers in the histdb sqlite DB, so we're going based
// on the schema from 2022-05-01
//
// I have run into some histories that will not import b/c of non UTF-8 characters.
//
//
// An Example sqlite query for histdb data:
//
//id|session|command_id|place_id|exit_status|start_time|duration|id|argv|id|host|dir
//
//
// select
// history.id,
// history.start_time,
// places.host,
// places.dir,
// commands.argv
// from history
// left join commands on history.command_id = commands.rowid
// left join places on history.place_id = places.rowid ;
//
// CREATE TABLE history (id integer primary key autoincrement,
// session int,
// command_id int references commands (id),
// place_id int references places (id),
// exit_status int,
// start_time int,
// duration int);
//
use std::path::{Path, PathBuf};
use async_trait::async_trait;
use chrono::{prelude::*, Utc};
use directories::UserDirs;
use eyre::{eyre, Result};
use sqlx::{sqlite::SqlitePool, Pool};
use super::Importer;
use crate::history::History;
use crate::import::Loader;
/// Row type for `select count(*)`-style queries against the histdb database.
// NOTE(review): appears unused in this file — confirm before removing.
#[derive(sqlx::FromRow, Debug)]
pub struct HistDbEntryCount {
    pub count: usize,
}
/// One joined row from the histdb database (history ⋈ places ⋈ commands);
/// see the query in `hist_from_db_conn`.
#[derive(sqlx::FromRow, Debug)]
pub struct HistDbEntry {
    pub id: i64,
    // naive timestamp from history.start_time; treated as UTC on conversion
    pub start_time: NaiveDateTime,
    pub host: String,
    pub dir: String,
    // raw command bytes (commands.argv); may not be valid UTF-8
    pub argv: Vec<u8>,
    pub duration: i64,
}
impl From<HistDbEntry> for History {
    fn from(histdb_item: HistDbEntry) -> Self {
        // argv is stored as a raw blob; invalid UTF-8 becomes an empty command
        let command = String::from_utf8(histdb_item.argv)
            .unwrap_or_else(|_e| String::from(""))
            .trim_end()
            .to_string();

        // histdb timestamps are naive; must assume UTC?
        let timestamp = DateTime::from_utc(histdb_item.start_time, Utc);

        History::new(
            timestamp,
            command,
            histdb_item.dir,
            0, // assume 0, we have no way of knowing :(
            histdb_item.duration,
            None,
            Some(histdb_item.host),
        )
    }
}
/// Importer holding all rows read from a zsh-histdb sqlite database.
#[derive(Debug)]
pub struct ZshHistDb {
    histdb: Vec<HistDbEntry>,
}
/// Read db at given file, return vector of entries.
async fn hist_from_db(dbpath: PathBuf) -> Result<Vec<HistDbEntry>> {
let pool = SqlitePool::connect(dbpath.to_str().unwrap()).await?;
hist_from_db_conn(pool).await
}
/// Fetch every history row, joined with its command text and place
/// (host/dir), ordered by start time.
async fn hist_from_db_conn(pool: Pool<sqlx::Sqlite>) -> Result<Vec<HistDbEntry>> {
    let query = "select history.id,history.start_time,history.duration,places.host,places.dir,commands.argv from history left join commands on history.command_id = commands.rowid left join places on history.place_id = places.rowid order by history.start_time";
    let entries = sqlx::query_as::<_, HistDbEntry>(query)
        .fetch_all(&pool)
        .await?;
    Ok(entries)
}
impl ZshHistDb {
    /// Candidate path for the histdb sqlite database.
    ///
    /// By default histdb database is `${HOME}/.histdb/zsh-history.db`.
    /// This can be modified by ${HISTDB_FILE}:
    ///
    ///   if [[ -z ${HISTDB_FILE} ]]; then
    ///       typeset -g HISTDB_FILE="${HOME}/.histdb/zsh-history.db"
    pub fn histpath_candidate() -> PathBuf {
        if let Ok(p) = std::env::var("HISTDB_FILE") {
            return PathBuf::from(p);
        }
        // Only resolve the home directory when the env var is unset, so a
        // configured HISTDB_FILE works even when UserDirs cannot be
        // determined (previously this unwrap ran unconditionally).
        let user_dirs = UserDirs::new().unwrap(); // should catch error here?
        user_dirs.home_dir().join(".histdb/zsh-history.db")
    }

    /// Return the histdb path if it exists on disk, otherwise an error.
    pub fn histpath() -> Result<PathBuf> {
        let histdb_path = ZshHistDb::histpath_candidate();
        if histdb_path.exists() {
            Ok(histdb_path)
        } else {
            Err(eyre!(
                "Could not find history file. Try setting $HISTDB_FILE"
            ))
        }
    }
}
#[async_trait]
impl Importer for ZshHistDb {
    // Not sure how this is used
    const NAME: &'static str = "zsh_histdb";

    /// Creates a new ZshHistDb and populates the history based on the pre-populated data
    /// structure.
    async fn new() -> Result<Self> {
        let dbpath = ZshHistDb::histpath()?;
        let histdb_entry_vec = hist_from_db(dbpath).await?;
        Ok(Self {
            histdb: histdb_entry_vec,
        })
    }

    /// Exact number of rows read from the database.
    async fn entries(&mut self) -> Result<usize> {
        Ok(self.histdb.len())
    }

    /// Convert each row into a `History` (via `From<HistDbEntry>`) and push it.
    async fn load(self, h: &mut impl Loader) -> Result<()> {
        for i in self.histdb {
            h.push(i.into()).await?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use sqlx::sqlite::SqlitePoolOptions;
    use std::env;

    #[tokio::test(flavor = "multi_thread")]
    async fn test_env_vars() {
        let test_env_db = "nonstd-zsh-history.db";
        let key = "HISTDB_FILE";
        env::set_var(key, test_env_db);

        // test the env got set
        assert_eq!(env::var(key).unwrap(), test_env_db.to_string());

        // test histdb returns the proper db from previous step
        let histdb_path = ZshHistDb::histpath_candidate();
        assert_eq!(histdb_path.to_str().unwrap(), test_env_db);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_import() {
        // In-memory sqlite DB so no filesystem fixture is needed.
        let pool: SqlitePool = SqlitePoolOptions::new()
            .min_connections(2)
            .connect(":memory:")
            .await
            .unwrap();

        // sql dump directly from a test database.
        let db_sql = r#"
PRAGMA foreign_keys=OFF;
BEGIN TRANSACTION;
CREATE TABLE commands (id integer primary key autoincrement, argv text, unique(argv) on conflict ignore);
INSERT INTO commands VALUES(1,'pwd');
INSERT INTO commands VALUES(2,'curl google.com');
INSERT INTO commands VALUES(3,'bash');
CREATE TABLE places   (id integer primary key autoincrement, host text, dir text, unique(host, dir) on conflict ignore);
INSERT INTO places VALUES(1,'mbp16.local','/home/noyez');
CREATE TABLE history  (id integer primary key autoincrement,
session int,
command_id int references commands (id),
place_id int references places (id),
exit_status int,
start_time int,
duration int);
INSERT INTO history VALUES(1,0,1,1,0,1651497918,1);
INSERT INTO history VALUES(2,0,2,1,0,1651497923,1);
INSERT INTO history VALUES(3,0,3,1,NULL,1651497930,NULL);
DELETE FROM sqlite_sequence;
INSERT INTO sqlite_sequence VALUES('commands',3);
INSERT INTO sqlite_sequence VALUES('places',3);
INSERT INTO sqlite_sequence VALUES('history',3);
CREATE INDEX hist_time on history(start_time);
CREATE INDEX place_dir on places(dir);
CREATE INDEX place_host on places(host);
CREATE INDEX history_command_place on history(command_id, place_id);
COMMIT; "#;

        sqlx::query(db_sql).execute(&pool).await.unwrap();

        // test histdb iterator
        // NOTE(review): this test only prints — it has no assertions, so it
        // checks nothing beyond "does not panic". Row 3 has NULL
        // exit_status/duration; confirm how sqlx decodes NULL into i64
        // before adding count/content assertions.
        let histdb_vec = hist_from_db_conn(pool).await.unwrap();
        let histdb = ZshHistDb { histdb: histdb_vec };

        println!("h: {:#?}", histdb.histdb);
        println!("counter: {:?}", histdb.histdb.len());
        for i in histdb.histdb {
            println!("{:?}", i);
        }
    }
}

View File

@@ -1,17 +0,0 @@
#![forbid(unsafe_code)]
#[macro_use]
extern crate log;
#[cfg(feature = "sync")]
pub mod api_client;
#[cfg(feature = "sync")]
pub mod encryption;
#[cfg(feature = "sync")]
pub mod sync;
pub mod database;
pub mod history;
pub mod import;
pub mod ordering;
pub mod settings;

View File

@@ -1,32 +0,0 @@
use minspan::minspan;
use super::{history::History, settings::SearchMode};
/// Re-rank fuzzy search results by match-span tightness; results from any
/// other search mode pass through unchanged.
pub fn reorder_fuzzy(mode: SearchMode, query: &str, res: Vec<History>) -> Vec<History> {
    if matches!(mode, SearchMode::Fuzzy) {
        reorder(query, |x| &x.command, res)
    } else {
        res
    }
}
/// Sort `res` so that candidates whose characters cover the query within the
/// tightest span (per `minspan`) come first. `f` projects the string to
/// match against out of each candidate.
fn reorder<F, A>(query: &str, f: F, res: Vec<A>) -> Vec<A>
where
    F: Fn(&A) -> &String,
    A: Clone,
{
    // Take ownership directly — the previous `res.clone()` was only needed
    // because the sort key closure captured `res`.
    let mut r = res;
    let qvec = &query.chars().collect();
    r.sort_by_cached_key(|h| {
        // TODO for fzf search we should sum up scores for each matched term
        let target: Vec<char> = f(h).chars().collect();
        let (from, to) = match minspan::span(qvec, &target) {
            Some(x) => x,
            // this is a little unfortunate: when we are asked to match a query that is found nowhere,
            // we don't want to return a None, as the comparison behaviour would put the worst matches
            // at the front. therefore, we return a span one larger than the longest possible
            // legitimate match — any real match spans at most `target.len()` characters.
            // Fix: the old fallback used `res.len()` (the number of candidates, unrelated to
            // string length), which could rank a non-match ahead of a genuine wide-span match.
            None => (0, target.len()),
        };
        1 + to - from
    });
    r
}

View File

@@ -1,218 +0,0 @@
use std::{
io::prelude::*,
path::{Path, PathBuf},
};
use chrono::{prelude::*, Utc};
use config::{Config, Environment, File as ConfigFile, FileFormat};
use eyre::{eyre, Context, Result};
use fs_err::{create_dir_all, File};
use parse_duration::parse;
use serde::Deserialize;
pub const HISTORY_PAGE_SIZE: i64 = 100;
/// How a search query is matched against history entries.
#[derive(Clone, Debug, Deserialize, Copy)]
pub enum SearchMode {
    #[serde(rename = "prefix")]
    Prefix,

    #[serde(rename = "fulltext")]
    FullText,

    #[serde(rename = "fuzzy")]
    Fuzzy,
}
/// Which subset of history a search considers.
#[derive(Clone, Debug, Deserialize, Copy)]
pub enum FilterMode {
    #[serde(rename = "global")]
    Global,

    #[serde(rename = "host")]
    Host,

    #[serde(rename = "session")]
    Session,

    #[serde(rename = "directory")]
    Directory,
}
/// Date-parsing dialect for human-entered dates (e.g. "next friday").
// FIXME: Can use upstream Dialect enum if https://github.com/stevedonovan/chrono-english/pull/16 is merged
#[derive(Clone, Debug, Deserialize, Copy)]
pub enum Dialect {
    #[serde(rename = "us")]
    Us,

    #[serde(rename = "uk")]
    Uk,
}
impl From<Dialect> for chrono_english::Dialect {
    /// Map our serde-friendly enum onto the chrono_english equivalent.
    fn from(d: Dialect) -> chrono_english::Dialect {
        match d {
            Dialect::Us => chrono_english::Dialect::Us,
            Dialect::Uk => chrono_english::Dialect::Uk,
        }
    }
}
/// Interactive UI layout style.
#[derive(Clone, Debug, Deserialize, Copy)]
pub enum Style {
    #[serde(rename = "auto")]
    Auto,

    #[serde(rename = "full")]
    Full,

    #[serde(rename = "compact")]
    Compact,
}
/// User configuration, loaded from config.toml plus `ATUIN_`-prefixed
/// environment variables (see `Settings::new` for defaults and precedence).
#[derive(Clone, Debug, Deserialize)]
pub struct Settings {
    pub dialect: Dialect,
    pub style: Style,
    pub auto_sync: bool,
    pub sync_address: String,
    // human-readable interval, e.g. "1h" (parsed by parse_duration)
    pub sync_frequency: String,
    pub db_path: String,
    pub key_path: String,
    pub session_path: String,
    pub search_mode: SearchMode,
    pub filter_mode: FilterMode,

    // This is automatically loaded when settings is created. Do not set in
    // config! Keep secrets and settings apart.
    pub session_token: String,
}
impl Settings {
    /// Persist the current UTC time as the moment of the last successful sync.
    pub fn save_sync_time() -> Result<()> {
        let data_dir = atuin_common::utils::data_dir();
        let data_dir = data_dir.as_path();

        let sync_time_path = data_dir.join("last_sync_time");

        fs_err::write(sync_time_path, Utc::now().to_rfc3339())?;

        Ok(())
    }

    /// Read the last sync time; returns the Unix epoch if none was recorded.
    pub fn last_sync() -> Result<chrono::DateTime<Utc>> {
        let data_dir = atuin_common::utils::data_dir();
        let data_dir = data_dir.as_path();

        let sync_time_path = data_dir.join("last_sync_time");

        if !sync_time_path.exists() {
            return Ok(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0));
        }

        let time = fs_err::read_to_string(sync_time_path)?;
        let time = chrono::DateTime::parse_from_rfc3339(time.as_str())?;

        Ok(time.with_timezone(&Utc))
    }

    /// Whether an automatic sync is due: requires auto_sync, an existing
    /// session file, and `sync_frequency` elapsed since the last sync.
    pub fn should_sync(&self) -> Result<bool> {
        let session_path = atuin_common::utils::data_dir().join("session");
        if !self.auto_sync || !session_path.exists() {
            return Ok(false);
        }

        match parse(self.sync_frequency.as_str()) {
            Ok(d) => {
                let d = chrono::Duration::from_std(d).unwrap();
                Ok(Utc::now() - Settings::last_sync()? >= d)
            }
            Err(e) => Err(eyre!("failed to check sync: {}", e)),
        }
    }

    /// Load settings. Precedence (lowest to highest): built-in defaults,
    /// config.toml (written from the bundled example on first run), and
    /// `ATUIN_*` environment variables. Paths are shell-expanded, and the
    /// session token is read from the session file if one exists.
    pub fn new() -> Result<Self> {
        let config_dir = atuin_common::utils::config_dir();
        let data_dir = atuin_common::utils::data_dir();

        create_dir_all(&config_dir)
            .wrap_err_with(|| format!("could not create dir {:?}", config_dir))?;

        create_dir_all(&data_dir)
            .wrap_err_with(|| format!("could not create dir {:?}", data_dir))?;

        // ATUIN_CONFIG_DIR overrides where config.toml lives.
        let mut config_file = if let Ok(p) = std::env::var("ATUIN_CONFIG_DIR") {
            PathBuf::from(p)
        } else {
            let mut config_file = PathBuf::new();
            config_file.push(config_dir);
            config_file
        };

        config_file.push("config.toml");

        let db_path = data_dir.join("history.db");
        let key_path = data_dir.join("key");
        let session_path = data_dir.join("session");

        // Defaults; env vars use the ATUIN_ prefix and `__` as separator
        // between nested keys.
        let mut config_builder = Config::builder()
            .set_default("db_path", db_path.to_str())?
            .set_default("key_path", key_path.to_str())?
            .set_default("session_path", session_path.to_str())?
            .set_default("dialect", "us")?
            .set_default("auto_sync", true)?
            .set_default("sync_frequency", "1h")?
            .set_default("sync_address", "https://api.atuin.sh")?
            .set_default("search_mode", "prefix")?
            .set_default("filter_mode", "global")?
            .set_default("session_token", "")?
            .set_default("style", "auto")?
            .add_source(
                Environment::with_prefix("atuin")
                    .prefix_separator("_")
                    .separator("__"),
            );

        config_builder = if config_file.exists() {
            config_builder.add_source(ConfigFile::new(
                config_file.to_str().unwrap(),
                FileFormat::Toml,
            ))
        } else {
            // First run: write the bundled example config for the user to edit.
            let example_config = include_bytes!("../config.toml");
            let mut file = File::create(config_file).wrap_err("could not create config file")?;
            file.write_all(example_config)
                .wrap_err("could not write default config file")?;

            config_builder
        };

        let config = config_builder.build()?;
        let mut settings: Settings = config
            .try_deserialize()
            .map_err(|e| eyre!("failed to deserialize: {}", e))?;

        // all paths should be expanded
        let db_path = settings.db_path;
        let db_path = shellexpand::full(&db_path)?;
        settings.db_path = db_path.to_string();

        let key_path = settings.key_path;
        let key_path = shellexpand::full(&key_path)?;
        settings.key_path = key_path.to_string();

        let session_path = settings.session_path;
        let session_path = shellexpand::full(&session_path)?;
        settings.session_path = session_path.to_string();

        // Finally, set the auth token
        if Path::new(session_path.to_string().as_str()).exists() {
            let token = fs_err::read_to_string(session_path.to_string())?;
            settings.session_token = token.trim().to_string();
        } else {
            settings.session_token = String::from("not logged in");
        }

        Ok(settings)
    }
}

View File

@@ -1,158 +0,0 @@
use std::convert::TryInto;
use chrono::prelude::*;
use eyre::Result;
use atuin_common::api::AddHistoryRequest;
use crate::{
api_client,
database::Database,
encryption::{encrypt, load_encoded_key, load_key},
settings::{Settings, HISTORY_PAGE_SIZE},
};
/// Hex-encoded SHA-256 digest of `string`. Used to avoid sending raw
/// hostnames to the server.
pub fn hash_str(string: &str) -> String {
    use sha2::{Digest, Sha256};

    let mut digest = Sha256::new();
    digest.update(string.as_bytes());
    hex::encode(digest.finalize())
}
// Currently sync is kinda naive, and basically just pages backwards through
// history. This means newly added stuff shows up properly! We also just use
// the total count in each database to indicate whether a sync is needed.
// I think this could be massively improved! If we had a way of easily
// indicating count per time period (hour, day, week, year, etc) then we can
// easily pinpoint where we are missing data and what needs downloading. Start
// with year, then find the week, then the day, then the hour, then download it
// all! The current naive approach will do for now.

// Check if remote has things we don't, and if so, download them.
// Returns (num downloaded, total local)
async fn sync_download(
    force: bool,
    client: &api_client::Client<'_>,
    db: &mut (impl Database + Send),
) -> Result<(i64, i64)> {
    debug!("starting sync download");

    let remote_count = client.count().await?;

    let initial_local = db.history_count().await?;
    let mut local_count = initial_local;

    // `force` restarts the download window at the epoch rather than the
    // stored last-sync time.
    let mut last_sync = if force {
        Utc.timestamp_millis(0)
    } else {
        Settings::last_sync()?
    };

    let mut last_timestamp = Utc.timestamp_millis(0);

    // An empty-string host asks the server for all hosts' history.
    let host = if force { Some(String::from("")) } else { None };

    // Page until our local count catches up with the remote's.
    while remote_count > local_count {
        let page = client
            .get_history(last_sync, last_timestamp, host.clone())
            .await?;

        db.save_bulk(&page).await?;

        local_count = db.history_count().await?;

        // a short page means the server has nothing further for this window
        if page.len() < HISTORY_PAGE_SIZE.try_into().unwrap() {
            break;
        }

        let page_last = page
            .last()
            .expect("could not get last element of page")
            .timestamp;

        // in the case of a small sync frequency, it's possible for history to
        // be "lost" between syncs. In this case we need to rewind the sync
        // timestamps
        if page_last == last_timestamp {
            last_timestamp = Utc.timestamp_millis(0);
            last_sync = last_sync - chrono::Duration::hours(1);
        } else {
            last_timestamp = page_last;
        }
    }

    Ok((local_count - initial_local, local_count))
}
// Check if we have things remote doesn't, and if so, upload them
async fn sync_upload(
    settings: &Settings,
    _force: bool,
    client: &api_client::Client<'_>,
    db: &mut (impl Database + Send),
) -> Result<()> {
    debug!("starting sync upload");

    let initial_remote_count = client.count().await?;
    let mut remote_count = initial_remote_count;

    let local_count = db.history_count().await?;

    debug!("remote has {}, we have {}", remote_count, local_count);

    let key = load_key(settings)?; // encryption key

    // first just try the most recent set
    let mut cursor = Utc::now();

    // Page backwards from `cursor` until the remote count catches up.
    while local_count > remote_count {
        let last = db.before(cursor, HISTORY_PAGE_SIZE).await?;
        let mut buffer = Vec::new();

        if last.is_empty() {
            break;
        }

        // Each entry is encrypted client-side; only the id, timestamp and a
        // hashed hostname travel in the clear.
        for i in last {
            let data = encrypt(&i, &key)?;
            let data = serde_json::to_string(&data)?;

            let add_hist = AddHistoryRequest {
                id: i.id,
                timestamp: i.timestamp,
                data,
                hostname: hash_str(&i.hostname),
            };

            buffer.push(add_hist);
        }

        // anything left over outside of the 100 block size
        client.post_history(&buffer).await?;

        cursor = buffer.last().unwrap().timestamp;

        remote_count = client.count().await?;

        debug!("upload cursor: {:?}", cursor);
    }

    Ok(())
}
/// Run a full two-way sync: upload local-only entries, then download
/// anything the server has that we lack, and record the sync time.
pub async fn sync(settings: &Settings, force: bool, db: &mut (impl Database + Send)) -> Result<()> {
    // build an API client from the configured address and credentials
    let client = api_client::Client::new(
        &settings.sync_address,
        &settings.session_token,
        load_encoded_key(settings)?,
    )?;

    sync_upload(settings, force, &client, db).await?;

    let (downloaded, _total_local) = sync_download(force, &client, db).await?;
    debug!("sync downloaded {}", downloaded);

    Settings::save_sync_time()?;

    Ok(())
}

View File

@@ -1,16 +0,0 @@
[package]
name = "atuin-common"
version = "0.9.1"
authors = ["Ellie Huxtable <ellie@elliehuxtable.com>"]
edition = "2018"
license = "MIT"
description = "common library for atuin"
homepage = "https://atuin.sh"
repository = "https://github.com/ellie/atuin"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
chrono = { version = "0.4", features = ["serde"] }
serde = { version = "1.0.137", features = ["derive"] }
uuid = { version = "1.0", features = ["v4"] }

View File

@@ -1,55 +0,0 @@
use chrono::Utc;
use serde::{Deserialize, Serialize};
/// Response body describing a user account.
#[derive(Debug, Serialize, Deserialize)]
pub struct UserResponse {
    pub username: String,
}
/// Request body for creating a new account.
#[derive(Debug, Serialize, Deserialize)]
pub struct RegisterRequest {
    pub email: String,
    pub username: String,
    pub password: String,
}
/// Response to a successful registration; carries the session token.
#[derive(Debug, Serialize, Deserialize)]
pub struct RegisterResponse {
    pub session: String,
}
/// Request body for logging in to an existing account.
#[derive(Debug, Serialize, Deserialize)]
pub struct LoginRequest {
    pub username: String,
    pub password: String,
}
/// Response to a successful login; carries the session token.
#[derive(Debug, Serialize, Deserialize)]
pub struct LoginResponse {
    pub session: String,
}
/// One history record to store on the server.
#[derive(Debug, Serialize, Deserialize)]
pub struct AddHistoryRequest {
    pub id: String,
    pub timestamp: chrono::DateTime<Utc>,
    // client-side encrypted payload, serialized to JSON
    pub data: String,
    // hashed hostname (see sync's hash_str), not the raw value
    pub hostname: String,
}
/// Total number of history entries the server holds for this user.
#[derive(Debug, Serialize, Deserialize)]
pub struct CountResponse {
    pub count: i64,
}
/// Query parameters for paging history down from the server.
#[derive(Debug, Serialize, Deserialize)]
pub struct SyncHistoryRequest {
    // only entries synced after this time
    pub sync_ts: chrono::DateTime<chrono::FixedOffset>,
    // only entries whose own timestamp is after this time
    pub history_ts: chrono::DateTime<chrono::FixedOffset>,
    pub host: String,
}
/// A page of (encrypted, serialized) history entries from the server.
#[derive(Debug, Serialize, Deserialize)]
pub struct SyncHistoryResponse {
    pub history: Vec<String>,
}

View File

@@ -1,16 +0,0 @@
// Calendar data
use serde::{Serialize, Deserialize};
/// Granularity for calendar aggregation.
// NOTE(review): SCREAMING_CASE variants are non-idiomatic Rust (Year/Month/
// Day would be conventional) but renaming would break existing callers.
pub enum TimePeriod {
    YEAR,
    MONTH,
    DAY,
}
/// Aggregate statistics for one calendar period.
#[derive(Debug, Serialize, Deserialize)]
pub struct TimePeriodInfo {
    // number of history entries in the period
    pub count: u64,

    // TODO: Use this for merkle tree magic
    pub hash: String,
}

View File

@@ -1,4 +0,0 @@
#![forbid(unsafe_code)]
pub mod api;
pub mod utils;

View File

@@ -1,86 +0,0 @@
use std::path::PathBuf;
use chrono::NaiveDate;
use uuid::Uuid;
/// Random v4 UUID rendered in "simple" form (lowercase hex, no dashes).
pub fn uuid_v4() -> String {
    format!("{}", Uuid::new_v4().as_simple())
}
// TODO: more reliable, more tested
// I don't want to use ProjectDirs, it puts config in awkward places on
// mac. Data too. Seems to be more intended for GUI apps.

/// The current user's home directory from $HOME. Panics if $HOME is unset.
pub fn home_dir() -> PathBuf {
    PathBuf::from(std::env::var("HOME").expect("$HOME not found"))
}
/// Atuin's config directory: `$XDG_CONFIG_HOME/atuin`, or `~/.config/atuin`
/// when $XDG_CONFIG_HOME is unset.
pub fn config_dir() -> PathBuf {
    let base = match std::env::var("XDG_CONFIG_HOME") {
        Ok(dir) => PathBuf::from(dir),
        Err(_) => home_dir().join(".config"),
    };
    base.join("atuin")
}
/// Atuin's data directory: `$XDG_DATA_HOME/atuin`, or `~/.local/share/atuin`
/// when $XDG_DATA_HOME is unset.
pub fn data_dir() -> PathBuf {
    let base = match std::env::var("XDG_DATA_HOME") {
        Ok(dir) => PathBuf::from(dir),
        Err(_) => home_dir().join(".local").join("share"),
    };
    base.join("atuin")
}
/// Number of days in the given month, computed as the distance between the
/// first of this month and the first of the next (rolling December over into
/// January of the following year).
pub fn get_days_from_month(year: i32, month: u32) -> i64 {
    let (next_year, next_month) = if month == 12 {
        (year + 1, 1)
    } else {
        (year, month + 1)
    };

    let this_month_start = NaiveDate::from_ymd(year, month, 1);
    let next_month_start = NaiveDate::from_ymd(next_year, next_month, 1);

    next_month_start
        .signed_duration_since(this_month_start)
        .num_days()
}
// NOTE(review): these tests mutate process-global environment variables and
// Rust runs tests in parallel by default, so they can race with one another.
// They currently rely on each test fully setting/removing the variables it
// reads — consider serializing them (e.g. with a shared mutex) if they flake.
#[cfg(test)]
mod tests {
    use super::*;
    use std::env;

    #[test]
    fn test_config_dir_xdg() {
        // with no $HOME, the XDG path must be used
        env::remove_var("HOME");
        env::set_var("XDG_CONFIG_HOME", "/home/user/custom_config");
        assert_eq!(
            config_dir(),
            PathBuf::from("/home/user/custom_config/atuin")
        );
        env::remove_var("XDG_CONFIG_HOME");
    }

    #[test]
    fn test_config_dir() {
        // with no XDG variable, fall back to ~/.config
        env::set_var("HOME", "/home/user");
        env::remove_var("XDG_CONFIG_HOME");
        assert_eq!(config_dir(), PathBuf::from("/home/user/.config/atuin"));
        env::remove_var("HOME");
    }

    #[test]
    fn test_data_dir_xdg() {
        // with no $HOME, the XDG path must be used
        env::remove_var("HOME");
        env::set_var("XDG_DATA_HOME", "/home/user/custom_data");
        assert_eq!(data_dir(), PathBuf::from("/home/user/custom_data/atuin"));
        env::remove_var("XDG_DATA_HOME");
    }

    #[test]
    fn test_data_dir() {
        // with no XDG variable, fall back to ~/.local/share
        env::set_var("HOME", "/home/user");
        env::remove_var("XDG_DATA_HOME");
        assert_eq!(data_dir(), PathBuf::from("/home/user/.local/share/atuin"));
        env::remove_var("HOME");
    }
}

View File

@@ -1,33 +0,0 @@
[package]
name = "atuin-server"
version = "0.9.1"
authors = ["Ellie Huxtable <ellie@elliehuxtable.com>"]
edition = "2018"
license = "MIT"
description = "server library for atuin"
homepage = "https://atuin.sh"
repository = "https://github.com/ellie/atuin"
[dependencies]
atuin-common = { path = "../atuin-common", version = "0.9.1" }
tracing = "0.1"
chrono = { version = "0.4", features = ["serde"] }
eyre = "0.6"
uuid = { version = "1.0", features = ["v4"] }
whoami = "1.1.2"
config = { version = "0.13", default-features = false, features = ["toml"] }
serde = { version = "1.0.137", features = ["derive"] }
serde_json = "1.0.81"
sodiumoxide = "0.2.6"
base64 = "0.13.0"
rand = "0.8.4"
tokio = { version = "1", features = ["full"] }
sqlx = { version = "0.5", features = [ "runtime-tokio-rustls", "chrono", "postgres" ] }
async-trait = "0.1.49"
axum = "0.5"
http = "0.2"
fs-err = "2.7"
chronoutil = "0.2.3"
tower = "0.4"
tower-http = { version = "0.3", features = ["trace"] }

View File

@@ -1,11 +0,0 @@
-- One row per synced shell-history entry. Only timestamps and host/user
-- identifiers are meaningful to the server; the payload itself is opaque.
create table history (
	id bigserial primary key,
	client_id text not null unique, -- the client-generated ID
	user_id bigserial not null, -- allow multiple users
	hostname text not null, -- a unique identifier from the client (can be hashed, random, whatever)
	timestamp timestamp not null, -- one of the few non-encrypted metadatas
	data varchar(8192) not null, -- store the actual history data, encrypted. I don't wanna know!
	created_at timestamp not null default current_timestamp
);

View File

@@ -1,10 +0,0 @@
create table users (
	id bigserial primary key, -- also store our own ID
	username varchar(32) not null unique, -- being able to contact users is useful
	email varchar(128) not null unique, -- being able to contact users is useful

	-- NOTE(review): a UNIQUE constraint on the password hash looks unintended:
	-- it would reject two accounts that happen to produce the same hash and
	-- leaks that a hash is already in use. Confirm before reusing this schema.
	password varchar(128) not null unique
);

-- the prior index is case sensitive :(
CREATE UNIQUE INDEX email_unique_idx on users (LOWER(email));
CREATE UNIQUE INDEX username_unique_idx on users (LOWER(username));

View File

@@ -1,6 +0,0 @@
-- One API session (bearer token) per user.
create table sessions (
	id bigserial primary key,

	-- NOTE(review): bigserial here creates its own (unused) sequence; a plain
	-- bigint referencing users(id) is presumably what was intended — confirm.
	user_id bigserial,
	token varchar(128) unique not null
);

View File

@@ -1,51 +0,0 @@
-- Prior to this, the count endpoint was super naive and just ran COUNT(1).
-- This is slow asf. Now that we have an amount of actual traffic,
-- stop doing that!

-- This basically maintains a count, so we can read ONE row, instead of ALL the
-- rows. Much better.

-- Future optimisation could use some sort of cache so we don't even need to hit
-- postgres at all.

create table total_history_count_user(
	id bigserial primary key,

	-- one row per user, kept up to date by the trigger below
	user_id bigserial,
	total integer -- try and avoid using keywords - hence total, not count
);

create or replace function user_history_count()
returns trigger as
$func$
begin
	if (TG_OP='INSERT') then
		update total_history_count_user set total = total + 1 where user_id = new.user_id;

		-- no counter row yet: seed it with a one-off full count
		if not found then
			insert into total_history_count_user(user_id, total)
			values (
				new.user_id,
				(select count(1) from history where user_id = new.user_id)
			);
		end if;

	elsif (TG_OP='DELETE') then
		-- NOTE(review): DELETE triggers only populate OLD, not NEW, so the
		-- new.user_id references in this branch are wrong; the follow-up
		-- migration replaces them with old.user_id.
		update total_history_count_user set total = total - 1 where user_id = new.user_id;

		if not found then
			insert into total_history_count_user(user_id, total)
			values (
				new.user_id,
				(select count(1) from history where user_id = new.user_id)
			);
		end if;
	end if;

	return NEW; -- this is actually ignored for an after trigger, but oh well
end;
$func$
language plpgsql volatile -- trigger functions must be written in a procedural language
cost 100; -- default value

create trigger tg_user_history_count
after insert or delete on history
for each row
execute procedure user_history_count();

View File

@@ -1,35 +0,0 @@
-- the old version of this function used NEW in the delete part when it should
-- use OLD (DELETE triggers only populate OLD; NEW is null there)
create or replace function user_history_count()
returns trigger as
$func$
begin
	if (TG_OP='INSERT') then
		update total_history_count_user set total = total + 1 where user_id = new.user_id;

		-- no counter row yet: seed it with a one-off full count
		if not found then
			insert into total_history_count_user(user_id, total)
			values (
				new.user_id,
				(select count(1) from history where user_id = new.user_id)
			);
		end if;

	elsif (TG_OP='DELETE') then
		update total_history_count_user set total = total - 1 where user_id = old.user_id;

		if not found then
			insert into total_history_count_user(user_id, total)
			values (
				old.user_id,
				(select count(1) from history where user_id = old.user_id)
			);
		end if;
	end if;

	return NEW; -- this is actually ignored for an after trigger, but oh well
end;
$func$
language plpgsql volatile -- trigger functions must be written in a procedural language
cost 100; -- default value

View File

@@ -1,3 +0,0 @@
-- Make it 4x larger. Most commands are less than this, but as it's base64
-- SOME are more than 8192. Should be enough for now.
-- (base64 inflates the stored payload by roughly a third over the raw bytes.)
ALTER TABLE history ALTER COLUMN data TYPE varchar(32768);

View File

@@ -1 +0,0 @@
-- Track when each account was created; existing rows get the migration time.
alter table users add column created_at timestamp not null default now();

View File

@@ -1,11 +0,0 @@
## host to bind, can also be passed via CLI args
# host = "127.0.0.1"
## port to bind, can also be passed via CLI args
# port = 8888
## whether to allow anyone to register an account
# open_registration = false
## URI for postgres (using development creds here)
# db_uri="postgres://username:password@localhost/atuin"

View File

@@ -1,222 +0,0 @@
/*
use self::diesel::prelude::*;
use eyre::Result;
use rocket::http::Status;
use rocket::request::{self, FromRequest, Outcome, Request};
use rocket::State;
use rocket_contrib::databases::diesel;
use sodiumoxide::crypto::pwhash::argon2id13;
use rocket_contrib::json::Json;
use uuid::Uuid;
use super::models::{NewSession, NewUser, Session, User};
use super::views::ApiResponse;
use crate::api::{LoginRequest, RegisterRequest};
use crate::schema::{sessions, users};
use crate::settings::Settings;
use crate::utils::hash_secret;
use super::database::AtuinDbConn;
#[derive(Debug)]
pub enum KeyError {
Missing,
Invalid,
}
pub fn verify_str(secret: &str, verify: &str) -> bool {
sodiumoxide::init().unwrap();
let mut padded = [0_u8; 128];
secret.as_bytes().iter().enumerate().for_each(|(i, val)| {
padded[i] = *val;
});
match argon2id13::HashedPassword::from_slice(&padded) {
Some(hp) => argon2id13::pwhash_verify(&hp, verify.as_bytes()),
None => false,
}
}
impl<'a, 'r> FromRequest<'a, 'r> for User {
type Error = KeyError;
fn from_request(request: &'a Request<'r>) -> request::Outcome<User, Self::Error> {
let session: Vec<_> = request.headers().get("authorization").collect();
if session.is_empty() {
return Outcome::Failure((Status::BadRequest, KeyError::Missing));
} else if session.len() > 1 {
return Outcome::Failure((Status::BadRequest, KeyError::Invalid));
}
let session: Vec<_> = session[0].split(' ').collect();
if session.len() != 2 {
return Outcome::Failure((Status::BadRequest, KeyError::Invalid));
}
if session[0] != "Token" {
return Outcome::Failure((Status::BadRequest, KeyError::Invalid));
}
let session = session[1];
let db = request
.guard::<AtuinDbConn>()
.succeeded()
.expect("failed to load database");
let session = sessions::table
.filter(sessions::token.eq(session))
.first::<Session>(&*db);
if session.is_err() {
return Outcome::Failure((Status::Unauthorized, KeyError::Invalid));
}
let session = session.unwrap();
let user = users::table.find(session.user_id).first(&*db);
match user {
Ok(user) => Outcome::Success(user),
Err(_) => Outcome::Failure((Status::Unauthorized, KeyError::Invalid)),
}
}
}
#[get("/user/<user>")]
#[allow(clippy::clippy::needless_pass_by_value)]
pub fn get_user(user: String, conn: AtuinDbConn) -> ApiResponse {
use crate::schema::users::dsl::{username, users};
let user: Result<String, diesel::result::Error> = users
.select(username)
.filter(username.eq(user))
.first(&*conn);
if user.is_err() {
return ApiResponse {
json: json!({
"message": "could not find user",
}),
status: Status::NotFound,
};
}
let user = user.unwrap();
ApiResponse {
json: json!({ "username": user.as_str() }),
status: Status::Ok,
}
}
#[post("/register", data = "<register>")]
#[allow(clippy::clippy::needless_pass_by_value)]
pub fn register(
conn: AtuinDbConn,
register: Json<RegisterRequest>,
settings: State<Settings>,
) -> ApiResponse {
if !settings.server.open_registration {
return ApiResponse {
status: Status::BadRequest,
json: json!({
"message": "registrations are not open"
}),
};
}
let hashed = hash_secret(register.password.as_str());
let new_user = NewUser {
email: register.email.as_str(),
username: register.username.as_str(),
password: hashed.as_str(),
};
let user = diesel::insert_into(users::table)
.values(&new_user)
.get_result(&*conn);
if user.is_err() {
return ApiResponse {
status: Status::BadRequest,
json: json!({
"message": "failed to create user - username or email in use?",
}),
};
}
let user: User = user.unwrap();
let token = Uuid::new_v4().to_simple().to_string();
let new_session = NewSession {
user_id: user.id,
token: token.as_str(),
};
match diesel::insert_into(sessions::table)
.values(&new_session)
.execute(&*conn)
{
Ok(_) => ApiResponse {
status: Status::Ok,
json: json!({"message": "user created!", "session": token}),
},
Err(_) => ApiResponse {
status: Status::BadRequest,
json: json!({ "message": "failed to create user"}),
},
}
}
#[post("/login", data = "<login>")]
#[allow(clippy::clippy::needless_pass_by_value)]
pub fn login(conn: AtuinDbConn, login: Json<LoginRequest>) -> ApiResponse {
let user = users::table
.filter(users::username.eq(login.username.as_str()))
.first(&*conn);
if user.is_err() {
return ApiResponse {
status: Status::NotFound,
json: json!({"message": "user not found"}),
};
}
let user: User = user.unwrap();
let session = sessions::table
.filter(sessions::user_id.eq(user.id))
.first(&*conn);
// a session should exist...
if session.is_err() {
return ApiResponse {
status: Status::InternalServerError,
json: json!({"message": "something went wrong"}),
};
}
let verified = verify_str(user.password.as_str(), login.password.as_str());
if !verified {
return ApiResponse {
status: Status::NotFound,
json: json!({"message": "user not found"}),
};
}
let session: Session = session.unwrap();
ApiResponse {
status: Status::Ok,
json: json!({"session": session.token}),
}
}
*/

View File

@@ -1,17 +0,0 @@
// Calendar data
use serde::{Deserialize, Serialize};
/// Granularity for calendar aggregation queries.
pub enum TimePeriod {
    YEAR,
    MONTH,
    DAY,
}
/// Aggregated stats for one calendar bucket (a year, month, or day).
#[derive(Debug, Serialize, Deserialize)]
pub struct TimePeriodInfo {
    pub count: u64,

    // TODO: Use this for merkle tree magic
    pub hash: String,
}

View File

@@ -1,425 +0,0 @@
use std::collections::HashMap;
use async_trait::async_trait;
use chrono::{Datelike, TimeZone};
use chronoutil::RelativeDuration;
use sqlx::{postgres::PgPoolOptions, Result};
use tracing::{debug, instrument};
use super::{
calendar::{TimePeriod, TimePeriodInfo},
models::{History, NewHistory, NewSession, NewUser, Session, User},
};
use crate::settings::HISTORY_PAGE_SIZE;
use atuin_common::utils::get_days_from_month;
/// Storage backend for users, sessions and synced history.
#[async_trait]
pub trait Database {
    /// Look up a session by its token.
    async fn get_session(&self, token: &str) -> Result<Session>;
    /// Resolve the user owning the session with this token.
    async fn get_session_user(&self, token: &str) -> Result<User>;
    /// Persist a new session.
    async fn add_session(&self, session: &NewSession) -> Result<()>;

    /// Look up a user by username.
    async fn get_user(&self, username: &str) -> Result<User>;
    /// Fetch the session belonging to a user.
    async fn get_user_session(&self, u: &User) -> Result<Session>;
    /// Insert a new user, returning the generated row id.
    async fn add_user(&self, user: &NewUser) -> Result<i64>;

    /// Count all history rows for a user (full scan).
    async fn count_history(&self, user: &User) -> Result<i64>;
    /// Count history via the trigger-maintained per-user counter.
    async fn count_history_cached(&self, user: &User) -> Result<i64>;

    /// Count history rows with `start <= timestamp < end`.
    async fn count_history_range(
        &self,
        user: &User,
        start: chrono::NaiveDateTime,
        end: chrono::NaiveDateTime,
    ) -> Result<i64>;
    /// Count history for a single day.
    async fn count_history_day(&self, user: &User, date: chrono::NaiveDate) -> Result<i64>;
    /// Count history for the month containing `date`.
    async fn count_history_month(&self, user: &User, date: chrono::NaiveDate) -> Result<i64>;
    /// Count history for a calendar year.
    async fn count_history_year(&self, user: &User, year: i32) -> Result<i64>;

    /// A page of history for a user, excluding entries uploaded from `host`.
    async fn list_history(
        &self,
        user: &User,
        created_after: chrono::NaiveDateTime,
        since: chrono::NaiveDateTime,
        host: &str,
    ) -> Result<Vec<History>>;
    /// Insert a batch of history items; duplicates are ignored.
    async fn add_history(&self, history: &[NewHistory]) -> Result<()>;
    /// The earliest history entry for a user.
    async fn oldest_history(&self, user: &User) -> Result<History>;

    /// Per-bucket history counts for the calendar endpoint.
    async fn calendar(
        &self,
        user: &User,
        period: TimePeriod,
        year: u64,
        month: u64,
    ) -> Result<HashMap<u64, TimePeriodInfo>>;
}
/// Postgres-backed implementation of [`Database`], wrapping a connection pool.
#[derive(Clone)]
pub struct Postgres {
    pool: sqlx::Pool<sqlx::postgres::Postgres>,
}
impl Postgres {
    /// Connect to postgres at `uri` (pool capped at 100 connections) and run
    /// any pending migrations from ./migrations before returning.
    pub async fn new(uri: &str) -> Result<Self> {
        let pool = PgPoolOptions::new()
            .max_connections(100)
            .connect(uri)
            .await?;

        sqlx::migrate!("./migrations").run(&pool).await?;

        Ok(Self { pool })
    }
}
#[async_trait]
impl Database for Postgres {
#[instrument(skip_all)]
async fn get_session(&self, token: &str) -> Result<Session> {
sqlx::query_as::<_, Session>("select id, user_id, token from sessions where token = $1")
.bind(token)
.fetch_one(&self.pool)
.await
}
#[instrument(skip_all)]
async fn get_user(&self, username: &str) -> Result<User> {
sqlx::query_as::<_, User>(
"select id, username, email, password from users where username = $1",
)
.bind(username)
.fetch_one(&self.pool)
.await
}
#[instrument(skip_all)]
async fn get_session_user(&self, token: &str) -> Result<User> {
sqlx::query_as::<_, User>(
"select users.id, users.username, users.email, users.password from users
inner join sessions
on users.id = sessions.user_id
and sessions.token = $1",
)
.bind(token)
.fetch_one(&self.pool)
.await
}
#[instrument(skip_all)]
async fn count_history(&self, user: &User) -> Result<i64> {
// The cache is new, and the user might not yet have a cache value.
// They will have one as soon as they post up some new history, but handle that
// edge case.
let res: (i64,) = sqlx::query_as(
"select count(1) from history
where user_id = $1",
)
.bind(user.id)
.fetch_one(&self.pool)
.await?;
Ok(res.0)
}
#[instrument(skip_all)]
async fn count_history_cached(&self, user: &User) -> Result<i64> {
let res: (i32,) = sqlx::query_as(
"select total from total_history_count_user
where user_id = $1",
)
.bind(user.id)
.fetch_one(&self.pool)
.await?;
Ok(res.0 as i64)
}
#[instrument(skip_all)]
async fn count_history_range(
&self,
user: &User,
start: chrono::NaiveDateTime,
end: chrono::NaiveDateTime,
) -> Result<i64> {
let res: (i64,) = sqlx::query_as(
"select count(1) from history
where user_id = $1
and timestamp >= $2::date
and timestamp < $3::date",
)
.bind(user.id)
.bind(start)
.bind(end)
.fetch_one(&self.pool)
.await?;
Ok(res.0)
}
// Count the history for a given year
#[instrument(skip_all)]
async fn count_history_year(&self, user: &User, year: i32) -> Result<i64> {
let start = chrono::Utc.ymd(year, 1, 1).and_hms_nano(0, 0, 0, 0);
let end = start + RelativeDuration::years(1);
let res = self
.count_history_range(user, start.naive_utc(), end.naive_utc())
.await?;
Ok(res)
}
// Count the history for a given month
#[instrument(skip_all)]
async fn count_history_month(&self, user: &User, month: chrono::NaiveDate) -> Result<i64> {
let start = chrono::Utc
.ymd(month.year(), month.month(), 1)
.and_hms_nano(0, 0, 0, 0);
// ofc...
let end = if month.month() < 12 {
chrono::Utc
.ymd(month.year(), month.month() + 1, 1)
.and_hms_nano(0, 0, 0, 0)
} else {
chrono::Utc
.ymd(month.year() + 1, 1, 1)
.and_hms_nano(0, 0, 0, 0)
};
debug!("start: {}, end: {}", start, end);
let res = self
.count_history_range(user, start.naive_utc(), end.naive_utc())
.await?;
Ok(res)
}
// Count the history for a given day
#[instrument(skip_all)]
async fn count_history_day(&self, user: &User, day: chrono::NaiveDate) -> Result<i64> {
let start = chrono::Utc
.ymd(day.year(), day.month(), day.day())
.and_hms_nano(0, 0, 0, 0);
let end = chrono::Utc
.ymd(day.year(), day.month(), day.day() + 1)
.and_hms_nano(0, 0, 0, 0);
let res = self
.count_history_range(user, start.naive_utc(), end.naive_utc())
.await?;
Ok(res)
}
#[instrument(skip_all)]
async fn list_history(
&self,
user: &User,
created_after: chrono::NaiveDateTime,
since: chrono::NaiveDateTime,
host: &str,
) -> Result<Vec<History>> {
let res = sqlx::query_as::<_, History>(
"select id, client_id, user_id, hostname, timestamp, data, created_at from history
where user_id = $1
and hostname != $2
and created_at >= $3
and timestamp >= $4
order by timestamp asc
limit $5",
)
.bind(user.id)
.bind(host)
.bind(created_after)
.bind(since)
.bind(HISTORY_PAGE_SIZE)
.fetch_all(&self.pool)
.await?;
Ok(res)
}
#[instrument(skip_all)]
async fn add_history(&self, history: &[NewHistory]) -> Result<()> {
let mut tx = self.pool.begin().await?;
for i in history {
let client_id: &str = &i.client_id;
let hostname: &str = &i.hostname;
let data: &str = &i.data;
sqlx::query(
"insert into history
(client_id, user_id, hostname, timestamp, data)
values ($1, $2, $3, $4, $5)
on conflict do nothing
",
)
.bind(client_id)
.bind(i.user_id)
.bind(hostname)
.bind(i.timestamp)
.bind(data)
.execute(&mut tx)
.await?;
}
tx.commit().await?;
Ok(())
}
#[instrument(skip_all)]
async fn add_user(&self, user: &NewUser) -> Result<i64> {
let email: &str = &user.email;
let username: &str = &user.username;
let password: &str = &user.password;
let res: (i64,) = sqlx::query_as(
"insert into users
(username, email, password)
values($1, $2, $3)
returning id",
)
.bind(username)
.bind(email)
.bind(password)
.fetch_one(&self.pool)
.await?;
Ok(res.0)
}
#[instrument(skip_all)]
async fn add_session(&self, session: &NewSession) -> Result<()> {
let token: &str = &session.token;
sqlx::query(
"insert into sessions
(user_id, token)
values($1, $2)",
)
.bind(session.user_id)
.bind(token)
.execute(&self.pool)
.await?;
Ok(())
}
#[instrument(skip_all)]
async fn get_user_session(&self, u: &User) -> Result<Session> {
sqlx::query_as::<_, Session>("select id, user_id, token from sessions where user_id = $1")
.bind(u.id)
.fetch_one(&self.pool)
.await
}
#[instrument(skip_all)]
async fn oldest_history(&self, user: &User) -> Result<History> {
let res = sqlx::query_as::<_, History>(
"select id, client_id, user_id, hostname, timestamp, data, created_at from history
where user_id = $1
order by timestamp asc
limit 1",
)
.bind(user.id)
.fetch_one(&self.pool)
.await?;
Ok(res)
}
#[instrument(skip_all)]
async fn calendar(
&self,
user: &User,
period: TimePeriod,
year: u64,
month: u64,
) -> Result<HashMap<u64, TimePeriodInfo>> {
// TODO: Support different timezones. Right now we assume UTC and
// everything is stored as such. But it _should_ be possible to
// interpret the stored date with a different TZ
match period {
TimePeriod::YEAR => {
let mut ret = HashMap::new();
// First we need to work out how far back to calculate. Get the
// oldest history item
let oldest = self.oldest_history(user).await?.timestamp.year();
let current_year = chrono::Utc::now().year();
// All the years we need to get data for
// The upper bound is exclusive, so include current +1
let years = oldest..current_year + 1;
for year in years {
let count = self.count_history_year(user, year).await?;
ret.insert(
year as u64,
TimePeriodInfo {
count: count as u64,
hash: "".to_string(),
},
);
}
Ok(ret)
}
TimePeriod::MONTH => {
let mut ret = HashMap::new();
for month in 1..13 {
let count = self
.count_history_month(
user,
chrono::Utc.ymd(year as i32, month, 1).naive_utc(),
)
.await?;
ret.insert(
month as u64,
TimePeriodInfo {
count: count as u64,
hash: "".to_string(),
},
);
}
Ok(ret)
}
TimePeriod::DAY => {
let mut ret = HashMap::new();
for day in 1..get_days_from_month(year as i32, month as u32) {
let count = self
.count_history_day(
user,
chrono::Utc
.ymd(year as i32, month as u32, day as u32)
.naive_utc(),
)
.await?;
ret.insert(
day as u64,
TimePeriodInfo {
count: count as u64,
hash: "".to_string(),
},
);
}
Ok(ret)
}
}
}
}

View File

@@ -1,145 +0,0 @@
use std::collections::HashMap;
use axum::{
extract::{Path, Query},
Extension, Json,
};
use http::StatusCode;
use tracing::{debug, error, instrument};
use super::{ErrorResponse, ErrorResponseStatus};
use crate::{
calendar::{TimePeriod, TimePeriodInfo},
database::{Database, Postgres},
models::{NewHistory, User},
};
use atuin_common::api::*;
/// `GET /sync/count` — the total number of history items stored for the
/// authenticated user.
#[instrument(skip_all, fields(user.id = user.id))]
pub async fn count(
    user: User,
    db: Extension<Postgres>,
) -> Result<Json<CountResponse>, ErrorResponseStatus<'static>> {
    match db.count_history_cached(&user).await {
        // By default read out the cached value
        Ok(count) => Ok(Json(CountResponse { count })),

        // If that fails, fallback on a full COUNT. Cache is built on a POST
        // only
        Err(_) => match db.count_history(&user).await {
            Ok(count) => Ok(Json(CountResponse { count })),
            Err(_) => Err(ErrorResponse::reply("failed to query history count")
                .with_status(StatusCode::INTERNAL_SERVER_ERROR)),
        },
    }
}
/// `GET /sync/history` — a page of history for the authenticated user.
///
/// Entries are filtered to those created on the server after `sync_ts`, with
/// a command timestamp after `history_ts`, and not uploaded from `host`, so
/// a client never re-downloads its own entries. Only the opaque `data`
/// payloads are returned.
#[instrument(skip_all, fields(user.id = user.id))]
pub async fn list(
    req: Query<SyncHistoryRequest>,
    user: User,
    db: Extension<Postgres>,
) -> Result<Json<SyncHistoryResponse>, ErrorResponseStatus<'static>> {
    let history = db
        .list_history(
            &user,
            req.sync_ts.naive_utc(),
            req.history_ts.naive_utc(),
            &req.host,
        )
        .await;

    if let Err(e) = history {
        error!("failed to load history: {}", e);
        return Err(ErrorResponse::reply("failed to load history")
            .with_status(StatusCode::INTERNAL_SERVER_ERROR));
    }

    // strip everything down to just the data blobs the client needs
    let history: Vec<String> = history
        .unwrap()
        .iter()
        .map(|i| i.data.to_string())
        .collect();

    debug!(
        "loaded {} items of history for user {}",
        history.len(),
        user.id
    );

    Ok(Json(SyncHistoryResponse { history }))
}
/// `POST /history` — store a batch of history entries for the authenticated
/// user.
#[instrument(skip_all, fields(user.id = user.id))]
pub async fn add(
    Json(req): Json<Vec<AddHistoryRequest>>,
    user: User,
    db: Extension<Postgres>,
) -> Result<(), ErrorResponseStatus<'static>> {
    debug!("request to add {} history items", req.len());

    // attach the authenticated user's id; timestamps are normalised to UTC
    let history: Vec<NewHistory> = req
        .into_iter()
        .map(|h| NewHistory {
            client_id: h.id,
            user_id: user.id,
            hostname: h.hostname,
            timestamp: h.timestamp.naive_utc(),
            data: h.data,
        })
        .collect();

    if let Err(e) = db.add_history(&history).await {
        error!("failed to add history: {}", e);
        return Err(ErrorResponse::reply("failed to add history")
            .with_status(StatusCode::INTERNAL_SERVER_ERROR));
    };

    Ok(())
}
/// `GET /sync/calendar/:focus` — per-bucket history counts.
///
/// `focus` selects the granularity (`year`, `month`, or `day`); the optional
/// `year`/`month` query parameters (defaults 0 and 1) select which period is
/// bucketed. Anything else is a 400.
#[instrument(skip_all, fields(user.id = user.id))]
pub async fn calendar(
    Path(focus): Path<String>,
    Query(params): Query<HashMap<String, u64>>,
    user: User,
    db: Extension<Postgres>,
) -> Result<Json<HashMap<u64, TimePeriodInfo>>, ErrorResponseStatus<'static>> {
    let year = *params.get("year").unwrap_or(&0);
    let month = *params.get("month").unwrap_or(&1);

    // Map the path segment onto a period first, so invalid requests are
    // rejected before touching the database (and the identical error-handling
    // closure isn't repeated per arm).
    let period = match focus.as_str() {
        "year" => TimePeriod::YEAR,
        "month" => TimePeriod::MONTH,
        "day" => TimePeriod::DAY,
        _ => {
            return Err(ErrorResponse::reply("invalid focus: use year/month/day")
                .with_status(StatusCode::BAD_REQUEST))
        }
    };

    let focus = db.calendar(&user, period, year, month).await.map_err(|_| {
        ErrorResponse::reply("failed to query calendar")
            .with_status(StatusCode::INTERNAL_SERVER_ERROR)
    })?;

    Ok(Json(focus))
}

View File

@@ -1,42 +0,0 @@
use std::borrow::Cow;
use axum::{response::IntoResponse, Json};
use serde::{Deserialize, Serialize};
pub mod history;
pub mod user;
/// `GET /` — a plain-text banner quote; doubles as a trivial liveness check.
pub async fn index() -> &'static str {
    "\"Through the fathomless deeps of space swims the star turtle Great A\u{2019}Tuin, bearing on its back the four giant elephants who carry on their shoulders the mass of the Discworld.\"\n\t-- Sir Terry Pratchett"
}
/// JSON error body returned by handlers: `{"reason": "..."}`.
#[derive(Debug, Serialize, Deserialize)]
pub struct ErrorResponse<'a> {
    pub reason: Cow<'a, str>,
}
// Render as the stored status code with the error serialised as JSON.
impl<'a> IntoResponse for ErrorResponseStatus<'a> {
    fn into_response(self) -> axum::response::Response {
        (self.status, Json(self.error)).into_response()
    }
}
/// An [`ErrorResponse`] paired with the HTTP status to return it under.
pub struct ErrorResponseStatus<'a> {
    pub error: ErrorResponse<'a>,
    pub status: http::StatusCode,
}
impl<'a> ErrorResponse<'a> {
    /// Attach an HTTP status code, producing a ready-to-return response.
    pub fn with_status(self, status: http::StatusCode) -> ErrorResponseStatus<'a> {
        ErrorResponseStatus {
            error: self,
            status,
        }
    }

    /// Build an error body from a message; chain with `with_status`.
    pub fn reply(reason: &'a str) -> ErrorResponse {
        Self {
            reason: reason.into(),
        }
    }
}

View File

@@ -1,157 +0,0 @@
use std::borrow::Borrow;
use axum::{extract::Path, Extension, Json};
use http::StatusCode;
use sodiumoxide::crypto::pwhash::argon2id13;
use tracing::{debug, error, instrument};
use uuid::Uuid;
use super::{ErrorResponse, ErrorResponseStatus};
use crate::{
database::{Database, Postgres},
models::{NewSession, NewUser},
settings::Settings,
};
use atuin_common::api::*;
/// Check `verify` against a stored argon2id13 hash string `secret`.
///
/// Returns false (rather than erroring) when the stored value does not parse
/// as a hash.
pub fn verify_str(secret: &str, verify: &str) -> bool {
    sodiumoxide::init().unwrap();

    // argon2id13 expects the hash in a fixed-size, zero-padded buffer
    let mut padded = [0_u8; 128];
    for (i, byte) in secret.as_bytes().iter().enumerate() {
        padded[i] = *byte;
    }

    if let Some(hash) = argon2id13::HashedPassword::from_slice(&padded) {
        argon2id13::pwhash_verify(&hash, verify.as_bytes())
    } else {
        false
    }
}
/// `GET /user/:username` — confirm a user exists, returning their username.
#[instrument(skip_all, fields(user.username = username.as_str()))]
pub async fn get(
    Path(username): Path<String>,
    db: Extension<Postgres>,
) -> Result<Json<UserResponse>, ErrorResponseStatus<'static>> {
    let user = match db.get_user(username.as_ref()).await {
        Ok(user) => user,
        // distinguish "no such user" (404) from a real database failure (500)
        Err(sqlx::Error::RowNotFound) => {
            debug!("user not found: {}", username);
            return Err(ErrorResponse::reply("user not found").with_status(StatusCode::NOT_FOUND));
        }
        Err(err) => {
            error!("database error: {}", err);
            return Err(ErrorResponse::reply("database error")
                .with_status(StatusCode::INTERNAL_SERVER_ERROR));
        }
    };

    Ok(Json(UserResponse {
        username: user.username,
    }))
}
/// `POST /register` — create an account and return its session token.
///
/// Rejected with 400 unless the server is configured with
/// `open_registration = true`.
#[instrument(skip_all)]
pub async fn register(
    Json(register): Json<RegisterRequest>,
    settings: Extension<Settings>,
    db: Extension<Postgres>,
) -> Result<Json<RegisterResponse>, ErrorResponseStatus<'static>> {
    if !settings.open_registration {
        return Err(
            ErrorResponse::reply("this server is not open for registrations")
                .with_status(StatusCode::BAD_REQUEST),
        );
    }

    // only the argon2id hash of the password is ever stored
    let hashed = hash_secret(&register.password);

    let new_user = NewUser {
        email: register.email,
        username: register.username,
        password: hashed,
    };

    let user_id = match db.add_user(&new_user).await {
        Ok(id) => id,
        Err(e) => {
            error!("failed to add user: {}", e);
            return Err(
                ErrorResponse::reply("failed to add user").with_status(StatusCode::BAD_REQUEST)
            );
        }
    };

    // the session token is the credential the client presents on later calls
    let token = Uuid::new_v4().as_simple().to_string();

    let new_session = NewSession {
        user_id,
        token: (&token).into(),
    };

    match db.add_session(&new_session).await {
        Ok(_) => Ok(Json(RegisterResponse { session: token })),
        Err(e) => {
            error!("failed to add session: {}", e);
            Err(ErrorResponse::reply("failed to register user")
                .with_status(StatusCode::BAD_REQUEST))
        }
    }
}
/// `POST /login` — verify credentials and return the user's session token.
///
/// A wrong password gets the same "user not found" reply as an unknown
/// username, so the response does not reveal which check failed.
#[instrument(skip_all, fields(user.username = login.username.as_str()))]
pub async fn login(
    login: Json<LoginRequest>,
    db: Extension<Postgres>,
) -> Result<Json<LoginResponse>, ErrorResponseStatus<'static>> {
    let user = match db.get_user(login.username.borrow()).await {
        Ok(u) => u,
        Err(sqlx::Error::RowNotFound) => {
            return Err(ErrorResponse::reply("user not found").with_status(StatusCode::NOT_FOUND));
        }
        Err(e) => {
            error!("failed to get user {}: {}", login.username.clone(), e);
            return Err(ErrorResponse::reply("database error")
                .with_status(StatusCode::INTERNAL_SERVER_ERROR));
        }
    };

    // a session should exist for every registered user (created at register
    // time), so a missing one is a server-side inconsistency
    let session = match db.get_user_session(&user).await {
        Ok(u) => u,
        Err(sqlx::Error::RowNotFound) => {
            debug!("user session not found for user id={}", user.id);
            return Err(ErrorResponse::reply("user not found").with_status(StatusCode::NOT_FOUND));
        }
        Err(err) => {
            error!("database error for user {}: {}", login.username, err);
            return Err(ErrorResponse::reply("database error")
                .with_status(StatusCode::INTERNAL_SERVER_ERROR));
        }
    };

    let verified = verify_str(user.password.as_str(), login.password.borrow());

    if !verified {
        return Err(ErrorResponse::reply("user not found").with_status(StatusCode::NOT_FOUND));
    }

    Ok(Json(LoginResponse {
        session: session.token,
    }))
}
/// Hash a password with argon2id13 at interactive op/mem limits, returning
/// the textual hash ready for storage.
fn hash_secret(secret: &str) -> String {
    sodiumoxide::init().unwrap();
    let hash = argon2id13::pwhash(
        secret.as_bytes(),
        argon2id13::OPSLIMIT_INTERACTIVE,
        argon2id13::MEMLIMIT_INTERACTIVE,
    )
    .unwrap();
    let texthash = std::str::from_utf8(&hash.0).unwrap().to_string();

    // postgres hates null chars. don't do that to postgres
    // (the fixed-width hash buffer is NUL padded, so trim the padding)
    texthash.trim_end_matches('\u{0}').to_string()
}

View File

@@ -1,33 +0,0 @@
#![forbid(unsafe_code)]
use std::net::{IpAddr, SocketAddr};
use axum::Server;
use database::Postgres;
use eyre::{Context, Result};
use crate::settings::Settings;
pub mod auth;
pub mod calendar;
pub mod database;
pub mod handlers;
pub mod models;
pub mod router;
pub mod settings;
/// Start the sync server: parse `host`, connect to postgres (running any
/// pending migrations), then serve the API router until it exits.
pub async fn launch(settings: Settings, host: String, port: u16) -> Result<()> {
    let host = host.parse::<IpAddr>()?;

    let postgres = Postgres::new(settings.db_uri.as_str())
        .await
        .wrap_err_with(|| format!("failed to connect to db: {}", settings.db_uri))?;

    let r = router::router(postgres, settings);

    Server::bind(&SocketAddr::new(host, port))
        .serve(r.into_make_service())
        .await?;

    Ok(())
}

View File

@@ -1,49 +0,0 @@
use chrono::prelude::*;
/// A history row as stored in postgres.
#[derive(sqlx::FromRow)]
pub struct History {
    pub id: i64,
    pub client_id: String, // a client generated ID
    pub user_id: i64,
    pub hostname: String,
    pub timestamp: NaiveDateTime,
    // the opaque payload uploaded by the client
    pub data: String,
    pub created_at: NaiveDateTime,
}
/// A history entry to be inserted (no server-generated columns yet).
pub struct NewHistory {
    pub client_id: String,
    pub user_id: i64,
    pub hostname: String,
    pub timestamp: chrono::NaiveDateTime,
    pub data: String,
}
/// A user row as stored in postgres; `password` is the stored hash.
#[derive(sqlx::FromRow)]
pub struct User {
    pub id: i64,
    pub username: String,
    pub email: String,
    pub password: String,
}
/// A session row; `token` is the API credential presented by clients.
#[derive(sqlx::FromRow)]
pub struct Session {
    pub id: i64,
    pub user_id: i64,
    pub token: String,
}
/// A user to be inserted; `password` must already be hashed.
pub struct NewUser {
    pub username: String,
    pub email: String,
    pub password: String,
}
/// A session to be inserted for a user.
pub struct NewSession {
    pub user_id: i64,
    pub token: String,
}

View File

@@ -1,76 +0,0 @@
use async_trait::async_trait;
use axum::{
extract::{FromRequest, RequestParts},
handler::Handler,
response::IntoResponse,
routing::{get, post},
Extension, Router,
};
use eyre::Result;
use tower::ServiceBuilder;
use tower_http::trace::TraceLayer;
use super::{
database::{Database, Postgres},
handlers,
};
use crate::{models::User, settings::Settings};
// Extract the authenticated User from an "Authorization: Token <token>"
// header by resolving the token against the sessions table. Any malformed or
// unknown credential is a 403; a missing Postgres extension is a 500.
#[async_trait]
impl<B> FromRequest<B> for User
where
    B: Send,
{
    type Rejection = http::StatusCode;

    async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
        // the Postgres handle is injected by the router as an extension
        let postgres = req
            .extensions()
            .get::<Postgres>()
            .ok_or(http::StatusCode::INTERNAL_SERVER_ERROR)?;

        let auth_header = req
            .headers()
            .get(http::header::AUTHORIZATION)
            .ok_or(http::StatusCode::FORBIDDEN)?;
        let auth_header = auth_header
            .to_str()
            .map_err(|_| http::StatusCode::FORBIDDEN)?;

        // expect exactly "Token <session token>"
        let (typ, token) = auth_header
            .split_once(' ')
            .ok_or(http::StatusCode::FORBIDDEN)?;

        if typ != "Token" {
            return Err(http::StatusCode::FORBIDDEN);
        }

        let user = postgres
            .get_session_user(token)
            .await
            .map_err(|_| http::StatusCode::FORBIDDEN)?;
        Ok(user)
    }
}
/// Fallback handler used by the router: any unmatched request is answered
/// with 418 I'm a Teapot and an empty body.
async fn teapot() -> impl IntoResponse {
    (http::StatusCode::IM_A_TEAPOT, "")
}
/// Build the server's HTTP router: sync/history/user endpoints, a teapot
/// fallback for unknown paths, plus tracing and shared-state layers.
pub fn router(postgres: Postgres, settings: Settings) -> Router {
    let routes = Router::new()
        .route("/", get(handlers::index))
        .route("/sync/count", get(handlers::history::count))
        .route("/sync/history", get(handlers::history::list))
        .route("/sync/calendar/:focus", get(handlers::history::calendar))
        .route("/history", post(handlers::history::add))
        .route("/user/:username", get(handlers::user::get))
        .route("/register", post(handlers::user::register))
        .route("/login", post(handlers::user::login))
        .fallback(teapot.into_service());

    // Shared middleware: request tracing plus the database handle and
    // settings made available to extractors/handlers as extensions.
    let middleware = ServiceBuilder::new()
        .layer(TraceLayer::new_for_http())
        .layer(Extension(postgres))
        .layer(Extension(settings));

    routes.layer(middleware)
}

View File

@@ -1,62 +0,0 @@
use std::{io::prelude::*, path::PathBuf};
use config::{Config, Environment, File as ConfigFile, FileFormat};
use eyre::{eyre, Result};
use fs_err::{create_dir_all, File};
use serde::{Deserialize, Serialize};
/// Page size for history listings — presumably consumed by the history
/// list handler; confirm usage at the call site.
pub const HISTORY_PAGE_SIZE: i64 = 100;
/// Server configuration, loaded by `Settings::new` from defaults,
/// `server.toml`, and `ATUIN_*` environment variables.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Settings {
    /// Address to bind (default "127.0.0.1").
    pub host: String,
    /// Port to listen on (default 8888).
    pub port: u16,
    /// Database connection string; no default is set, so it must be
    /// provided by the config file or environment.
    pub db_uri: String,
    /// Whether new user registration is allowed (default false).
    pub open_registration: bool,
}
impl Settings {
    /// Load server settings.
    ///
    /// Sources, in the order they are added to the builder: built-in
    /// defaults, `ATUIN_*` environment variables, then `server.toml` if it
    /// exists. On first run (no config file) the bundled example config is
    /// written to disk and loading proceeds from defaults + environment.
    ///
    /// The config directory is `$ATUIN_CONFIG_DIR` when set, otherwise the
    /// shared directory from `atuin_common::utils::config_dir()`.
    pub fn new() -> Result<Self> {
        // Resolve the directory that holds server.toml.
        let mut config_file = if let Ok(p) = std::env::var("ATUIN_CONFIG_DIR") {
            PathBuf::from(p)
        } else {
            let mut config_file = PathBuf::new();
            let config_dir = atuin_common::utils::config_dir();
            config_file.push(config_dir);
            config_file
        };

        config_file.push("server.toml");

        // Defaults plus ATUIN_*-prefixed environment variables. Note that
        // db_uri has no default here.
        let mut config_builder = Config::builder()
            .set_default("host", "127.0.0.1")?
            .set_default("port", 8888)?
            .set_default("open_registration", false)?
            .add_source(
                Environment::with_prefix("atuin")
                    .prefix_separator("_")
                    .separator("__"),
            );

        config_builder = if config_file.exists() {
            // to_str() panics on non-UTF-8 paths — NOTE(review): assumed
            // acceptable here; confirm if exotic paths must be supported.
            config_builder.add_source(ConfigFile::new(
                config_file.to_str().unwrap(),
                FileFormat::Toml,
            ))
        } else {
            // Config file does not exist yet: create it from the bundled
            // example so the user has something to edit next time.
            let example_config = include_bytes!("../server.toml");
            create_dir_all(config_file.parent().unwrap())?;
            let mut file = File::create(config_file)?;
            file.write_all(example_config)?;
            config_builder
        };

        let config = config_builder.build()?;

        config
            .try_deserialize()
            .map_err(|e| eyre!("failed to deserialize: {}", e))
    }
}

View File

@@ -1,6 +0,0 @@
# shellcheck disable=2148,SC2168,SC1090,SC2125
# Initialise atuin for zsh, but only when the binary is on $PATH.
local ATUIN_AVAILABLE=$+commands[atuin]

if [[ $ATUIN_AVAILABLE -eq 1 ]]; then
  source <(atuin init zsh)
fi

17
babel.config.js Normal file
View File

@@ -0,0 +1,17 @@
module.exports = {
presets: [
[
'@babel/preset-env',
{
targets: {
browsers: [
// Best practice: https://github.com/babel/babel/issues/7789
'>=1%',
'not ie 11',
'not op_mini all'
]
}
}
]
]
};

119
config/_default/config.toml Normal file
View File

@@ -0,0 +1,119 @@
baseurl = "https://doks-child-theme.netlify.app/"
canonifyURLs = false
disableAliases = true
disableHugoGeneratorInject = true
enableEmoji = true
enableGitInfo = false
enableRobotsTXT = true
languageCode = "en-US"
paginate = 7
rssLimit = 10
# Multilingual
defaultContentLanguage = "en"
disableLanguages = ["de", "nl"]
# defaultContentLanguageInSubdir = true
# add redirects/headers
[outputs]
home = ["HTML", "RSS", "REDIRECTS", "HEADERS"]
section = ["HTML", "RSS", "SITEMAP"]
# remove .{ext} from text/netlify
[mediaTypes."text/netlify"]
suffixes = [""]
delimiter = ""
# add output format for netlify _redirects
[outputFormats.REDIRECTS]
mediaType = "text/netlify"
baseName = "_redirects"
isPlainText = true
notAlternative = true
# add output format for netlify _headers
[outputFormats.HEADERS]
mediaType = "text/netlify"
baseName = "_headers"
isPlainText = true
notAlternative = true
# add output format for section sitemap.xml
[outputFormats.SITEMAP]
mediaType = "application/xml"
baseName = "sitemap"
isHTML = false
isPlainText = true
noUgly = true
rel = "sitemap"
[caches]
[caches.getjson]
dir = ":cacheDir/:project"
maxAge = "10s"
[sitemap]
changefreq = "weekly"
filename = "sitemap.xml"
priority = 0.5
[taxonomies]
contributor = "contributors"
[permalinks]
blog = "/blog/:title/"
# docs = "/docs/1.0/:sections[1:]/:title/"
[minify.tdewolff.html]
keepWhitespace = false
[module]
[module.hugoVersion]
extended = true
min = "0.80.0"
max = ""
[[module.mounts]]
source = "node_modules/@hyas/doks/archetypes"
target = "archetypes"
[[module.mounts]]
source = "node_modules/@hyas/doks/assets"
target = "assets"
# [[module.mounts]]
# source = "node_modules/@hyas/doks/content"
# target = "content"
[[module.mounts]]
source = "node_modules/@hyas/doks/data"
target = "data"
[[module.mounts]]
source = "node_modules/@hyas/doks/layouts"
target = "layouts"
[[module.mounts]]
source = "node_modules/@hyas/doks/static"
target = "static"
[[module.mounts]]
source = "node_modules/flexsearch"
target = "assets/js/vendor/flexsearch"
[[module.mounts]]
source = "node_modules/katex"
target = "assets/js/vendor/katex"
[[module.mounts]]
source = "node_modules/mermaid"
target = "assets/js/vendor/mermaid"
[[module.mounts]]
source = "assets"
target = "assets"
[[module.mounts]]
source = "static"
target = "static"
# [[module.mounts]]
# source = "content"
# target = "content"
[[module.mounts]]
source = "layouts"
target = "layouts"
[[module.mounts]]
source = "archetypes"
target = "archetypes"
[[module.mounts]]
source = "data"
target = "data"

View File

@@ -0,0 +1,25 @@
[en]
languageName = "English"
contentDir = "content/en"
weight = 10
[en.params]
languageISO = "EN"
[de]
languageName = "German"
contentDir = "content/de"
weight = 15
[de.params]
languageISO = "DE"
[nl]
languageName = "Nederlands"
contentDir = "content/nl"
weight = 20
[nl.params]
languageISO = "NL"
titleAddition = "Modern documentatie-thema"
description = "Doks is een Hugo-thema waarmee je moderne documentatie-websites kunt bouwen die veilig, snel en klaar voor SEO zijn — standaard."
titleHome = "Doks thema"
footer = "Mogelijk gemaakt door <a href=\"https://www.netlify.com/\">Netlify</a>, <a href=\"https://gohugo.io/\">Hugo</a>, en <a href=\"https://getdoks.org/\">Doks</a>"
alertText = "Introductie van het Doks-kinderthema, verschillende DX + UX-updates en meer! <a class=\"alert-link stretched-link\" href=\"https://getdoks.org/blog/doks-v0.2/\">Bekijk Doks v0.2</a>"

View File

@@ -0,0 +1,29 @@
defaultMarkdownHandler = "goldmark"
[goldmark]
[goldmark.extensions]
linkify = false
[goldmark.parser]
autoHeadingID = true
autoHeadingIDType = "github"
[goldmark.parser.attribute]
block = true
title = true
[goldmark.renderer]
unsafe = true
[highlight]
codeFences = false
guessSyntax = false
hl_Lines = ""
lineNoStart = 1
lineNos = false
lineNumbersInTable = true
noClasses = false
style = "dracula"
tabWidth = 4
[tableOfContents]
endLevel = 3
ordered = false
startLevel = 2

View File

@@ -0,0 +1,83 @@
[[docs]]
name = "Prologue"
weight = 10
identifier = "prologue"
url = "/docs/prologue/"
[[docs]]
name = "Help"
weight = 60
identifier = "help"
url = "/docs/help/"
# [[docs]]
# name = "Lorem"
# weight = 70
# identifier = "lorem"
# url = "/docs/lorem/"
[[guide]]
name = "Lorem"
weight = 10
identifier = "lorem"
url = "/guide/lorem/"
[[tutorial]]
name = "Lorem"
weight = 10
identifier = "lorem"
url = "/tutorial/lorem/"
[[main]]
name = "Docs"
url = "/docs/prologue/introduction/"
# url = "/docs/1.0/prologue/introduction/"
weight = 10
# [[main]]
# name = "Tutorial"
# url = "/tutorial/lorem/ipsum/"
# weight = 15
[[main]]
name = "Blog"
url = "/blog/"
weight = 20
[[main]]
name = "Get Started"
weight = 30
identifier = "get-started"
url = "/docs/prologue/introduction/"
[[main]]
name = "Quick Start"
weight = 40
identifier = "quick-start"
url = "/docs/prologue/quick-start/"
parent = "get-started"
[[main]]
name = "Tutorial"
weight = 50
identifier = "tutorial"
url = "https://getdoks.org/tutorial/introduction/"
parent = "get-started"
[[social]]
name = "GitHub"
pre = "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 24 24\" fill=\"none\" stroke=\"currentColor\" stroke-width=\"2\" stroke-linecap=\"round\" stroke-linejoin=\"round\" class=\"feather feather-github\"><path d=\"M9 19c-5 1.5-5-2.5-7-3m14 6v-3.87a3.37 3.37 0 0 0-.94-2.61c3.14-.35 6.44-1.54 6.44-7A5.44 5.44 0 0 0 20 4.77 5.07 5.07 0 0 0 19.91 1S18.73.65 16 2.48a13.38 13.38 0 0 0-7 0C6.27.65 5.09 1 5.09 1A5.07 5.07 0 0 0 5 4.77a5.44 5.44 0 0 0-1.5 3.78c0 5.42 3.3 6.61 6.44 7A3.37 3.37 0 0 0 9 18.13V22\"></path></svg>"
url = "https://github.com/h-enk/doks"
post = "v0.1.0"
weight = 10
[[social]]
name = "Twitter"
pre = "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 24 24\" fill=\"none\" stroke=\"currentColor\" stroke-width=\"2\" stroke-linecap=\"round\" stroke-linejoin=\"round\" class=\"feather feather-twitter\"><path d=\"M23 3a10.9 10.9 0 0 1-3.14 1.53 4.48 4.48 0 0 0-7.86 3v1A10.66 10.66 0 0 1 3 4s-4 9 5 13a11.64 11.64 0 0 1-7 2c9 5 20 0 20-11.5a4.5 4.5 0 0 0-.08-.83A7.72 7.72 0 0 0 23 3z\"></path></svg>"
url = "https://twitter.com/getdoks"
weight = 20
# [[footer]]
# name = "Privacy"
# url = "/privacy-policy/"
# weight = 10

View File

@@ -0,0 +1,39 @@
[[docs]]
name = "Prologue"
weight = 10
identifier = "prologue"
url = "/docs/prologue/"
[[docs]]
name = "Help"
weight = 60
identifier = "help"
url = "/docs/help/"
[[main]]
name = "Docs"
url = "/docs/prologue/introduction/"
weight = 10
# [[main]]
# name = "Blog"
# url = "/blog/"
# weight = 20
[[social]]
name = "GitHub"
pre = "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 24 24\" fill=\"none\" stroke=\"currentColor\" stroke-width=\"2\" stroke-linecap=\"round\" stroke-linejoin=\"round\" class=\"feather feather-github\"><path d=\"M9 19c-5 1.5-5-2.5-7-3m14 6v-3.87a3.37 3.37 0 0 0-.94-2.61c3.14-.35 6.44-1.54 6.44-7A5.44 5.44 0 0 0 20 4.77 5.07 5.07 0 0 0 19.91 1S18.73.65 16 2.48a13.38 13.38 0 0 0-7 0C6.27.65 5.09 1 5.09 1A5.07 5.07 0 0 0 5 4.77a5.44 5.44 0 0 0-1.5 3.78c0 5.42 3.3 6.61 6.44 7A3.37 3.37 0 0 0 9 18.13V22\"></path></svg>"
url = "https://github.com/h-enk/doks"
post = "v0.1.0"
weight = 10
[[social]]
name = "Twitter"
pre = "<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"20\" height=\"20\" viewBox=\"0 0 24 24\" fill=\"none\" stroke=\"currentColor\" stroke-width=\"2\" stroke-linecap=\"round\" stroke-linejoin=\"round\" class=\"feather feather-twitter\"><path d=\"M23 3a10.9 10.9 0 0 1-3.14 1.53 4.48 4.48 0 0 0-7.86 3v1A10.66 10.66 0 0 1 3 4s-4 9 5 13a11.64 11.64 0 0 1-7 2c9 5 20 0 20-11.5a4.5 4.5 0 0 0-.08-.83A7.72 7.72 0 0 0 23 3z\"></path></svg>"
url = "https://twitter.com/getdoks"
weight = 20
# [[footer]]
# name = "Privacy"
# url = "/privacy-policy/"
# weight = 10

View File

@@ -0,0 +1,95 @@
# Meta Data for SEO
## Homepage
title = "Atuin"
titleSeparator = "-"
titleAddition = "Magical Shell History"
description = "Atuin replaces your existing shell history with a SQLite database, recording additional context for your commands. It also provides optional and fully encrypted synchronisation of your history between machines"
## Documentation
# docsVersion = "0.3"
## Open Graph
images = ["screenshot.png"]
ogLocale = "en_US"
domainTLD = "atuin.sh"
titleHome = "Atuin"
## Twitter Cards
twitterSite = "@ellie_huxtable"
twitterCreator = "@ellie_huxtable"
## JSON-LD
# schemaType = "Person"
schemaType = "Organization"
schemaName = "Atuin"
schemaAuthor = "Ellie Huxtable"
schemaAuthorTwitter = "https://twitter.com/ellie_huxtable"
schemaAuthorLinkedIn = "https://www.linkedin.com/in//"
schemaAuthorGitHub = "https://github.com/ellie"
schemaLocale = "en-US"
schemaLogo = "logo-doks.png"
schemaLogoWidth = 512
schemaLogoHeight = 512
schemaImage = "doks.png"
schemaImageWidth = 1280
schemaImageHeight = 640
schemaTwitter = "https://twitter.com/ellie_huxtable"
schemaLinkedIn = ""
schemaGitHub = "https://github.com/ellie/atuin"
schemaSection = "blog"
## Sitelinks Search Box
siteLinksSearchBox = false
## Chrome Browser
themeColor = "#fff"
# Images
quality = 85
bgColor = "#fff"
landscapePhotoWidths = [900, 800, 700, 600, 500]
portraitPhotoWidths = [800, 700, 600, 500]
lqipWidth = "20x"
smallLimit = "300"
# Footer
footer = "Powered by ☕️ and 🦀. Made with ❤️ in London"
# Feed
copyRight = "Copyright (c) 2021-2022 Ellie Huxtable"
# Alert
alert = false
alertDismissable = true
# alertText = "Introducing the Doks child theme, several DX + UX updates, and more! <a class=\"alert-link stretched-link\" href=\"https://getdoks.org/blog/doks-v0.2/\" target=\"_blank\" rel=\"noopener\">Check out Doks v0.2</a>"
alertText = "Introducing the Doks child theme, several DX + UX updates, and more! <a class=\"alert-link stretched-link\" href=\"https://getdoks.org/blog/doks-v0.2/\">Check out Doks v0.2</a>"
# Edit Page
# repoHost [Github | Gitea | GitLab | Bitbucket | BitbucketServer ] is used for building the edit link based on git hoster
repoHost = "GitHub"
#repoHost = "Gitea"
docsRepo = "https://github.com/ellie/atuin"
docsRepoBranch = "web"
docsRepoSubPath = ""
editPage = false
lastMod = false
[options]
lazySizes = true
clipBoard = true
instantPage = true
flexSearch = true
darkMode = true
bootStrapJs = true
breadCrumb = false
highLight = true
kaTex = false
multilingualMode = true
docsVersioning = false
fullWidth = false
[menu]
[menu.section]
auto = true
collapsibleSidebar = true

1
config/next/config.toml Normal file
View File

@@ -0,0 +1 @@
canonifyURLs = false

40
config/postcss.config.js Normal file
View File

@@ -0,0 +1,40 @@
const autoprefixer = require('autoprefixer');
const purgecss = require('@fullhuman/postcss-purgecss');
const whitelister = require('purgecss-whitelister');
module.exports = {
plugins: [
autoprefixer(),
purgecss({
content: [
'./node_modules/@hyas/doks/layouts/**/*.html',
'./node_modules/@hyas/doks/content/**/*.md',
'./layouts/**/*.html',
'./content/**/*.md',
],
safelist: [
'lazyloaded',
'table',
'thead',
'tbody',
'tr',
'th',
'td',
'h5',
'alert-link',
'container-xxl',
'container-fluid',
...whitelister([
'./node_modules/@hyas/doks/assets/scss/common/_variables.scss',
'./node_modules/@hyas/doks/assets/scss/components/_alerts.scss',
'./node_modules/@hyas/doks/assets/scss/components/_buttons.scss',
'./node_modules/@hyas/doks/assets/scss/components/_code.scss',
'./node_modules/@hyas/doks/assets/scss/components/_syntax.scss',
'./node_modules/@hyas/doks/assets/scss/components/_search.scss',
'./node_modules/@hyas/doks/assets/scss/common/_dark.scss',
'./node_modules/katex/dist/katex.css',
]),
],
}),
],
}

View File

@@ -0,0 +1 @@
canonifyURLs = false

7
content/en/_index.md Normal file
View File

@@ -0,0 +1,7 @@
---
title : "Atuin"
description: "Atuin replaces your existing shell history with a SQLite database, recording additional context for your commands. It also provides optional and fully encrypted synchronisation of your history between machines"
lead: "Atuin replaces your existing shell history with a SQLite database, recording additional context for your commands. It also provides optional and fully encrypted synchronisation of your history between machines"
draft: false
images: []
---

View File

@@ -0,0 +1,8 @@
---
title: "Blog"
description: "The Atuin Blog"
date: 2020-10-06T08:49:55+00:00
lastmod: 2020-10-06T08:49:55+00:00
draft: false
images: []
---

View File

@@ -0,0 +1,10 @@
---
title: "Contact"
description: "Drop us an email."
date: 2020-08-27T19:25:12+02:00
lastmod: 2020-08-27T19:25:12+02:00
draft: true
images: []
---
{{< email user="hello" domain="getdoks.org" >}}

View File

@@ -0,0 +1,10 @@
---
title: "Contributors"
description: "The Doks contributors."
date: 2020-10-06T08:50:29+00:00
lastmod: 2020-10-06T08:50:29+00:00
draft: false
images: []
---
The Doks contributors.

View File

@@ -0,0 +1,12 @@
---
title: "Henk Verlinde"
description: "Creator of Hyas."
date: 2020-10-06T08:50:45+00:00
lastmod: 2020-10-06T08:50:45+00:00
draft: false
images: []
---
Creator of Hyas.
[@HenkVerlinde](https://twitter.com/henkverlinde)

View File

@@ -0,0 +1,9 @@
---
title : "Docs"
description: "Atuin Docs"
lead: ""
date: 2020-10-06T08:48:23+00:00
lastmod: 2020-10-06T08:48:23+00:00
draft: false
images: []
---

View File

@@ -0,0 +1,36 @@
---
title: "Privacy Policy"
description: "We do not use cookies and we do not collect any personal data."
date: 2020-08-27T19:23:18+02:00
lastmod: 2020-08-27T19:23:18+02:00
draft: true
images: []
---
__TLDR__: We do not use cookies and we do not collect any personal data.
## Website visitors
- No personal information is collected.
- No information is stored in the browser.
- No information is shared with, sent to or sold to third-parties.
- No information is shared with advertising companies.
- No information is mined and harvested for personal and behavioral trends.
- No information is monetized.
### Information we collect and what we use it for
We run [Plausible](https://plausible.io/) analytics on getdoks.org. The following information is collected:
- __Page URL__. We track the page URL of each page view on this website. We use this to understand which pages have been viewed and how many times a particular page has been viewed. For example: _https://getdoks.org/_.
- __HTTP Referrer__. We use the referrer string to understand the number of visitors referred to this website from links on other sites. For example: _https://github.com/_.
- __Browser__. We use this to understand what browsers people use when visiting this website. This is derived from the User-Agent HTTP header. The full User-Agent is discarded. For example: _Chrome_.
- __Operating system__. We use this to understand what operating systems people use when visiting this website. We only use the brand of the operating system and dont include the version number or any other details. This is derived from the User-Agent HTTP header. The full User-Agent is discarded. For example: _GNU/Linux_.
- __Device type__. We use this to understand what devices people use when visiting this website. This is derived from window.innerWidth. The actual width of the browser in pixels is discarded. For example: _Desktop_.
- __Visitor Country__. We look up the visitors country using the IP address. We do not track anything more granular than the country of origin and the IP address of the visitor is discarded. We never store IP addresses in our database or logs. For example: _Canada_.
## Contact us
[Contact us]({{< ref "contact/index.md" >}}) if you have any questions.
Effective Date: _27th August 2020_

11
content/en/versions.md Normal file
View File

@@ -0,0 +1,11 @@
---
title: "Versions"
description: ""
lead: "An appendix of hosted documentation for nearly every release of Doks, from v0 through v3."
date: 2021-09-24T08:50:23+02:00
lastmod: 2021-09-24T08:50:23+02:00
draft: true
images: []
layout: versions
url: "/docs/versions/"
---

60
data/docs-versions.yml Normal file
View File

@@ -0,0 +1,60 @@
# - group: v1.x
# baseurl: "https://getbootstrap.com"
# description: "Every minor and patch release from v1 is listed below."
# versions:
# - v: "1.0.0"
# - v: "1.1.0"
# - v: "1.1.1"
# - v: "1.2.0"
# - v: "1.3.0"
# - v: "1.4.0"
#
# - group: v2.x
# baseurl: "https://getbootstrap.com"
# description: "Every minor and patch release from v2 is listed below."
# versions:
# - v: "2.0.0"
# - v: "2.0.1"
# - v: "2.0.2"
# - v: "2.0.3"
# - v: "2.0.4"
# - v: "2.1.0"
# - v: "2.1.1"
# - v: "2.2.0"
# - v: "2.2.1"
# - v: "2.2.2"
# - v: "2.3.0"
# - v: "2.3.1"
# - v: "2.3.2"
#
# - group: v3.x
# baseurl: "https://getbootstrap.com/docs"
# description: "Every minor and patch release from v3 is listed below. Last update was v3.4.1."
# versions:
# - v: "3.3"
# - v: "3.4"
#
# - group: v4.x
# baseurl: "https://getbootstrap.com/docs"
# description: "Our previous major release with its minor releases. Last update was v4.6.0."
# versions:
# - v: "4.0"
# - v: "4.1"
# - v: "4.2"
# - v: "4.3"
# - v: "4.4"
# - v: "4.5"
# - v: "4.6"
- group: v0.x
baseurl: "/docs"
description: "Current major release. Last update was v0.2.0."
versions:
- v: "0.1"
- v: "0.2"
- group: v1.x
baseurl: "/docs"
description: "Every minor and patch release from v1 is listed below. Last update was v1.0.0."
versions:
- v: "1.0"

View File

@@ -1,25 +0,0 @@
version: '3.5'
services:
atuin:
restart: always
image: ghcr.io/ellie/atuin:main
command: server start
volumes:
- "./config:/config"
links:
- postgresql:db
ports:
- 8888:8888
environment:
ATUIN_HOST: "0.0.0.0"
ATUIN_OPEN_REGISTRATION: "true"
ATUIN_DB_URI: postgres://atuin:really-insecure@db/atuin
postgresql:
image: postgres:14
restart: unless-stopped
volumes: # Don't remove permanent storage for index database files!
- "./database:/var/lib/postgresql/data/"
environment:
POSTGRES_USER: atuin
POSTGRES_PASSWORD: really-insecure
POSTGRES_DB: atuin

View File

@@ -1,148 +0,0 @@
# Config
Atuin maintains two configuration files, stored in `~/.config/atuin/`. We store
data in `~/.local/share/atuin` (unless overridden by XDG\_\*).
You can also change the path to the configuration directory by setting
`ATUIN_CONFIG_DIR`. For example
```
export ATUIN_CONFIG_DIR=/home/ellie/.atuin
```
## Client config
```
~/.config/atuin/config.toml
```
The client runs on a user's machine, and unless you're running a server, this
is what you care about.
See [config.toml](../atuin-client/config.toml) for an example
### `dialect`
This configures how the [stats](stats.md) command parses dates. It has two
possible values
```
dialect = "uk"
```
or
```
dialect = "us"
```
and defaults to "us".
### `auto_sync`
Configures whether or not to automatically sync, when logged in. Defaults to
true
```
auto_sync = true/false
```
### `sync_address`
The address of the server to sync with! Defaults to `https://api.atuin.sh`.
```
sync_address = "https://api.atuin.sh"
```
### `sync_frequency`
How often to automatically sync with the server. This can be given in a
"human-readable" format. For example, `10s`, `20m`, `1h`, etc. Defaults to `1h`.
If set to `0`, Atuin will sync after every command. Note that some servers may
rate limit these frequent requests; this is harmless and will not cause any issues.
```
sync_frequency = "1h"
```
### `db_path`
The path to the Atuin SQlite database. Defaults to
`~/.local/share/atuin/history.db`.
```
db_path = "~/.history.db"
```
### `key_path`
The path to the Atuin encryption key. Defaults to
`~/.local/share/atuin/key`.
```
key_path = "~/.atuin-key"
```
### `session_path`
The path to the Atuin server session file. Defaults to
`~/.local/share/atuin/session`. This is essentially just an API token
```
session_path = "~/.atuin-session"
```
### `search_mode`
Which search mode to use. Atuin supports "prefix", full text and "fuzzy" search
modes. The prefix searches for "query\*", fulltext "\*query\*", and fuzzy applies
the search syntax [described below](#fuzzy-search-syntax).
Defaults to "prefix"
### `filter_mode`
The default filter to use when searching
| Mode | Description |
|--------------- | --------------- |
| global (default) | Search history from all hosts, all sessions, all directories |
| host | Search history just from this host |
| session | Search history just from the current session |
| directory | Search history just from the current directory|
Filter modes can still be toggled via ctrl-r
```
search_mode = "fulltext"
```
#### `fuzzy` search syntax
The "fuzzy" search syntax is based on the
[fzf search syntax](https://github.com/junegunn/fzf#search-syntax).
| Token | Match type | Description |
| --------- | -------------------------- | ------------------------------------ |
| `sbtrkt` | fuzzy-match | Items that match `sbtrkt` |
| `'wild` | exact-match (quoted) | Items that include `wild` |
| `^music` | prefix-exact-match | Items that start with `music` |
| `.mp3$` | suffix-exact-match | Items that end with `.mp3` |
| `!fire` | inverse-exact-match | Items that do not include `fire` |
| `!^music` | inverse-prefix-exact-match | Items that do not start with `music` |
| `!.mp3$` | inverse-suffix-exact-match | Items that do not end with `.mp3` |
A single bar character term acts as an OR operator. For example, the following
query matches entries that start with `core` and end with either `go`, `rb`,
or `py`.
```
^core go$ | rb$ | py$
```
## Server config
`// TODO`

View File

@@ -1,27 +0,0 @@
# `atuin import`
Atuin can import your history from your "old" history file
`atuin import auto` will attempt to figure out your shell (via \$SHELL) and run
the correct importer
Unfortunately these older files do not store as much information as Atuin does,
so not all features are available with imported data.
# zsh
```
atuin import zsh
```
If you've set HISTFILE, this should be picked up! If not, try
```
HISTFILE=/path/to/history/file atuin import zsh
```
This supports both the simple and extended format
# bash
TODO

View File

@@ -1,48 +0,0 @@
# Key binding
By default, Atuin will rebind both <kbd>Ctrl-r</kbd> and the up arrow. If you do not want
this to happen, set ATUIN_NOBIND before the call to `atuin init`
For example
```
export ATUIN_NOBIND="true"
eval "$(atuin init zsh)"
```
You can then choose to bind Atuin if needed, do this after the call to init.
# zsh
Atuin defines the ZLE widget "\_atuin_search_widget"
```
export ATUIN_NOBIND="true"
eval "$(atuin init zsh)"
bindkey '^r' _atuin_search_widget
# depends on terminal mode
bindkey '^[[A' _atuin_search_widget
bindkey '^[OA' _atuin_search_widget
```
# bash
```
export ATUIN_NOBIND="true"
eval "$(atuin init bash)"
# bind to ctrl-r, add any other bindings you want here too
bind -x '"\C-r": __atuin_history'
```
# fish
```
set -gx ATUIN_NOBIND "true"
atuin init fish | source
# bind to ctrl-r in normal and insert mode, add any other bindings you want here too
bind \cr _atuin_search
bind -M insert \cr _atuin_search
```

View File

@@ -1,11 +0,0 @@
# Listing history
```
atuin history list
```
| Arg | Description |
| -------------- | ----------------------------------------------------------------------------- |
| `--cwd/-c` | The directory to list history for (default: all dirs) |
| `--session/-s` | Enable listing history for the current session only (default: false) |
| `--human/-h` | Use human-readable formatting for the timestamp and duration (default: false) |

View File

@@ -1,146 +0,0 @@
# Конфигурация
Atuin использует два файла конфигурации. Они хранятся в `~/.config/atuin/`. Данные
хранятся в `~/.local/share/atuin` (если не определено другое в XDG\_\*).
Путь до каталога конфигурации может быть изменён установкой
параметра `ATUIN_CONFIG_DIR`. Например
```
export ATUIN_CONFIG_DIR=/home/ellie/.atuin
```
## Пользовательская конфигурация
```
~/.config/atuin/config.toml
```
Этот файл используется когда клиент работает на локальной машине (не сервере).
Пример конфигурации можно посмотреть в файле [config.toml](../atuin-client/config.toml)
### `dialect`
Этот параметр контролирует как [stats](stats.md) команда обрабатывает данные.
Может принимать одно из двух допустимых значений:
```
dialect = "uk"
```
или
```
dialect = "us"
```
По умолчанию - "us".
### `auto_sync`
Синхронизироваться ли автоматически если выполнен вход. По умолчанию - да (true)
```
auto_sync = true/false
```
### `sync_address`
Адрес сервера для синхронизации. По умолчанию `https://api.atuin.sh`.
```
sync_address = "https://api.atuin.sh"
```
### `sync_frequency`
Как часто клиент синхронизируется с сервером. Может быть указано в
понятном для человека формате. Например, `10s`, `20m`, `1h`, и т.д.
По умолчанию `1h`
Если стоит значение 0, Atuin будет синхронизироваться после каждой выполненной команды.
Помните, что сервера могут иметь ограничение на количество отправленных запросов.
```
sync_frequency = "1h"
```
### `db_path`
Путь до базы данных SQlite. По умолчанию это
`~/.local/share/atuin/history.db`.
```
db_path = "~/.history.db"
```
### `key_path`
Путь до ключа шифрования Atuin. По умолчанию,
`~/.local/share/atuin/key`.
```
key_path = "~/.atuin-key"
```
### `session_path`
Путь до серверного файла сессии Atuin. По умолчанию,
`~/.local/share/atuin/session`. На самом деле это просто API токен.
```
session_path = "~/.atuin-session"
```
### `search_mode`
Определяет, какой режим поиска будет использоваться. Atuin поддерживает "prefix",
текст целиком (fulltext) и неточный ("fuzzy") поиск. Режим "prefix" производит
поиск по "запрос\*", "fulltext" по "\*запрос\*", и "fuzzy" использует
[вот такой](#fuzzy-search-syntax) синтаксис.
По умолчанию стоит значение "prefix"
### `filter_mode`
Фильтр, по-умолчанию использующийся для поиска
| Режим | Описание |
|------------------|----------------------------------------------------------|
| global (default) | Искать историю команд со всех хостов, сессий и каталогов |
| host | Искать историю команд с этого хоста |
| session | Искать историю команд этой сессии |
| directory | Искать историю команд, выполненных в текущей папке |
Режимы поиска могут быть изменены через ctrl-r
```
search_mode = "fulltext"
```
#### fuzzy search syntax
Режим поиска "fuzzy" основан на
[fzf search syntax](https://github.com/junegunn/fzf#search-syntax).
| Токен | Тип совпадений | Описание |
|-----------|----------------------------|-------------------------------------|
| `sbtrkt` | fuzzy-match | Всё, что совпадает с `sbtrkt` |
| `'wild` | exact-match (В кавычках) | Всё, что включает в себя `wild` |
| `^music` | prefix-exact-match | Всё, что начинается с `music` |
| `.mp3$` | suffix-exact-match | Всё, что заканчивается на `.mp3` |
| `!fire` | inverse-exact-match | Всё, что не включает в себя `fire` |
| `!^music` | inverse-prefix-exact-match | Всё, что не начинается с `music` |
| `!.mp3$` | inverse-suffix-exact-match | Всё, что не заканчивается на `.mp3` |
Знак вертикальной черты означает логическое ИЛИ. Например, запрос ниже вернет
всё, что начинается с `core` и заканчивается либо на `go`, либо на `rb`, либо на `py`.
```
^core go$ | rb$ | py$
```
## Серверная конфигурация
`// TODO`

View File

@@ -1,27 +0,0 @@
# `atuin import`
Atuin может импортировать историю из "старого" файла истории
`atuin import auto` предпринимает попытку определить тип командного интерфейса
(через \$SHELL) и запускает нужный скрипт импорта.
К сожалению, эти файлы содержат не так много информации, как Atuin, так что не
все функции будут доступны с импортированными данными.
# zsh
```
atuin import zsh
```
Если у вас есть HISTFILE, то эта команда должна сработать. Иначе, попробуйте
```
HISTFILE=/path/to/history/file atuin import zsh
```
Этот параметр поддерживает как и упрощённый, так и полный формат.
# bash
TODO

View File

@@ -1,39 +0,0 @@
# Key binding
По умолчанию, Atuin будет переназначать <kbd>Ctrl-r</kbd> и клавишу 'стрелка вверх'.
Если вы не хотите этого, установите параметр ATUIN_NOBIND прежде чем вызывать `atuin init`
Например,
```
export ATUIN_NOBIND="true"
eval "$(atuin init zsh)"
```
Таким образом вы можете разрешить переназначение клавиш Atuin, если это необходимо.
Делайте это до инициализирующего вызова.
# zsh
Atuin устанавливает виджет ZLE "\_atuin_search_widget"
```
export ATUIN_NOBIND="true"
eval "$(atuin init zsh)"
bindkey '^r' _atuin_search_widget
# зависит от режима терминала
bindkey '^[[A' _atuin_search_widget
bindkey '^[OA' _atuin_search_widget
```
# bash
```
export ATUIN_NOBIND="true"
eval "$(atuin init bash)"
# Переопределите ctrl-r, и любые другие сочетания горячих клавиш тут
bind -x '"\C-r": __atuin_history'
```

View File

@@ -1,11 +0,0 @@
# Вывод истории на экран
```
atuin history list
```
| Аргумент | Описание |
|----------------|--------------------------------------------------------------------------------|
| `--cwd/-c` | Каталог, историю команд которой необходимо вывести (по умолчанию все каталоги) |
| `--session/-s` | Выводит историю команд только текущей сессии (по умолчанию false) |
| `--human/-h` | Читаемый формат для времени и периодов времени (по умолчанию false) |

View File

@@ -1,39 +0,0 @@
# `atuin search`
```
atuin search <query>
```
Поиск в Atuin также поддерживает wildcards со знаками `*` или `%`.
По умолчанию, должен быть указан префикс (т.е. все запросы автоматически дополняются wildcard-ами)
| Аргумент | Описание |
|--------------------|---------------------------------------------------------------------------------------------|
| `--cwd/-c`         | Каталог, для которого отображается история (по умолчанию, все каталоги) |
| `--exclude-cwd` | Исключить команды которые запускались в этом каталоге (по умолчанию none) |
| `--exit/-e` | Фильтровать по exit code (по умолчанию none) |
| `--exclude-exit` | Исключить команды, которые завершились с указанным значением (по умолчанию none) |
| `--before` | Включить только команды, которые были запущены до указанного времени (по умолчанию none) |
| `--after` | Включить только команды, которые были запущены после указанного времени (по умолчанию none) |
| `--interactive/-i` | Открыть интерактивный поисковой графический интерфейс (по умолчанию false) |
| `--human/-h`       | Использовать читаемое форматирование для времени и периодов времени (по умолчанию false) |
## Примеры
```
# Начать интерактивный поиск с текстовым пользовательским интерфейсом
atuin search -i
# Начать интерактивный поиск с текстовым пользовательским интерфейсом и уже введённым запросом
atuin search -i atuin
# Искать по всем командам, начиная с cargo, которые успешно завершились
atuin search --exit 0 cargo
# Искать по всем командам которые завершились ошибками и были вызваны в текущей папке и были запущены до первого апреля 2021
atuin search --exclude-exit 0 --before 01/04/2021 --cwd .
# Искать по всем командам, начиная с cargo, которые успешно завершились и были запущены после трёх часов дня вчера
atuin search --exit 0 --after "yesterday 3pm" cargo
```

View File

@@ -1,160 +0,0 @@
# `atuin server`
Atuin позволяет запустить свой собственный сервер синхронизации, если вы
не хотите использовать мой :)
Здесь есть только одна субкоманда, `atuin server start`, которая запустит
http-сервер синхронизации Atuin
```
USAGE:
atuin server start [OPTIONS]
FLAGS:
--help Prints help information
-V, --version Prints version information
OPTIONS:
-h, --host <host>
-p, --port <port>
```
## config
Серверная конфигурация хранится отдельно от пользовательской, даже если
это один и тот же бинарный файл. Серверная конфигурация лежит в `~/.config/atuin/server.toml`.
Этот файл выглядит как-то так:
```toml
host = "0.0.0.0"
port = 8888
open_registration = true
db_uri="postgres://user:password@hostname/database"
```
Конфигурация также может находиться в переменных окружения.
```sh
ATUIN_HOST="0.0.0.0"
ATUIN_PORT=8888
ATUIN_OPEN_REGISTRATION=true
ATUIN_DB_URI="postgres://user:password@hostname/database"
```
### host
Адрес хоста, который будет прослушиваться сервером Atuin
По умолчанию это `127.0.0.1`.
### port
Порт, который будет прослушиваться сервером Atuin.
По умолчанию это `8888`.
### open_registration
Если `true`, autin будет разрешать регистрацию новых пользователей.
Установите флаг `false`, если после создания вашего аккаунта вы не хотите, чтобы другие
могли пользоваться вашим сервером.
По умолчанию `false`.
### db_uri
Действующий URI postgres, где будет сохранён аккаунт пользователя и история.
## Docker
Поддерживается образ Docker, чтобы упростить развертывание сервера в контейнере.
```sh
docker run -d -v "$USER/.config/atuin:/config" ghcr.io/ellie/atuin:latest server start
```
## Docker Compose
Использование вашего собственного docker-образа с хостингом вашего собственного Atuin может быть реализовано через
файл docker-compose.
Создайте файл `.env` рядом с `docker-compose.yml` с содержанием наподобие этого:
```
ATUIN_DB_USERNAME=atuin
# Choose your own secure password
ATUIN_DB_PASSWORD=really-insecure
```
Создайте `docker-compose.yml`:
```yaml
version: '3.5'
services:
atuin:
restart: always
image: ghcr.io/ellie/atuin:main
command: server start
volumes:
- "./config:/config"
links:
- postgresql:db
ports:
- 8888:8888
environment:
ATUIN_HOST: "0.0.0.0"
ATUIN_OPEN_REGISTRATION: "true"
ATUIN_DB_URI: postgres://$ATUIN_DB_USERNAME:$ATUIN_DB_PASSWORD@db/atuin
postgresql:
image: postgres:14
restart: unless-stopped
volumes: # Don't remove permanent storage for index database files!
- "./database:/var/lib/postgresql/data/"
environment:
POSTGRES_USER: $ATUIN_DB_USERNAME
POSTGRES_PASSWORD: $ATUIN_DB_PASSWORD
POSTGRES_DB: atuin
```
Запустите службы с помощью `docker-compose`:
```sh
docker-compose up -d
```
### Использование systemd для управления сервером Atuin
Юнит `systemd` для управления службами, контролируемыми `docker-compose`:
```
[Unit]
Description=Docker Compose Atuin Service
Requires=docker.service
After=docker.service
[Service]
# Where the docker-compose file is located
WorkingDirectory=/srv/atuin-server
ExecStart=/usr/bin/docker-compose up
ExecStop=/usr/bin/docker-compose down
TimeoutStartSec=0
Restart=on-failure
StartLimitBurst=3
[Install]
WantedBy=multi-user.target
```
Включите и запустите службу командой:
```sh
systemctl enable --now atuin
```
Проверьте, работает ли:
```sh
systemctl status atuin
```

View File

@@ -1,20 +0,0 @@
# `atuin gen-completions`
[Shell completions](https://en.wikipedia.org/wiki/Command-line_completion) для Atuin
могут быть сгенерированы путём указания каталога для вывода и желаемого shell через субкоманду `gen-completions`.
```
$ atuin gen-completions --shell bash --out-dir $HOME
Shell completion for BASH is generated in "/home/user"
```
Возможные значения для аргумента `--shell` могут быть следующими:
- `bash`
- `fish`
- `zsh`
- `powershell`
- `elvish`
Также рекомендуем прочитать [supported shells](./../../README.md#supported-shells).

View File

@@ -1,40 +0,0 @@
# `atuin stats`
Atuin также может выводить статистику, основанную на истории. Пока что в очень простом виде,
но скоро должно появиться больше возможностей.
Статистика выводится пока только на английском
Statistics in English only
# TODO
```
$ atuin stats day last friday
+---------------------+------------+
| Statistic | Value |
+---------------------+------------+
| Most used command | git status |
+---------------------+------------+
| Commands ran | 450 |
+---------------------+------------+
| Unique commands ran | 213 |
+---------------------+------------+
$ atuin stats day 01/01/21 # also accepts absolute dates
```
Также может быть выведена статистика всей известной Atuin истории:
```
$ atuin stats all
+---------------------+-------+
| Statistic | Value |
+---------------------+-------+
| Most used command | ls |
+---------------------+-------+
| Commands ran | 8190 |
+---------------------+-------+
| Unique commands ran | 2996 |
+---------------------+-------+
```

View File

@@ -1,60 +0,0 @@
# `atuin sync`
Atuin может сделать резервную копию вашей истории на сервер, чтобы обеспечить использование
разными компьютерами одной и той же истории. Вся история будет зашифрована сквозным шифрованием,
так что сервер _никогда_ не получит ваши данные!
Можно сделать свой сервер (запустив `atuin server start`, об этом написано в других
файлах документации), но у меня есть свой https://api.atuin.sh. Это серверный адрес по умолчанию,
который может быть изменён в [конфигурации](config_ru.md). Опять же, я _не_ могу получить ваши данные
и они мне не нужны.
## Частота синхронизации
Синхронизация будет происходить автоматически, если обратное не было указано в конфигурации.
Отконфигурировать сей параметр можно в [config](config_ru.md)
## Синхронизация
Синхронизироваться также можно вручную, используя команду `atuin sync`
## Регистрация
Можно зарегистрировать аккаунт для синхронизации:
```
atuin register -u <USERNAME> -e <EMAIL> -p <PASSWORD>
```
Имена пользователей должны быть уникальны, и электронная почта должна использоваться
только для срочных уведомлений (изменения политик, нарушения безопасности и т.д.)
После регистрации, вы уже сразу вошли в свой аккаунт :) С этого момента синхронизация
будет проходить автоматически
## Ключ
Поскольку все данные шифруются, Atuin при работе сгенерирует ваш ключ. Он будет сохранён в
каталоге с данными Atuin (`~/.local/share/atuin` на системах с GNU/Linux)
Также можно сделать это самим:
```
atuin key
```
Никогда не передавайте никому этот ключ!
## Вход
Если вы хотите войти с другого компьютера, вам потребуется ключ безопасности (`atuin key`).
```
atuin login -u <USERNAME> -p <PASSWORD> -k <KEY>
```
## Выход
```
atuin logout
```

View File

@@ -1,39 +0,0 @@
# `atuin search`
```
atuin search <query>
```
Atuin search also supports wildcards, with either the `*` or `%` character. By
default, a prefix search is performed (ie, all queries are automatically
appended with a wildcard).
| Arg | Description |
| ------------------ | ----------------------------------------------------------------------------- |
| `--cwd/-c` | The directory to list history for (default: all dirs) |
| `--exclude-cwd` | Do not include commands that ran in this directory (default: none) |
| `--exit/-e` | Filter by exit code (default: none) |
| `--exclude-exit` | Do not include commands that exited with this value (default: none) |
| `--before`         | Only include commands ran before this time (default: none)                    |
| `--after`          | Only include commands ran after this time (default: none)                     |
| `--interactive/-i` | Open the interactive search UI (default: false) |
| `--human/-h` | Use human-readable formatting for the timestamp and duration (default: false) |
## Examples
```
# Open the interactive search TUI
atuin search -i
# Open the interactive search TUI preloaded with a query
atuin search -i atuin
# Search for all commands, beginning with cargo, that exited successfully
atuin search --exit 0 cargo
# Search for all commands, that failed, from the current dir, and were ran before April 1st 2021
atuin search --exclude-exit 0 --before 01/04/2021 --cwd .
# Search for all commands, beginning with cargo, that exited successfully, and were ran after yesterday at 3pm
atuin search --exit 0 --after "yesterday 3pm" cargo
```

View File

@@ -1,160 +0,0 @@
# `atuin server`
Atuin allows you to run your own sync server, in case you don't want to use the
one I host :)
There's currently only one subcommand, `atuin server start` which will start the
Atuin http sync server
```
USAGE:
atuin server start [OPTIONS]
FLAGS:
--help Prints help information
-V, --version Prints version information
OPTIONS:
-h, --host <host>
-p, --port <port>
```
## Configuration
The config for the server is kept separate from the config for the client, even
though they are the same binary. Server config can be found at
`~/.config/atuin/server.toml`.
It looks something like this:
```toml
host = "0.0.0.0"
port = 8888
open_registration = true
db_uri="postgres://user:password@hostname/database"
```
Alternatively, configuration can also be provided with environment variables.
```sh
ATUIN_HOST="0.0.0.0"
ATUIN_PORT=8888
ATUIN_OPEN_REGISTRATION=true
ATUIN_DB_URI="postgres://user:password@hostname/database"
```
### host
The host address the atuin server should listen on.
Defaults to `127.0.0.1`.
### port
The port the atuin server should listen on.
Defaults to `8888`.
### open_registration
If `true`, atuin will accept new user registrations.
Set this to `false` after making your own account if you don't want others to be
able to use your server.
Defaults to `false`.
### db_uri
A valid postgres URI, where the user and history data will be saved to.
## Docker
There is a supplied docker image to make deploying a server as a container easier.
```sh
docker run -d -v "$USER/.config/atuin:/config" ghcr.io/ellie/atuin:latest server start
```
## Docker Compose
Using the already built docker image, hosting your own Atuin can be done using the supplied docker-compose file.
Create a `.env` file next to `docker-compose.yml` with contents like this:
```
ATUIN_DB_USERNAME=atuin
# Choose your own secure password
ATUIN_DB_PASSWORD=really-insecure
```
Create a `docker-compose.yml`:
```yaml
version: '3.5'
services:
atuin:
restart: always
image: ghcr.io/ellie/atuin:main
command: server start
volumes:
- "./config:/config"
links:
- postgresql:db
ports:
- 8888:8888
environment:
ATUIN_HOST: "0.0.0.0"
ATUIN_OPEN_REGISTRATION: "true"
ATUIN_DB_URI: postgres://$ATUIN_DB_USERNAME:$ATUIN_DB_PASSWORD@db/atuin
postgresql:
image: postgres:14
restart: unless-stopped
volumes: # Don't remove permanent storage for index database files!
- "./database:/var/lib/postgresql/data/"
environment:
POSTGRES_USER: $ATUIN_DB_USERNAME
POSTGRES_PASSWORD: $ATUIN_DB_PASSWORD
POSTGRES_DB: atuin
```
Start the services using `docker-compose`:
```sh
docker-compose up -d
```
### Using systemd to manage your atuin server
The following `systemd` unit file to manage your `docker-compose` managed service:
```
[Unit]
Description=Docker Compose Atuin Service
Requires=docker.service
After=docker.service
[Service]
# Where the docker-compose file is located
WorkingDirectory=/srv/atuin-server
ExecStart=/usr/bin/docker-compose up
ExecStop=/usr/bin/docker-compose down
TimeoutStartSec=0
Restart=on-failure
StartLimitBurst=3
[Install]
WantedBy=multi-user.target
```
Start and enable the service with:
```sh
systemctl enable --now atuin
```
Check if it's running with:
```sh
systemctl status atuin
```

View File

@@ -1,19 +0,0 @@
# `atuin gen-completions`
[Shell completions](https://en.wikipedia.org/wiki/Command-line_completion) for Atuin can be generated by specifying the output directory and desired shell via `gen-completions` subcommand.
```
$ atuin gen-completions --shell bash --out-dir $HOME
Shell completion for BASH is generated in "/home/user"
```
Possible values for the `--shell` argument are the following:
- `bash`
- `fish`
- `zsh`
- `powershell`
- `elvish`
Also, see the [supported shells](./../README.md#supported-shells).

View File

@@ -1,36 +0,0 @@
# `atuin stats`
Atuin can also calculate stats based on your history - this is currently a
little basic, but more features to come
```
$ atuin stats day last friday
+---------------------+------------+
| Statistic | Value |
+---------------------+------------+
| Most used command | git status |
+---------------------+------------+
| Commands ran | 450 |
+---------------------+------------+
| Unique commands ran | 213 |
+---------------------+------------+
$ atuin stats day 01/01/21 # also accepts absolute dates
```
It can also calculate statistics for all of known history:
```
$ atuin stats all
+---------------------+-------+
| Statistic | Value |
+---------------------+-------+
| Most used command | ls |
+---------------------+-------+
| Commands ran | 8190 |
+---------------------+-------+
| Unique commands ran | 2996 |
+---------------------+-------+
```

View File

@@ -1,61 +0,0 @@
# `atuin sync`
Atuin can back up your history to a server, and use this to ensure multiple
machines have the same shell history. This is all encrypted end-to-end, so the
server operator can _never_ see your data!
Anyone can host a server (try `atuin server start`, more docs to follow), but I
host one at https://api.atuin.sh. This is the default server address, which can
be changed in the [config](config.md). Again, I _cannot_ see your data, and
do not want to.
## Sync frequency
Syncing will happen automatically, unless configured otherwise. The sync
frequency is configurable in [config](config.md)
## Sync
You can manually trigger a sync with `atuin sync`
## Register
Register for a sync account with
```
atuin register -u <USERNAME> -e <EMAIL> -p <PASSWORD>
```
Usernames must be unique, and emails shall only be used for important
notifications (security breaches, changes to service, etc).
Upon success, you are also logged in :) Syncing should happen automatically from
here!
## Key
As all your data is encrypted, Atuin generates a key for you. It's stored in the
Atuin data directory (`~/.local/share/atuin` on Linux).
You can also get this with
```
atuin key
```
Never share this with anyone!
## Login
If you want to log in to a new machine, you will require your encryption key
(`atuin key`).
```
atuin login -u <USERNAME> -p <PASSWORD> -k <KEY>
```
## Logout
```
atuin logout
```

View File

@@ -1,196 +0,0 @@
<p align="center">
<img height="250" src="https://user-images.githubusercontent.com/53315310/167610618-284491ac-c5d3-4957-9e4b-604bb97e23e6.png"/>
</p>
<p align="center">
<em>神奇的 shell 历史记录</em>
</p>
<hr/>
<p align="center">
<a href="https://github.com/ellie/atuin/actions?query=workflow%3ARust"><img src="https://img.shields.io/github/workflow/status/ellie/atuin/Rust?style=flat-square" /></a>
<a href="https://crates.io/crates/atuin"><img src="https://img.shields.io/crates/v/atuin.svg?style=flat-square" /></a>
<a href="https://crates.io/crates/atuin"><img src="https://img.shields.io/crates/d/atuin.svg?style=flat-square" /></a>
<a href="https://github.com/ellie/atuin/blob/main/LICENSE"><img src="https://img.shields.io/crates/l/atuin.svg?style=flat-square" /></a>
<a href="https://discord.gg/Fq8bJSKPHh"><img src="https://img.shields.io/discord/954121165239115808" /></a>
</p>
[English] | [简体中文]
Atuin 使用 SQLite 数据库取代了你现有的 shell 历史,并为你的命令记录了额外的内容。此外,它还通过 Atuin 服务器,在机器之间提供可选的、完全加密的历史记录同步功能。
<p align="center">
<img src="../../demo.gif" alt="animated" width="80%" />
</p>
<p align="center">
<em>显示退出代码、命令持续时间、上次执行时间和执行的命令</em>
</p>
除了搜索 UI它还可以执行以下操作
```
# 搜索昨天下午3点之后记录的所有成功的 `make` 命令
atuin search --exit 0 --after "yesterday 3pm" make
```
你可以使用我(ellie)托管的服务器,也可以使用你自己的服务器!或者干脆不使用 sync 功能。所有的历史记录同步都是加密,即使我想,也无法访问你的数据。且我**真的**不想。
## 特点
- 重新绑定 `up``ctrl-r` 的全屏历史记录搜索UI界面
- 使用 sqlite 数据库存储 shell 历史记录
- 备份以及同步已加密的 shell 历史记录
- 在不同的终端、不同的会话以及不同的机器上都有相同的历史记录
- 记录退出代码、cwd、主机名、会话、命令持续时间等等。
- 计算统计数据,如 "最常用的命令"。
- 不替换旧的历史文件
- 通过 <kbd>Alt-\<num\></kbd> 快捷键快速跳转到之前的记录
- 通过 ctrl-r 切换过滤模式;可以仅从当前会话、目录或全局来搜索历史记录
## 文档
- [快速开始](#快速开始)
- [安装](#安装)
- [导入](./import.md)
- [配置](./config.md)
- [历史记录搜索](./search.md)
- [历史记录云端同步](./sync.md)
- [历史记录统计](./stats.md)
- [运行你自己的服务器](./server.md)
- [键绑定](./key-binding.md)
- [shell补全](./shell-completions.md)
## 支持的 Shells
- zsh
- bash
- fish
## 社区
Atuin 有一个 Discord 社区, 可以在 [这里](https://discord.gg/Fq8bJSKPHh) 获得
# 快速开始
## 使用默认的同步服务器
这将为您注册由我托管的默认同步服务器。 一切都是端到端加密的,所以你的秘密是安全的!
阅读下面的更多信息,了解仅供离线使用或托管您自己的服务器。
```
bash <(curl https://raw.githubusercontent.com/ellie/atuin/main/install.sh)
atuin register -u <USERNAME> -e <EMAIL> -p <PASSWORD>
atuin import auto
atuin sync
```
## 仅离线 (不同步)
```
bash <(curl https://raw.githubusercontent.com/ellie/atuin/main/install.sh)
atuin import auto
```
## 安装
### 脚本 (推荐)
安装脚本将帮助您完成设置,确保您的 shell 正确配置。 它还将使用以下方法之一在可能的情况下首选系统包管理器pacman、homebrew 等)。
```
# 不要以root身份运行如果需要的话会要求root。
bash <(curl https://raw.githubusercontent.com/ellie/atuin/main/install.sh)
```
### 使用cargo
最好使用 [rustup](https://rustup.rs/) 来设置 Rust 工具链,然后你就可以运行下面的命令:
```
cargo install atuin
```
### Homebrew
```
brew install atuin
```
### MacPorts
Atuin 也可以在 [MacPorts](https://ports.macports.org/port/atuin/) 中找到
```
sudo port install atuin
```
### Pacman
Atuin 在 Arch Linux 的 [社区存储库](https://archlinux.org/packages/community/x86_64/atuin/) 中可用。
```
pacman -S atuin
```
### 从源码编译安装
```
git clone https://github.com/ellie/atuin.git
cd atuin
cargo install --path .
```
## Shell 插件
安装二进制文件后,需要安装 shell 插件。
如果你使用的是脚本安装,那么这一切应该都会帮您完成!
### zsh
```
echo 'eval "$(atuin init zsh)"' >> ~/.zshrc
```
或使用插件管理器:
```
zinit load ellie/atuin
```
### bash
我们需要设置一些钩子(hooks), 所以首先需要安装 bash-preexec :
```
curl https://raw.githubusercontent.com/rcaloras/bash-preexec/master/bash-preexec.sh -o ~/.bash-preexec.sh
echo '[[ -f ~/.bash-preexec.sh ]] && source ~/.bash-preexec.sh' >> ~/.bashrc
```
然后设置Atuin
```
echo 'eval "$(atuin init bash)"' >> ~/.bashrc
```
### fish
添加
```
atuin init fish | source
```
到 ~/.config/fish/config.fish 文件中的 is-interactive 块中
## ...这个名字是什么意思?
Atuin 以 "The Great A'Tuin" 命名, 这是一只来自 Terry Pratchett 的 Discworld 系列书籍的巨龟。
[English]: ../../README.md
[简体中文]: ./README.md

View File

@@ -1,137 +0,0 @@
# 配置
Atuin 维护两个配置文件,存储在 `~/.config/atuin/` 中。 我们将数据存储在 `~/.local/share/atuin` 中(除非被 XDG\_\* 覆盖)。
您可以通过设置更改配置目录的路径 `ATUIN_CONFIG_DIR`。 例如
```
export ATUIN_CONFIG_DIR = /home/ellie/.atuin
```
## 客户端配置
```
~/.config/atuin/config.toml
```
客户端运行在用户的机器上,除非你运行的是服务器,否则这就是你所关心的。
见 [config.toml](../../atuin-client/config.toml) 中的例子
### `dialect`
这配置了 [stats](stats.md) 命令解析日期的方式。 它有两个可能的值
```
dialect = "uk"
```
或者
```
dialect = "us"
```
默认为 "us".
### `auto_sync`
配置登录时是否自动同步。默认为 true
```
auto_sync = true/false
```
### `sync_address`
同步的服务器地址! 默认为 `https://api.atuin.sh`
```
sync_address = "https://api.atuin.sh"
```
### `sync_frequency`
多长时间与服务器自动同步一次。这可以用一种"人类可读"的格式给出。例如,`10s``20m``1h`,等等。默认为 `1h`
如果设置为 `0`Atuin将在每个命令之后进行同步。一些服务器可能有潜在的速率限制这不会造成任何问题。
```
sync_frequency = "1h"
```
### `db_path`
Atuin SQlite数据库的路径。默认为
`~/.local/share/atuin/history.db`
```
db_path = "~/.history.db"
```
### `key_path`
Atuin加密密钥的路径。默认为
`~/.local/share/atuin/key`
```
key = "~/.atuin-key"
```
### `session_path`
Atuin服务器会话文件的路径。默认为
`~/.local/share/atuin/session` 。 这本质上只是一个API令牌
```
key = "~/.atuin-session"
```
### `search_mode`
使用哪种搜索模式。Atuin 支持 "prefix"(前缀)、"fulltext"(全文) 和 "fuzzy"(模糊)搜索模式。前缀(prefix)搜索语法为 "query\*",全文(full text)搜索语法为 "\*query\*",而模糊搜索适用的搜索语法 [如下所述](#fuzzy-search-syntax) 。
默认配置为 "prefix"
### `filter_mode`
搜索时要使用的默认过滤器
| 模式 | 描述 |
|--------------- | --------------- |
| global (default) | 从所有主机、所有会话、所有目录中搜索历史记录 |
| host | 仅从该主机搜索历史记录 |
| session | 仅从当前会话中搜索历史记录 |
| directory | 仅从当前目录搜索历史记录|
过滤模式仍然可以通过 ctrl-r 来切换
```
search_mode = "fulltext"
```
#### `fuzzy` 的搜索语法
`fuzzy` 搜索语法的基础是 [fzf 搜索语法](https://github.com/junegunn/fzf#search-syntax) 。
| 内容 | 匹配类型 | 描述 |
| --------- | -------------------------- | ------------------------------------ |
| `sbtrkt` | fuzzy-match | 匹配 `sbtrkt` 的项目 |
| `'wild` | exact-match (quoted) | 包含 `wild` 的项目 |
| `^music` | prefix-exact-match | 以 `music` 开头的项目 |
| `.mp3$` | suffix-exact-match | 以 `.mp3` 结尾的项目 |
| `!fire` | inverse-exact-match | 不包括 `fire` 的项目 |
| `!^music` | inverse-prefix-exact-match | 不以 `music` 开头的项目 |
| `!.mp3$` | inverse-suffix-exact-match | 不以 `.mp3` 结尾的项目 |
单个条形字符术语充当 OR 运算符。 例如,以下查询匹配以 `core` 开头并以 `go``rb``py` 结尾的条目。
```
^core go$ | rb$ | py$
```
## 服务端配置
`// TODO`

Some files were not shown because too many files have changed in this diff Show More