package lib

import (
	"bufio"
	"bytes"
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/user"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"time"

	_ "embed" // for embedding config.sh

	"golang.org/x/exp/slices"
	"gorm.io/gorm"

	"github.com/araddon/dateparse"
	"github.com/google/uuid"
	"github.com/schollz/progressbar/v3"

	"github.com/ddworken/hishtory/client/data"
	"github.com/ddworken/hishtory/client/hctx"
	"github.com/ddworken/hishtory/shared"
)

//go:embed config.sh
var ConfigShContents string

//go:embed config.zsh
var ConfigZshContents string

//go:embed config.fish
var ConfigFishContents string

var Version string = "Unknown"

var GitCommit string = "Unknown"

// 512KB ought to be enough for any reasonable cmd
// Funnily enough, 256KB actually wasn't enough. See https://github.com/ddworken/hishtory/issues/93
var maxSupportedLineLengthForImport = 512_000

func AddToDbIfNew(db *gorm.DB, entry data.HistoryEntry) {
	tx := db.Where("local_username = ?", entry.LocalUsername)
	tx = tx.Where("hostname = ?", entry.Hostname)
	tx = tx.Where("command = ?", entry.Command)
	tx = tx.Where("current_working_directory = ?", entry.CurrentWorkingDirectory)
	tx = tx.Where("home_directory = ?", entry.HomeDirectory)
	tx = tx.Where("exit_code = ?", entry.ExitCode)
	tx = tx.Where("start_time = ?", entry.StartTime)
	tx = tx.Where("end_time = ?", entry.EndTime)
	var results []data.HistoryEntry
	tx.Limit(1).Find(&results)
	if len(results) == 0 {
		db.Create(normalizeEntryTimezone(entry))
		// TODO: check the error here and bubble it up
	}
}

func getCustomColumnValue(ctx context.Context, header string, entry data.HistoryEntry) (string, error) {
	for _, c := range entry.CustomColumns {
		if strings.EqualFold(c.Name, header) {
			return c.Val, nil
		}
	}
	config := hctx.GetConf(ctx)
	for _, c := range config.CustomColumns {
		if strings.EqualFold(c.ColumnName, header) {
			return "", nil
		}
	}
	return "", fmt.Errorf("failed to find a column matching the column name %#v (is there a typo?)", header)
}

func BuildTableRow(ctx context.Context, columnNames []string, entry data.HistoryEntry, commandRenderer func(string) string) ([]string, error) {
	row := make([]string, 0)
	for _, header := range columnNames {
		switch header {
		case "Hostname", "hostname":
			row = append(row, entry.Hostname)
		case "CWD", "cwd":
			row = append(row, entry.CurrentWorkingDirectory)
		case "Timestamp", "timestamp":
			if entry.StartTime.UnixMilli() == 0 {
				row = append(row, "N/A")
			} else {
				row = append(row, entry.StartTime.Local().Format(hctx.GetConf(ctx).TimestampFormat))
			}
		case "Runtime", "runtime":
			if entry.EndTime.UnixMilli() == 0 {
				// An EndTime of zero means this is a pre-saved entry that never finished
				row = append(row, "N/A")
			} else {
				row = append(row, entry.EndTime.Local().Sub(entry.StartTime.Local()).Round(time.Millisecond).String())
			}
		case "Exit Code", "Exit_Code", "ExitCode", "exitcode":
			row = append(row, fmt.Sprintf("%d", entry.ExitCode))
		case "Command", "command":
			row = append(row, commandRenderer(entry.Command))
		case "User", "user":
			row = append(row, entry.LocalUsername)
		default:
			customColumnValue, err := getCustomColumnValue(ctx, header, entry)
			if err != nil {
				return nil, err
			}
			row = append(row, customColumnValue)
		}
	}
	return row, nil
}

// Make a regex that matches the non-tokenized bits of the given query
func MakeRegexFromQuery(query string) string {
	tokens := tokenize(strings.TrimSpace(query))
	r := ""
	for _, token := range tokens {
		if !strings.HasPrefix(token, "-") && !containsUnescaped(token, ":") {
			if r != "" {
				r += "|"
			}
			r += fmt.Sprintf("(%s)", regexp.QuoteMeta(token))
		}
	}
	return r
}

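// Illustrative sketch (not part of the original source): MakeRegexFromQuery keeps only the
// plain search tokens, skipping exclusions ("-foo") and atoms ("host:bar"), so a query like
// "git push hostname:laptop" yields "(git)|(push)". One hypothetical use is bracketing those
// matches inside a rendered command.
func exampleHighlightQueryMatches(query, command string) string {
	pattern := MakeRegexFromQuery(query)
	if pattern == "" {
		return command
	}
	re, err := regexp.Compile(pattern)
	if err != nil {
		return command
	}
	return re.ReplaceAllString(command, "[$0]")
}
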
func CheckFatalError(err error) {
	if err != nil {
		_, filename, line, _ := runtime.Caller(1)
		log.Fatalf("hishtory v0.%s fatal error at %s:%d: %v", Version, filename, line, err)
	}
}

var ZSH_FIRST_COMMAND_BUG_REGEX = regexp.MustCompile(`: \d+:\d;(.*)`)

func stripZshWeirdness(cmd string) string {
	// Zsh has this weird behavior where sometimes commands are saved in the history file
	// with a weird prefix. I've never been able to figure out why this happens, but we
	// can at least strip it.
	matches := ZSH_FIRST_COMMAND_BUG_REGEX.FindStringSubmatch(cmd)
	if len(matches) == 2 {
		return matches[1]
	}
	return cmd
}

var BASH_FIRST_COMMAND_BUG_REGEX = regexp.MustCompile(`^#\d+\s+$`)

func isBashWeirdness(cmd string) bool {
	// Bash has this weird behavior where it has entries like `#1664342754` in the
	// history file. We want to skip these.
	return BASH_FIRST_COMMAND_BUG_REGEX.MatchString(cmd)
}

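// Illustrative sketch (not part of the original source): filtering an imported line the same
// way ImportHistory does below, combining the zsh prefix stripping and the bash-noise check.
// For example, a zsh extended-history line like ": 1556747748:0;ls -Slah" becomes "ls -Slah",
// while a bash timestamp marker like "#1664342754 " is dropped entirely.
func exampleShouldImportLine(line string) (string, bool) {
	cmd := stripZshWeirdness(line)
	if isBashWeirdness(cmd) || strings.HasPrefix(cmd, " ") {
		return "", false
	}
	return cmd, true
}
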
func countLinesInFile(filename string) (int, error) {
	if _, err := os.Stat(filename); errors.Is(err, os.ErrNotExist) {
		return 0, nil
	}
	file, err := os.Open(filename)
	if err != nil {
		return 0, err
	}
	defer file.Close()
	buf := make([]byte, 32*1024)
	count := 0
	lineSep := []byte{'\n'}
	for {
		c, err := file.Read(buf)
		count += bytes.Count(buf[:c], lineSep)
		switch {
		case err == io.EOF:
			return count, nil
		case err != nil:
			return count, err
		}
	}
}

func countLinesInFiles(filenames ...string) (int, error) {
	total := 0
	for _, f := range filenames {
		l, err := countLinesInFile(f)
		if err != nil {
			return 0, err
		}
		total += l
	}
	return total, nil
}

// If we're importing more than this many entries, the import is likely to be slow enough
// that it is worth displaying a progress bar.
const NUM_IMPORTED_ENTRIES_SLOW int = 20_000

func ImportHistory(ctx context.Context, shouldReadStdin, force bool) (int, error) {
	config := hctx.GetConf(ctx)
	if config.HaveCompletedInitialImport && !force {
		// Don't run an import if we already have run one. This avoids importing the same entry multiple times.
		return 0, nil
	}
	homedir := hctx.GetHome(ctx)
	inputFiles := []string{
		filepath.Join(homedir, ".bash_history"),
		filepath.Join(homedir, ".zsh_history"),
	}
	if histfile := os.Getenv("HISTFILE"); histfile != "" && !slices.Contains[string](inputFiles, histfile) {
		inputFiles = append(inputFiles, histfile)
	}
	zHistPath := filepath.Join(homedir, ".zhistory")
	if !slices.Contains(inputFiles, zHistPath) {
		inputFiles = append(inputFiles, zHistPath)
	}
	entriesIter := parseFishHistory(homedir)
	for _, file := range inputFiles {
		entriesIter = concatIterators(entriesIter, readFileToIterator(file))
	}
	totalNumEntries, err := countLinesInFiles(inputFiles...)
	if err != nil {
		return 0, fmt.Errorf("failed to count input lines during hishtory import: %w", err)
	}
	if shouldReadStdin {
		extraEntries, err := readStdin()
		if err != nil {
			return 0, fmt.Errorf("failed to read stdin: %w", err)
		}
		entriesIter = concatIterators(entriesIter, Values(extraEntries))
		totalNumEntries += len(extraEntries)
	}
	fishLines, err := countLinesInFile(getFishHistoryPath(homedir))
	if err != nil {
		return 0, fmt.Errorf("failed to count fish history lines during hishtory import: %w", err)
	}
	totalNumEntries += fishLines
	db := hctx.GetDb(ctx)
	currentUser, err := user.Current()
	if err != nil {
		return 0, err
	}
	hostname, err := os.Hostname()
	if err != nil {
		return 0, err
	}
	numEntriesImported := 0
	var iteratorError error = nil
	var batch []data.HistoryEntry
	importTimestamp := time.Now().UTC()
	batchSize := 100
	importEntryId := uuid.Must(uuid.NewRandom()).String()
	var bar *progressbar.ProgressBar
	if totalNumEntries > NUM_IMPORTED_ENTRIES_SLOW {
		fmt.Println("Importing existing history entries")
		bar = progressbar.Default(int64(totalNumEntries))
		defer bar.Finish()
	}
	entriesIter(func(cmd string, err error) bool {
		if err != nil {
			iteratorError = err
			return false
		}
		cmd = stripZshWeirdness(cmd)
		if isBashWeirdness(cmd) || strings.HasPrefix(cmd, " ") {
			return true
		}
		// Set the timestamps so that they are monotonically increasing
		startTime := importTimestamp.Add(time.Millisecond * time.Duration(numEntriesImported*2))
		endTime := startTime.Add(time.Millisecond)
		// And set the entryId in a similar way. This isn't critical from a correctness POV, but uuid.NewRandom() is
		// quite slow, so this makes imports considerably faster
		entryId := importEntryId + fmt.Sprintf("%d", numEntriesImported)
		entry := normalizeEntryTimezone(data.HistoryEntry{
			LocalUsername:           currentUser.Name,
			Hostname:                hostname,
			Command:                 cmd,
			CurrentWorkingDirectory: "Unknown",
			HomeDirectory:           homedir,
			ExitCode:                0,
			StartTime:               startTime,
			EndTime:                 endTime,
			DeviceId:                config.DeviceId,
			EntryId:                 entryId,
		})
		batch = append(batch, entry)
		if len(batch) > batchSize {
			err = RetryingDbFunction(func() error {
				if err := db.Create(batch).Error; err != nil {
					return fmt.Errorf("failed to import batch of history entries: %w", err)
				}
				return nil
			})
			if err != nil {
				iteratorError = fmt.Errorf("failed to insert imported history entry: %w", err)
				return false
			}
			batch = make([]data.HistoryEntry, 0)
		}
		numEntriesImported += 1
		if bar != nil {
			_ = bar.Add(1)
			if numEntriesImported > totalNumEntries {
				bar.ChangeMax(-1)
			}
		}
		return true
	})
	if iteratorError != nil {
		return 0, iteratorError
	}
	// Also create any entries remaining in an unfinished batch
	if len(batch) > 0 {
		err = RetryingDbFunction(func() error {
			if err := db.Create(batch).Error; err != nil {
				return fmt.Errorf("failed to import final batch of history entries: %w", err)
			}
			return nil
		})
		if err != nil {
			return 0, err
		}
	}
	err = Reupload(ctx)
	if err != nil {
		return 0, fmt.Errorf("failed to upload hishtory import: %w", err)
	}
	config.HaveCompletedInitialImport = true
	err = hctx.SetConfig(config)
	if err != nil {
		return 0, fmt.Errorf("failed to mark initial import as completed, this may lead to duplicate history entries: %w", err)
	}
	// Trigger a checkpoint so that these bulk entries are added from the WAL to the main DB
	db.Exec("PRAGMA wal_checkpoint")
	return numEntriesImported, nil
}

func readStdin() ([]string, error) {
	ret := make([]string, 0)
	in := bufio.NewReader(os.Stdin)
	for {
		s, err := in.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return nil, err
			}
			break
		}
		s = strings.TrimSpace(s)
		if s != "" {
			ret = append(ret, s)
		}
	}
	return ret, nil
}

func getFishHistoryPath(homedir string) string {
	return filepath.Join(homedir, ".local/share/fish/fish_history")
}

func parseFishHistory(homedir string) Seq2[string, error] {
	lines := readFileToIterator(getFishHistoryPath(homedir))
	return func(yield func(string, error) bool) bool {
		return lines(func(line string, err error) bool {
			if err != nil {
				return yield(line, err)
			}
			line = strings.TrimSpace(line)
			if strings.HasPrefix(line, "- cmd: ") {
				yield(strings.SplitN(line, ": ", 2)[1], nil)
			}
			return true
		})
	}
}

type (
	// Represents an iterator of (K,V). Equivalent of the future Go stdlib type iter.Seq2.
	// TODO: Swap this to the stdlib function once it has been released, along with the below two functions
	Seq2[K, V any] func(yield func(K, V) bool) bool
)

// Concatenate two iterators. Equivalent of the future Go stdlib function iter.Concat2.
func concatIterators(iters ...Seq2[string, error]) Seq2[string, error] {
	return func(yield func(string, error) bool) bool {
		for _, seq := range iters {
			if !seq(yield) {
				return false
			}
		}
		return true
	}
}

// Convert a slice into an iterator. Equivalent of the future Go stdlib function iter.Values
func Values[Slice ~[]Elem, Elem any](s Slice) Seq2[Elem, error] {
	return func(yield func(Elem, error) bool) bool {
		for _, v := range s {
			if !yield(v, nil) {
				return false
			}
		}
		return true
	}
}

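// Illustrative sketch (not part of the original source): Seq2 iterators are consumed by
// passing a yield callback; returning false from yield stops the iteration early. This uses
// only the Values and concatIterators helpers defined above.
func exampleConsumeIterators() []string {
	collected := make([]string, 0)
	iter := concatIterators(Values([]string{"ls", "cd /tmp"}), Values([]string{"echo hi"}))
	iter(func(cmd string, err error) bool {
		if err != nil {
			return false
		}
		collected = append(collected, cmd)
		return true
	})
	return collected // ["ls", "cd /tmp", "echo hi"]
}
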
func readFileToIterator(path string) Seq2[string, error] {
	return func(yield func(string, error) bool) bool {
		if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
			return true
		}
		file, err := os.Open(path)
		if err != nil {
			return yield("", fmt.Errorf("failed to open file: %w", err))
		}
		defer file.Close()
		scanner := bufio.NewScanner(file)
		buf := make([]byte, maxSupportedLineLengthForImport)
		scanner.Buffer(buf, maxSupportedLineLengthForImport)
		for scanner.Scan() {
			line := scanner.Text()
			if !yield(line, nil) {
				return false
			}
		}

		if err := scanner.Err(); err != nil {
			return yield("", fmt.Errorf("scanner.Err()=%w", err))
		}

		return true
	}
}

const DefaultServerHostname = "https://api.hishtory.dev"

func GetServerHostname() string {
	if server := os.Getenv("HISHTORY_SERVER"); server != "" {
		return server
	}
	return DefaultServerHostname
}

func httpClient() *http.Client {
	return &http.Client{}
}

func ApiGet(ctx context.Context, path string) ([]byte, error) {
	if os.Getenv("HISHTORY_SIMULATE_NETWORK_ERROR") != "" {
		return nil, fmt.Errorf("simulated network error: dial tcp: lookup api.hishtory.dev")
	}
	start := time.Now()
	req, err := http.NewRequest("GET", GetServerHostname()+path, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create GET: %w", err)
	}
	req.Header.Set("X-Hishtory-Version", "v0."+Version)
	req.Header.Set("X-Hishtory-Device-Id", hctx.GetConf(ctx).DeviceId)
	req.Header.Set("X-Hishtory-User-Id", data.UserId(hctx.GetConf(ctx).UserSecret))
	resp, err := httpClient().Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to GET %s%s: %w", GetServerHostname(), path, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("failed to GET %s%s: status_code=%d", GetServerHostname(), path, resp.StatusCode)
	}
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body from GET %s%s: %w", GetServerHostname(), path, err)
	}
	duration := time.Since(start)
	hctx.GetLogger().Infof("ApiGet(%#v): %d bytes - %s\n", GetServerHostname()+path, len(respBody), duration.String())
	return respBody, nil
}

func ApiPost(ctx context.Context, path, contentType string, reqBody []byte) ([]byte, error) {
	if os.Getenv("HISHTORY_SIMULATE_NETWORK_ERROR") != "" {
		return nil, fmt.Errorf("simulated network error: dial tcp: lookup api.hishtory.dev")
	}
	start := time.Now()
	req, err := http.NewRequest("POST", GetServerHostname()+path, bytes.NewBuffer(reqBody))
	if err != nil {
		return nil, fmt.Errorf("failed to create POST: %w", err)
	}
	req.Header.Set("Content-Type", contentType)
	req.Header.Set("X-Hishtory-Version", "v0."+Version)
	req.Header.Set("X-Hishtory-Device-Id", hctx.GetConf(ctx).DeviceId)
	req.Header.Set("X-Hishtory-User-Id", data.UserId(hctx.GetConf(ctx).UserSecret))
	resp, err := httpClient().Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to POST %s: %w", GetServerHostname()+path, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("failed to POST %s: status_code=%d", GetServerHostname()+path, resp.StatusCode)
	}
	respBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response body from POST %s: %w", GetServerHostname()+path, err)
	}
	duration := time.Since(start)
	hctx.GetLogger().Infof("ApiPost(%#v): %d bytes - %s\n", GetServerHostname()+path, len(respBody), duration.String())
	return respBody, nil
}

func IsOfflineError(ctx context.Context, err error) bool {
	if err == nil {
		return false
	}
	if strings.Contains(err.Error(), "dial tcp: lookup api.hishtory.dev") ||
		strings.Contains(err.Error(), ": no such host") ||
		strings.Contains(err.Error(), "connect: network is unreachable") ||
		strings.Contains(err.Error(), "read: connection reset by peer") ||
		strings.Contains(err.Error(), ": EOF") ||
		strings.Contains(err.Error(), ": status_code=502") ||
		strings.Contains(err.Error(), ": status_code=503") ||
		strings.Contains(err.Error(), ": i/o timeout") ||
		strings.Contains(err.Error(), "connect: operation timed out") ||
		strings.Contains(err.Error(), "net/http: TLS handshake timeout") ||
		strings.Contains(err.Error(), "connect: connection refused") {
		return true
	}
	if !CanReachHishtoryServer(ctx) {
		// If the backend server is down, then treat all errors as offline errors
		return true
	}
	// A truly unexpected error, bubble this up
	return false
}

func CanReachHishtoryServer(ctx context.Context) bool {
	_, err := ApiGet(ctx, "/api/v1/ping")
	return err == nil
}

func normalizeEntryTimezone(entry data.HistoryEntry) data.HistoryEntry {
	entry.StartTime = entry.StartTime.UTC()
	entry.EndTime = entry.EndTime.UTC()
	return entry
}

const SQLITE_LOCKED_ERR_MSG = "database is locked ("

func RetryingDbFunction(dbFunc func() error) error {
	var err error = nil
	i := 0
	for i = 0; i < 10; i++ {
		err = dbFunc()
		if err == nil {
			return nil
		}
		errMsg := err.Error()
		if strings.Contains(errMsg, SQLITE_LOCKED_ERR_MSG) {
			time.Sleep(time.Duration(i*rand.Intn(100)) * time.Millisecond)
			continue
		}
		if strings.Contains(errMsg, "UNIQUE constraint failed: history_entries.") {
			return nil
		}
		return fmt.Errorf("unrecoverable sqlite error: %w", err)
	}
	return fmt.Errorf("failed to execute DB transaction even with %d retries: %w", i, err)
}

func RetryingDbFunctionWithResult[T any](dbFunc func() (T, error)) (T, error) {
	var t T
	var err error = nil
	i := 0
	for i = 0; i < 10; i++ {
		t, err = dbFunc()
		if err == nil {
			return t, nil
		}
		errMsg := err.Error()
		if strings.Contains(errMsg, SQLITE_LOCKED_ERR_MSG) {
			time.Sleep(time.Duration(i*rand.Intn(100)) * time.Millisecond)
			continue
		}
		return t, fmt.Errorf("unrecoverable sqlite error: %w", err)
	}
	return t, fmt.Errorf("failed to execute DB transaction even with %d retries: %w", i, err)
}

func ReliableDbCreate(db *gorm.DB, entry data.HistoryEntry) error {
	entry = normalizeEntryTimezone(entry)
	return RetryingDbFunction(func() error {
		return db.Create(entry).Error
	})
}

func EncryptAndMarshal(config *hctx.ClientConfig, entries []*data.HistoryEntry) ([]byte, error) {
	var encEntries []shared.EncHistoryEntry
	for _, entry := range entries {
		encEntry, err := data.EncryptHistoryEntry(config.UserSecret, *entry)
		if err != nil {
			return nil, fmt.Errorf("failed to encrypt history entry: %w", err)
		}
		encEntry.DeviceId = config.DeviceId
		encEntries = append(encEntries, encEntry)
	}
	jsonValue, err := json.Marshal(encEntries)
	if err != nil {
		return jsonValue, fmt.Errorf("failed to marshal encrypted history entry: %w", err)
	}
	return jsonValue, nil
}

func Reupload(ctx context.Context) error {
	config := hctx.GetConf(ctx)
	if config.IsOffline {
		return nil
	}
	numEntries, err := CountStoredEntries(hctx.GetDb(ctx))
	if err != nil {
		return fmt.Errorf("failed to upload history entries due to error in counting entries: %v", err)
	}
	var bar *progressbar.ProgressBar
	if numEntries > int64(NUM_IMPORTED_ENTRIES_SLOW) {
		fmt.Println("Persisting history entries")
		bar = progressbar.Default(int64(numEntries))
		defer bar.Finish()
	}
	// This number is a balance between speed and memory usage. If we make it too high, then
	// it will mean we use a ton of memory (since we retrieve all of those entries). But if
	// we make it too low, then it will have to do repeated SQL queries with OFFSETs, which
	// are inherently slow.
	searchChunkSize := 300_000
	currentOffset := 0
	for {
		entries, err := SearchWithOffset(ctx, hctx.GetDb(ctx), "", searchChunkSize, currentOffset)
		if err != nil {
			return fmt.Errorf("failed to reupload due to failed search: %w", err)
		}
		if len(entries) == 0 {
			if currentOffset == 0 && numEntries != 0 {
				return fmt.Errorf("found no entries for reuploading, something went wrong")
			} else {
				return nil
			}
		}
		currentOffset += searchChunkSize
		// This number is a balance between speed, and ensuring that we don't send too much data
		// in a single request (since large individual requests are extremely slow). From benchmarking,
		// it is apparent that this value seems to work quite well.
		uploadChunkSize := 500
		chunks := shared.Chunks(entries, uploadChunkSize)
		err = shared.ForEach(chunks, 10, func(chunk []*data.HistoryEntry) error {
			jsonValue, err := EncryptAndMarshal(config, chunk)
			if err != nil {
				return fmt.Errorf("failed to reupload due to failed encryption: %w", err)
			}
			_, err = ApiPost(ctx, "/api/v1/submit?source_device_id="+config.DeviceId, "application/json", jsonValue)
			if err != nil {
				return fmt.Errorf("failed to reupload due to failed POST: %w", err)
			}
			if bar != nil {
				_ = bar.Add(uploadChunkSize)
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
}

func RetrieveAdditionalEntriesFromRemote(ctx context.Context, queryReason string) error {
	db := hctx.GetDb(ctx)
	config := hctx.GetConf(ctx)
	if config.IsOffline {
		return nil
	}
	respBody, err := ApiGet(ctx, "/api/v1/query?device_id="+config.DeviceId+"&user_id="+data.UserId(config.UserSecret)+"&queryReason="+queryReason)
	if IsOfflineError(ctx, err) {
		return nil
	}
	if err != nil {
		return err
	}
	var retrievedEntries []*shared.EncHistoryEntry
	err = json.Unmarshal(respBody, &retrievedEntries)
	if err != nil {
		return fmt.Errorf("failed to load JSON response: %w", err)
	}
	for _, entry := range retrievedEntries {
		decEntry, err := data.DecryptHistoryEntry(config.UserSecret, *entry)
		if err != nil {
			return fmt.Errorf("failed to decrypt history entry from server: %w", err)
		}
		AddToDbIfNew(db, decEntry)
	}
	return ProcessDeletionRequests(ctx)
}

func ProcessDeletionRequests(ctx context.Context) error {
	config := hctx.GetConf(ctx)
	if config.IsOffline {
		return nil
	}
	resp, err := ApiGet(ctx, "/api/v1/get-deletion-requests?user_id="+data.UserId(config.UserSecret)+"&device_id="+config.DeviceId)
	if IsOfflineError(ctx, err) {
		return nil
	}
	if err != nil {
		return err
	}
	var deletionRequests []*shared.DeletionRequest
	err = json.Unmarshal(resp, &deletionRequests)
	if err != nil {
		return err
	}
	return HandleDeletionRequests(ctx, deletionRequests)
}

func HandleDeletionRequests(ctx context.Context, deletionRequests []*shared.DeletionRequest) error {
	db := hctx.GetDb(ctx)
	for _, request := range deletionRequests {
		for _, entry := range request.Messages.Ids {
			err := RetryingDbFunction(func() error {
				// Note that entry.EndTime is not always present (for pre-saved entries). And likewise,
				// entry.EntryId is not always present for older entries. So we just check that one of them matches.
				tx := db.Where("device_id = ? AND (end_time = ? OR entry_id = ?)", entry.DeviceId, entry.EndTime, entry.EntryId)
				return tx.Delete(&data.HistoryEntry{}).Error
			})
			if err != nil {
				return fmt.Errorf("DB error when deleting entries: %w", err)
			}
		}
	}
	return nil
}

func GetBanner(ctx context.Context) ([]byte, error) {
	config := hctx.GetConf(ctx)
	if config.IsOffline {
		return []byte{}, nil
	}
	url := "/api/v1/banner?commit_hash=" + GitCommit + "&user_id=" + data.UserId(config.UserSecret) + "&device_id=" + config.DeviceId + "&version=" + Version + "&forced_banner=" + os.Getenv("FORCED_BANNER")
	return ApiGet(ctx, url)
}

func parseTimeGenerously(input string) (time.Time, error) {
	input = strings.ReplaceAll(input, "_", " ")
	return dateparse.ParseLocal(input)
}

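// Illustrative sketch (not part of the original source): parseTimeGenerously rewrites
// underscores to spaces before handing the string to dateparse, which is convenient for
// search atoms that cannot contain spaces, e.g. a hypothetical `before:2023-01-01_10:30:00`.
func exampleParseBeforeAtom() (time.Time, error) {
	return parseTimeGenerously("2023-01-01_10:30:00")
}
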
// A wrapper around tx.Where(...) that filters out nil-values
func where(tx *gorm.DB, s string, v1 any, v2 any) *gorm.DB {
	if v1 == nil && v2 == nil {
		return tx.Where(s)
	}
	if v1 != nil && v2 == nil {
		return tx.Where(s, v1)
	}
	if v1 != nil && v2 != nil {
		return tx.Where(s, v1, v2)
	}
	panic(fmt.Sprintf("Impossible state: v1=%#v, v2=%#v", v1, v2))
}

func MakeWhereQueryFromSearch(ctx context.Context, db *gorm.DB, query string) (*gorm.DB, error) {
	tokens := tokenize(query)
	tx := db.Model(&data.HistoryEntry{}).Where("true")
	for _, token := range tokens {
		if strings.HasPrefix(token, "-") {
			if token == "-" {
				// The entire token is a -, just ignore this token. Otherwise we end up
				// interpreting "-" as excluding literally all results which is pretty useless.
				continue
			}
			if containsUnescaped(token, ":") {
				query, v1, v2, err := parseAtomizedToken(ctx, token[1:])
				if err != nil {
					return nil, err
				}
				tx = where(tx, "NOT "+query, v1, v2)
			} else {
				query, v1, v2, v3, err := parseNonAtomizedToken(token[1:])
				if err != nil {
					return nil, err
				}
				tx = tx.Where("NOT "+query, v1, v2, v3)
			}
		} else if containsUnescaped(token, ":") {
			query, v1, v2, err := parseAtomizedToken(ctx, token)
			if err != nil {
				return nil, err
			}
			tx = where(tx, query, v1, v2)
		} else {
			query, v1, v2, v3, err := parseNonAtomizedToken(token)
			if err != nil {
				return nil, err
			}
			tx = tx.Where(query, v1, v2, v3)
		}
	}
	return tx, nil
}

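// Illustrative sketch (not part of the original source): how the search syntax above could be
// turned into a query, e.g. "docker exit_code:0 -hostname:server1" combines a substring match,
// an included atom, and a negated atom into a single gorm query.
func exampleSearchQuery(ctx context.Context, db *gorm.DB) ([]*data.HistoryEntry, error) {
	tx, err := MakeWhereQueryFromSearch(ctx, db, "docker exit_code:0 -hostname:server1")
	if err != nil {
		return nil, err
	}
	var entries []*data.HistoryEntry
	return entries, tx.Order("end_time DESC").Limit(10).Find(&entries).Error
}
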
func Search(ctx context.Context, db *gorm.DB, query string, limit int) ([]*data.HistoryEntry, error) {
	return SearchWithOffset(ctx, db, query, limit, 0)
}

func SearchWithOffset(ctx context.Context, db *gorm.DB, query string, limit, offset int) ([]*data.HistoryEntry, error) {
	return retryingSearch(ctx, db, query, limit, offset, 0)
}

const SEARCH_RETRY_COUNT = 3

func retryingSearch(ctx context.Context, db *gorm.DB, query string, limit, offset int, currentRetryNum int) ([]*data.HistoryEntry, error) {
	if ctx == nil && query != "" {
		return nil, fmt.Errorf("lib.Search called with a nil context and a non-empty query (this should never happen)")
	}
	tx, err := MakeWhereQueryFromSearch(ctx, db, query)
	if err != nil {
		return nil, err
	}
	if hctx.GetConf(ctx).EnablePresaving {
		// Sort by StartTime when presaving is enabled, since presaved entries may not have an end time
		tx = tx.Order("start_time DESC")
	} else {
		tx = tx.Order("end_time DESC")
	}
	if limit > 0 {
		tx = tx.Limit(limit)
	}
	if offset > 0 {
		tx = tx.Offset(offset)
	}
	var historyEntries []*data.HistoryEntry
	result := tx.Find(&historyEntries)
	if result.Error != nil {
		if strings.Contains(result.Error.Error(), SQLITE_LOCKED_ERR_MSG) && currentRetryNum < SEARCH_RETRY_COUNT {
			hctx.GetLogger().Infof("Ignoring err=%v and retrying search query, cnt=%d", result.Error, currentRetryNum)
			time.Sleep(time.Duration(currentRetryNum*rand.Intn(50)) * time.Millisecond)
			return retryingSearch(ctx, db, query, limit, offset, currentRetryNum+1)
		}
		return nil, fmt.Errorf("DB query error: %w", result.Error)
	}
	return historyEntries, nil
}

func parseNonAtomizedToken(token string) (string, any, any, any, error) {
	wildcardedToken := "%" + unescape(token) + "%"
	return "(command LIKE ? OR hostname LIKE ? OR current_working_directory LIKE ?)", wildcardedToken, wildcardedToken, wildcardedToken, nil
}

func parseAtomizedToken(ctx context.Context, token string) (string, any, any, error) {
	splitToken := splitEscaped(token, ':', 2)
	field := unescape(splitToken[0])
	val := unescape(splitToken[1])
	switch field {
	case "user":
		return "(local_username = ?)", val, nil, nil
	case "host":
		fallthrough
	case "hostname":
		return "(instr(hostname, ?) > 0)", val, nil, nil
	case "cwd":
		return "(instr(current_working_directory, ?) > 0 OR instr(REPLACE(current_working_directory, '~/', home_directory), ?) > 0)", strings.TrimSuffix(val, "/"), strings.TrimSuffix(val, "/"), nil
	case "exit_code":
		return "(exit_code = ?)", val, nil, nil
	case "before":
		t, err := parseTimeGenerously(val)
		if err != nil {
			return "", nil, nil, fmt.Errorf("failed to parse before:%s as a timestamp: %w", val, err)
		}
		return "(CAST(strftime(\"%s\",start_time) AS INTEGER) < ?)", t.Unix(), nil, nil
	case "after":
		t, err := parseTimeGenerously(val)
		if err != nil {
			return "", nil, nil, fmt.Errorf("failed to parse after:%s as a timestamp: %w", val, err)
		}
		return "(CAST(strftime(\"%s\",start_time) AS INTEGER) > ?)", t.Unix(), nil, nil
	case "start_time":
		// Note that this atom probably isn't useful for interactive usage since it does exact matching, but we use it
		// internally for pre-saving history entries.
		t, err := parseTimeGenerously(val)
		if err != nil {
			return "", nil, nil, fmt.Errorf("failed to parse start_time:%s as a timestamp: %w", val, err)
		}
		return "(CAST(strftime(\"%s\",start_time) AS INTEGER) = ?)", strconv.FormatInt(t.Unix(), 10), nil, nil
	case "end_time":
		// Note that this atom probably isn't useful for interactive usage since it does exact matching, but we use it
		// internally for pre-saving history entries.
		t, err := parseTimeGenerously(val)
		if err != nil {
			return "", nil, nil, fmt.Errorf("failed to parse end_time:%s as a timestamp: %w", val, err)
		}
		return "(CAST(strftime(\"%s\",end_time) AS INTEGER) = ?)", strconv.FormatInt(t.Unix(), 10), nil, nil
	case "command":
		return "(instr(command, ?) > 0)", val, nil, nil
	default:
		knownCustomColumns := make([]string, 0)
		// Get custom columns that are defined on this machine
		conf := hctx.GetConf(ctx)
		for _, c := range conf.CustomColumns {
			knownCustomColumns = append(knownCustomColumns, c.ColumnName)
		}
		// Also get all ones that are in the DB
		names, err := getAllCustomColumnNames(ctx)
		if err != nil {
			return "", nil, nil, fmt.Errorf("failed to get custom column names from the DB: %w", err)
		}
		knownCustomColumns = append(knownCustomColumns, names...)
		// Check if the atom is for a custom column that exists and if it isn't, return an error
		isCustomColumn := false
		for _, ccName := range knownCustomColumns {
			if ccName == field {
				isCustomColumn = true
			}
		}
		if !isCustomColumn {
			return "", nil, nil, fmt.Errorf("search query contains unknown search atom '%s' that doesn't match any column names", field)
		}
		// Build the where clause for the custom column
		return "EXISTS (SELECT 1 FROM json_each(custom_columns) WHERE json_extract(value, '$.name') = ? and instr(json_extract(value, '$.value'), ?) > 0)", field, val, nil
	}
}

func getAllCustomColumnNames(ctx context.Context) ([]string, error) {
	db := hctx.GetDb(ctx)
	rows, err := RetryingDbFunctionWithResult(func() (*sql.Rows, error) {
		query := `
		SELECT DISTINCT json_extract(value, '$.name') as cc_name
		FROM history_entries
		JOIN json_each(custom_columns)
		WHERE value IS NOT NULL`
		return db.Raw(query).Rows()
	})
	if err != nil {
		return nil, fmt.Errorf("failed to query for list of custom columns: %w", err)
	}
	defer rows.Close()
	ccNames := make([]string, 0)
	for rows.Next() {
		var ccName string
		err = rows.Scan(&ccName)
		if err != nil {
			return nil, err
		}
		ccNames = append(ccNames, ccName)
	}
	return ccNames, nil
}

func tokenize(query string) []string {
	if query == "" {
		return []string{}
	}
	return splitEscaped(query, ' ', -1)
}

// TODO: Maybe add support for searching for the backslash character itself?
func splitEscaped(query string, separator rune, maxSplit int) []string {
	var token []rune
	var tokens []string
	splits := 1
	runeQuery := []rune(query)
	isInDoubleQuotedString := false
	isInSingleQuotedString := false
	for i := 0; i < len(runeQuery); i++ {
		if (maxSplit < 0 || splits < maxSplit) && runeQuery[i] == separator && !isInSingleQuotedString && !isInDoubleQuotedString {
			tokens = append(tokens, string(token))
			token = token[:0]
			splits++
		} else if runeQuery[i] == '\\' && i+1 < len(runeQuery) {
			if runeQuery[i+1] == '-' || runeQuery[i+1] == ':' || runeQuery[i+1] == '\\' {
				// Note that we need to keep the backslash before the dash to support searches like `ls \-Slah`.
				// And we need it before the colon so that we can search for things like `foo\:bar`
				// And we need it before the backslash so that we can search for literal backslashes.
				token = append(token, runeQuery[i])
			}
			i++
			token = append(token, runeQuery[i])
		} else if runeQuery[i] == '"' && !isInSingleQuotedString && !heuristicIgnoreUnclosedQuote(isInDoubleQuotedString, '"', runeQuery, i) {
			isInDoubleQuotedString = !isInDoubleQuotedString
		} else if runeQuery[i] == '\'' && !isInDoubleQuotedString && !heuristicIgnoreUnclosedQuote(isInSingleQuotedString, '\'', runeQuery, i) {
			isInSingleQuotedString = !isInSingleQuotedString
		} else {
			if (isInSingleQuotedString || isInDoubleQuotedString) && separator == ' ' && runeQuery[i] == ':' {
				token = append(token, '\\')
			}
			token = append(token, runeQuery[i])
		}
	}
	tokens = append(tokens, string(token))
	return tokens
}

func heuristicIgnoreUnclosedQuote(isCurrentlyInQuotedString bool, quoteType rune, query []rune, idx int) bool {
	if isCurrentlyInQuotedString {
		// We're already in a quoted string, so the heuristic doesn't apply
		return false
	}
	idx++
	for idx < len(query) {
		if query[idx] == quoteType {
			// There is a closing quote, so the heuristic doesn't apply
			return false
		}
		idx++
	}
	// There is no closing quote, so we apply the heuristic and treat this quote as a literal character
	return true
}

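// Illustrative sketch (not part of the original source): how splitEscaped tokenizes a search
// query. Escaped colons stay escaped (so they are later treated as literals rather than atom
// separators), quoted phrases stay in one token with the quotes removed, and a lone apostrophe
// with no matching close quote is kept as a literal character thanks to the heuristic above.
func exampleSplitEscaped() ([]string, []string) {
	withQuotes := splitEscaped(`foo\:bar baz "qux quux"`, ' ', -1)
	// => []string{`foo\:bar`, "baz", "qux quux"}
	unclosedQuote := splitEscaped("don't panic", ' ', -1)
	// => []string{"don't", "panic"}
	return withQuotes, unclosedQuote
}
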
func containsUnescaped(query string, token string) bool {
	runeQuery := []rune(query)
	for i := 0; i < len(runeQuery); i++ {
		if runeQuery[i] == '\\' && i+1 < len(runeQuery) {
			i++
		} else if string(runeQuery[i:i+len(token)]) == token {
			return true
		}
	}
	return false
}

func unescape(query string) string {
	runeQuery := []rune(query)
	var newQuery []rune
	for i := 0; i < len(runeQuery); i++ {
		if runeQuery[i] == '\\' {
			i++
		}
		if i < len(runeQuery) {
			newQuery = append(newQuery, runeQuery[i])
		}
	}
	return string(newQuery)
}

func SendDeletionRequest(ctx context.Context, deletionRequest shared.DeletionRequest) error {
	data, err := json.Marshal(deletionRequest)
	if err != nil {
		return err
	}
	_, err = ApiPost(ctx, "/api/v1/add-deletion-request", "application/json", data)
	if err != nil {
		return fmt.Errorf("failed to send deletion request to backend service, this may cause commands to not get deleted on other instances of hishtory: %w", err)
	}
	return nil
}

func CountStoredEntries(db *gorm.DB) (int64, error) {
	return RetryingDbFunctionWithResult(func() (int64, error) {
		var count int64
		return count, db.Model(&data.HistoryEntry{}).Count(&count).Error
	})
}