package main

import (
	"context"
	"encoding/json"
	"fmt"
	"html"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/DataDog/datadog-go/statsd"
	"github.com/ddworken/hishtory/shared"
	"github.com/jackc/pgx/v4/stdlib"
	_ "github.com/lib/pq"
	"github.com/rodaine/table"
	sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
	gormtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/gorm.io/gorm.v1"
	httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
	"gopkg.in/DataDog/dd-trace-go.v1/profiler"
	"gorm.io/gorm/logger"

	"gorm.io/driver/postgres"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

const (
	PostgresDb = "postgresql://postgres:%s@postgres:5432/hishtory?sslmode=disable"
)

var (
	GLOBAL_DB      *gorm.DB
	GLOBAL_STATSD  *statsd.Client
	ReleaseVersion string = "UNKNOWN"
)

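// UsageData is a per-user, per-device row tracking when the device was last used and last
// queried, its last IP, how many entries it has submitted, and the client version it runs.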
type UsageData struct {
	UserId            string    `json:"user_id" gorm:"not null; uniqueIndex:usageDataUniqueIndex"`
	DeviceId          string    `json:"device_id" gorm:"not null; uniqueIndex:usageDataUniqueIndex"`
	LastUsed          time.Time `json:"last_used"`
	LastIp            string    `json:"last_ip"`
	NumEntriesHandled int       `json:"num_entries_handled"`
	LastQueried       time.Time `json:"last_queried"`
	NumQueries        int       `json:"num_queries"`
	Version           string    `json:"version"`
}

func getRequiredQueryParam(r *http.Request, queryParam string) string {
	val := r.URL.Query().Get(queryParam)
	if val == "" {
		panic(fmt.Sprintf("request to %s is missing required query param=%#v", r.URL, queryParam))
	}
	return val
}

func getHishtoryVersion(r *http.Request) string {
	return r.Header.Get("X-Hishtory-Version")
}

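// updateUsageData records usage statistics (last used time, last IP, entry and query counts,
// and client version) for the given user/device pair.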
func updateUsageData(ctx context.Context, r *http.Request, userId, deviceId string, numEntriesHandled int, isQuery bool) {
	var usageData []UsageData
	GLOBAL_DB.WithContext(ctx).Where("user_id = ? AND device_id = ?", userId, deviceId).Find(&usageData)
	if len(usageData) == 0 {
		GLOBAL_DB.WithContext(ctx).Create(&UsageData{UserId: userId, DeviceId: deviceId, LastUsed: time.Now(), NumEntriesHandled: numEntriesHandled, Version: getHishtoryVersion(r)})
	} else {
		usage := usageData[0]
		GLOBAL_DB.WithContext(ctx).Model(&UsageData{}).Where("user_id = ? AND device_id = ?", userId, deviceId).Update("last_used", time.Now()).Update("last_ip", getRemoteAddr(r))
		if numEntriesHandled > 0 {
			GLOBAL_DB.WithContext(ctx).Exec("UPDATE usage_data SET num_entries_handled = COALESCE(num_entries_handled, 0) + ? WHERE user_id = ? AND device_id = ?", numEntriesHandled, userId, deviceId)
		}
		if usage.Version != getHishtoryVersion(r) {
			GLOBAL_DB.WithContext(ctx).Exec("UPDATE usage_data SET version = ? WHERE user_id = ? AND device_id = ?", getHishtoryVersion(r), userId, deviceId)
		}
	}
	if isQuery {
		GLOBAL_DB.WithContext(ctx).Exec("UPDATE usage_data SET num_queries = COALESCE(num_queries, 0) + 1, last_queried = ? WHERE user_id = ? AND device_id = ?", time.Now(), userId, deviceId)
	}
}

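// usageStatsHandler renders a per-user usage summary table (devices, entries, queries, versions,
// and IPs) for the internal stats endpoint.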
func usageStatsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	query := `
	SELECT
		MIN(devices.registration_date) as registration_date,
		COUNT(DISTINCT devices.device_id) as num_devices,
		SUM(usage_data.num_entries_handled) as num_history_entries,
		MAX(usage_data.last_used) as last_active,
		COALESCE(STRING_AGG(DISTINCT usage_data.last_ip, ', ') FILTER (WHERE usage_data.last_ip != 'Unknown' AND usage_data.last_ip != 'UnknownIp'), 'Unknown') as ip_addresses,
		COALESCE(SUM(usage_data.num_queries), 0) as num_queries,
		COALESCE(MAX(usage_data.last_queried), 'January 1, 1970') as last_queried,
		STRING_AGG(DISTINCT usage_data.version, ', ') as versions
	FROM devices
	INNER JOIN usage_data ON devices.device_id = usage_data.device_id
	GROUP BY devices.user_id
	ORDER BY registration_date
	`
	rows, err := GLOBAL_DB.WithContext(ctx).Raw(query).Rows()
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	tbl := table.New("Registration Date", "Num Devices", "Num Entries", "Num Queries", "Last Active", "Last Query", "Versions", "IPs")
	tbl.WithWriter(w)
	for rows.Next() {
		var registrationDate time.Time
		var numDevices int
		var numEntries int
		var lastUsedDate time.Time
		var ipAddresses string
		var numQueries int
		var lastQueried time.Time
		var versions string
		err = rows.Scan(&registrationDate, &numDevices, &numEntries, &lastUsedDate, &ipAddresses, &numQueries, &lastQueried, &versions)
		if err != nil {
			panic(err)
		}
		versions = strings.ReplaceAll(strings.ReplaceAll(versions, "Unknown", ""), ", ", "")
		lastQueryStr := strings.ReplaceAll(lastQueried.Format("2006-01-02"), "1970-01-01", "")
		tbl.AddRow(registrationDate.Format("2006-01-02"), numDevices, numEntries, numQueries, lastUsedDate.Format("2006-01-02"), lastQueryStr, versions, ipAddresses)
	}
	tbl.Print()
}

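// statsHandler writes high-level aggregate metrics (device count, entry counts, weekly actives,
// and the most recent registration) as plain text.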
func statsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	var numDevices int64 = 0
	checkGormResult(GLOBAL_DB.WithContext(ctx).Model(&shared.Device{}).Count(&numDevices))
	type numEntriesProcessed struct {
		Total int
	}
	nep := numEntriesProcessed{}
	checkGormResult(GLOBAL_DB.WithContext(ctx).Model(&UsageData{}).Select("SUM(num_entries_handled) as total").Find(&nep))
	var numDbEntries int64 = 0
	checkGormResult(GLOBAL_DB.WithContext(ctx).Model(&shared.EncHistoryEntry{}).Count(&numDbEntries))
	lastWeek := time.Now().AddDate(0, 0, -7)
	var weeklyActiveInstalls int64 = 0
	checkGormResult(GLOBAL_DB.WithContext(ctx).Model(&UsageData{}).Where("last_used > ?", lastWeek).Count(&weeklyActiveInstalls))
	var weeklyQueryUsers int64 = 0
	checkGormResult(GLOBAL_DB.WithContext(ctx).Model(&UsageData{}).Where("last_queried > ?", lastWeek).Count(&weeklyQueryUsers))
	var lastRegistration string = ""
	row := GLOBAL_DB.WithContext(ctx).Raw("select to_char(max(registration_date), 'DD Month YYYY HH24:MI') from devices").Row()
	err := row.Scan(&lastRegistration)
	if err != nil {
		panic(err)
	}
	w.Write([]byte(fmt.Sprintf("Num devices: %d\n", numDevices)))
	w.Write([]byte(fmt.Sprintf("Num history entries processed: %d\n", nep.Total)))
	w.Write([]byte(fmt.Sprintf("Num DB entries: %d\n", numDbEntries)))
	w.Write([]byte(fmt.Sprintf("Weekly active installs: %d\n", weeklyActiveInstalls)))
	w.Write([]byte(fmt.Sprintf("Weekly active queries: %d\n", weeklyQueryUsers)))
	w.Write([]byte(fmt.Sprintf("Last registration: %s\n", lastRegistration)))
}

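// apiSubmitHandler accepts a batch of encrypted history entries from one device and fans them
// out to every device registered for that user, chunking the inserts to stay under Postgres'
// parameter limit.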
func apiSubmitHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}
	var entries []*shared.EncHistoryEntry
	err = json.Unmarshal(data, &entries)
	if err != nil {
		panic(fmt.Sprintf("body=%#v, err=%v", data, err))
	}
	fmt.Printf("apiSubmitHandler: received request containing %d EncHistoryEntry\n", len(entries))
	if len(entries) == 0 {
		return
	}
	updateUsageData(ctx, r, entries[0].UserId, entries[0].DeviceId, len(entries), false)
	tx := GLOBAL_DB.WithContext(ctx).Where("user_id = ?", entries[0].UserId)
	var devices []*shared.Device
	checkGormResult(tx.Find(&devices))
	if len(devices) == 0 {
		panic(fmt.Errorf("found no devices associated with user_id=%s, can't save history entry", entries[0].UserId))
	}
	fmt.Printf("apiSubmitHandler: Found %d devices\n", len(devices))
	err = GLOBAL_DB.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		for _, device := range devices {
			for _, entry := range entries {
				entry.DeviceId = device.DeviceId
			}
			// Chunk the inserts to prevent the `extended protocol limited to 65535 parameters` error
			for _, entriesChunk := range shared.Chunks(entries, 1000) {
				checkGormResult(tx.Create(&entriesChunk))
			}
		}
		return nil
	})
	if err != nil {
		panic(fmt.Errorf("failed to execute transaction to add entries to DB: %v", err))
	}
	GLOBAL_STATSD.Count("hishtory.submit", int64(len(devices)), []string{}, 1.0)
}

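// apiBootstrapHandler returns every encrypted history entry for a user so a newly installed
// client can bootstrap its local database.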
func apiBootstrapHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	userId := getRequiredQueryParam(r, "user_id")
	deviceId := getRequiredQueryParam(r, "device_id")
	updateUsageData(ctx, r, userId, deviceId, 0, false)
	tx := GLOBAL_DB.WithContext(ctx).Where("user_id = ?", userId)
	var historyEntries []*shared.EncHistoryEntry
	checkGormResult(tx.Find(&historyEntries))
	fmt.Printf("apiBootstrapHandler: Found %d entries\n", len(historyEntries))
	resp, err := json.Marshal(historyEntries)
	if err != nil {
		panic(err)
	}
	w.Write(resp)
}

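// apiQueryHandler returns the unread (read_count < 5) encrypted entries for a device, applying
// any pending deletion requests first and bumping read counts afterwards.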
func apiQueryHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	userId := getRequiredQueryParam(r, "user_id")
	deviceId := getRequiredQueryParam(r, "device_id")
	updateUsageData(ctx, r, userId, deviceId, 0, true)

	// Delete any entries that match a pending deletion request
	var deletionRequests []*shared.DeletionRequest
	checkGormResult(GLOBAL_DB.WithContext(ctx).Where("destination_device_id = ? AND user_id = ?", deviceId, userId).Find(&deletionRequests))
	for _, request := range deletionRequests {
		_, err := applyDeletionRequestsToBackend(ctx, *request)
		if err != nil {
			panic(err)
		}
	}

	// Then retrieve
	tx := GLOBAL_DB.WithContext(ctx).Where("device_id = ? AND read_count < 5", deviceId)
	var historyEntries []*shared.EncHistoryEntry
	checkGormResult(tx.Find(&historyEntries))
	fmt.Printf("apiQueryHandler: Found %d entries for %s\n", len(historyEntries), r.URL)
	resp, err := json.Marshal(historyEntries)
	if err != nil {
		panic(err)
	}
	w.Write(resp)

	// And finally, kick off a background goroutine that will increment the read count. Doing it in the background avoids
	// blocking the entire response. This does have a potential race condition, but that is fine.
	if isProductionEnvironment() {
		go func() {
			span, ctx := tracer.StartSpanFromContext(ctx, "apiQueryHandler.incrementReadCount")
			err = incrementReadCounts(ctx, deviceId)
			span.Finish(tracer.WithError(err))
		}()
	} else {
		err = incrementReadCounts(ctx, deviceId)
		if err != nil {
			panic(fmt.Sprintf("failed to increment read counts: %v", err))
		}
	}
	GLOBAL_STATSD.Incr("hishtory.query", []string{}, 1.0)
}

func incrementReadCounts(ctx context.Context, deviceId string) error {
	return GLOBAL_DB.WithContext(ctx).Exec("UPDATE enc_history_entries SET read_count = read_count + 1 WHERE device_id = ?", deviceId).Error
}

func getRemoteAddr(r *http.Request) string {
	addr, ok := r.Header["X-Real-Ip"]
	if !ok || len(addr) == 0 {
		return "UnknownIp"
	}
	return addr[0]
}

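// apiRegisterHandler registers a new device for a user and, if the user already has other
// devices, queues a dump request so an existing device can backfill its history.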
func apiRegisterHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	userId := getRequiredQueryParam(r, "user_id")
	deviceId := getRequiredQueryParam(r, "device_id")
	var existingDevicesCount int64 = -1
	checkGormResult(GLOBAL_DB.WithContext(ctx).Model(&shared.Device{}).Where("user_id = ?", userId).Count(&existingDevicesCount))
	fmt.Printf("apiRegisterHandler: existingDevicesCount=%d\n", existingDevicesCount)
	checkGormResult(GLOBAL_DB.WithContext(ctx).Create(&shared.Device{UserId: userId, DeviceId: deviceId, RegistrationIp: getRemoteAddr(r), RegistrationDate: time.Now()}))
	if existingDevicesCount > 0 {
		checkGormResult(GLOBAL_DB.WithContext(ctx).Create(&shared.DumpRequest{UserId: userId, RequestingDeviceId: deviceId, RequestTime: time.Now()}))
	}
	updateUsageData(ctx, r, userId, deviceId, 0, false)
	GLOBAL_STATSD.Incr("hishtory.register", []string{}, 1.0)
}

func apiGetPendingDumpRequestsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	userId := getRequiredQueryParam(r, "user_id")
	deviceId := getRequiredQueryParam(r, "device_id")
	var dumpRequests []*shared.DumpRequest
	// Filter out ones requested by the hishtory instance that sent this request
	checkGormResult(GLOBAL_DB.WithContext(ctx).Where("user_id = ? AND requesting_device_id != ?", userId, deviceId).Find(&dumpRequests))
	respBody, err := json.Marshal(dumpRequests)
	if err != nil {
		panic(fmt.Errorf("failed to JSON marshal the dump requests: %v", err))
	}
	w.Write(respBody)
}

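// apiSubmitDumpHandler ingests a full history dump sent on behalf of a newly registered device
// and then clears the corresponding dump request.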
func apiSubmitDumpHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	userId := getRequiredQueryParam(r, "user_id")
	srcDeviceId := getRequiredQueryParam(r, "source_device_id")
	requestingDeviceId := getRequiredQueryParam(r, "requesting_device_id")
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}
	var entries []shared.EncHistoryEntry
	err = json.Unmarshal(data, &entries)
	if err != nil {
		panic(fmt.Sprintf("body=%#v, err=%v", data, err))
	}
	fmt.Printf("apiSubmitDumpHandler: received request containing %d EncHistoryEntry\n", len(entries))
	err = GLOBAL_DB.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		for _, entry := range entries {
			entry.DeviceId = requestingDeviceId
			if entry.UserId != userId {
				return fmt.Errorf("batch contains an entry with UserId=%#v, when the query param contained the user_id=%#v", entry.UserId, userId)
			}
			checkGormResult(tx.Create(&entry))
		}
		return nil
	})
	if err != nil {
		panic(fmt.Errorf("failed to execute transaction to add dumped DB: %v", err))
	}
	checkGormResult(GLOBAL_DB.WithContext(ctx).Delete(&shared.DumpRequest{}, "user_id = ? AND requesting_device_id = ?", userId, requestingDeviceId))
	updateUsageData(ctx, r, userId, srcDeviceId, len(entries), false)
}

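// apiBannerHandler returns a (possibly empty) banner message for the client to display, escaping
// any caller-supplied forced banner to avoid reflecting raw HTML.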
func apiBannerHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	commitHash := getRequiredQueryParam(r, "commit_hash")
	deviceId := getRequiredQueryParam(r, "device_id")
	forcedBanner := r.URL.Query().Get("forced_banner")
	fmt.Printf("apiBannerHandler: commit_hash=%#v, device_id=%#v, forced_banner=%#v\n", commitHash, deviceId, forcedBanner)
	if getHishtoryVersion(r) == "v0.160" {
		w.Write([]byte("Warning: hiSHtory v0.160 has a bug that slows down your shell! Please run `hishtory update` to upgrade hiSHtory."))
		return
	}
	w.Write([]byte(html.EscapeString(forcedBanner)))
}

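// getDeletionRequestsHandler returns all pending deletion requests addressed to the requesting
// device and increments their read counts.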
func getDeletionRequestsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	userId := getRequiredQueryParam(r, "user_id")
	deviceId := getRequiredQueryParam(r, "device_id")
	// Increment the ReadCount
	checkGormResult(GLOBAL_DB.WithContext(ctx).Exec("UPDATE deletion_requests SET read_count = read_count + 1 WHERE destination_device_id = ? AND user_id = ?", deviceId, userId))
	// Return all the deletion requests
	var deletionRequests []*shared.DeletionRequest
	checkGormResult(GLOBAL_DB.WithContext(ctx).Where("user_id = ? AND destination_device_id = ?", userId, deviceId).Find(&deletionRequests))
	respBody, err := json.Marshal(deletionRequests)
	if err != nil {
		panic(fmt.Errorf("failed to JSON marshal the deletion requests: %v", err))
	}
	w.Write(respBody)
}

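// addDeletionRequestHandler stores a deletion request for every one of the user's devices and
// immediately applies it to the entries already stored in the backend.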
func addDeletionRequestHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}
	var request shared.DeletionRequest
	err = json.Unmarshal(data, &request)
	if err != nil {
		panic(fmt.Sprintf("body=%#v, err=%v", data, err))
	}
	request.ReadCount = 0
	fmt.Printf("addDeletionRequestHandler: received request containing %d messages to be deleted\n", len(request.Messages.Ids))
	// Store the deletion request so all the devices will get it
	tx := GLOBAL_DB.WithContext(ctx).Where("user_id = ?", request.UserId)
	var devices []*shared.Device
	checkGormResult(tx.Find(&devices))
	if len(devices) == 0 {
		panic(fmt.Errorf("found no devices associated with user_id=%s, can't save deletion request", request.UserId))
	}
	fmt.Printf("addDeletionRequestHandler: Found %d devices\n", len(devices))
	for _, device := range devices {
		request.DestinationDeviceId = device.DeviceId
		checkGormResult(GLOBAL_DB.WithContext(ctx).Create(&request))
	}
	// Also delete anything currently in the DB matching it
	numDeleted, err := applyDeletionRequestsToBackend(ctx, request)
	if err != nil {
		panic(err)
	}
	fmt.Printf("addDeletionRequestHandler: Deleted %d rows in the backend\n", numDeleted)
}

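// healthCheckHandler verifies DB connectivity. In production it also sanity-checks that the DB
// contains a plausible number of entries and devices and that writes still succeed.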
func healthCheckHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	if isProductionEnvironment() {
		// Check that we have a reasonable looking set of devices/entries in the DB
		rows, err := GLOBAL_DB.Raw("SELECT true FROM enc_history_entries LIMIT 1 OFFSET 1000").Rows()
		if err != nil {
			panic(fmt.Sprintf("failed to count entries in DB: %v", err))
		}
		defer rows.Close()
		if !rows.Next() {
			panic("Suspiciously few enc history entries!")
		}
		var count int64
		checkGormResult(GLOBAL_DB.WithContext(ctx).Model(&shared.Device{}).Count(&count))
		if count < 100 {
			panic("Suspiciously few devices!")
		}
		// Check that we can write to the DB. This entry will get written and then eventually cleaned by the cron.
		checkGormResult(GLOBAL_DB.WithContext(ctx).Create(&shared.EncHistoryEntry{
			EncryptedData: []byte("data"),
			Nonce:         []byte("nonce"),
			DeviceId:      "healthcheck_device_id",
			UserId:        "healthcheck_user_id",
			Date:          time.Now(),
			EncryptedId:   "healthcheck_enc_id",
			ReadCount:     10000,
		}))
	} else {
		db, err := GLOBAL_DB.DB()
		if err != nil {
			panic(fmt.Sprintf("failed to get DB: %v", err))
		}
		err = db.Ping()
		if err != nil {
			panic(fmt.Sprintf("failed to ping DB: %v", err))
		}
	}
	ok := "OK"
	w.Write([]byte(ok))
}

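// applyDeletionRequestsToBackend deletes every stored encrypted entry matched by the deletion
// request and returns the number of rows removed.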
func applyDeletionRequestsToBackend(ctx context.Context, request shared.DeletionRequest) (int, error) {
	tx := GLOBAL_DB.WithContext(ctx).Where("false")
	for _, message := range request.Messages.Ids {
		tx = tx.Or(GLOBAL_DB.WithContext(ctx).Where("user_id = ? AND device_id = ? AND date = ?", request.UserId, message.DeviceId, message.Date))
	}
	result := tx.Delete(&shared.EncHistoryEntry{})
	checkGormResult(result)
	return int(result.RowsAffected), nil
}

func wipeDbHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	if r.Host == "api.hishtory.dev" || isProductionEnvironment() {
		panic("refusing to wipe the DB for prod")
	}
	checkGormResult(GLOBAL_DB.WithContext(ctx).Exec("DELETE FROM enc_history_entries"))
}

func getNumConnectionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	sqlDb, err := GLOBAL_DB.DB()
	if err != nil {
		panic(err)
	}
	w.Write([]byte(fmt.Sprintf("%#v", sqlDb.Stats().OpenConnections)))
}

func isTestEnvironment() bool {
	return os.Getenv("HISHTORY_TEST") != ""
}

func isProductionEnvironment() bool {
	return os.Getenv("HISHTORY_ENV") == "prod"
}

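// OpenDB opens the backing database: an in-memory sqlite DB in tests, an on-disk sqlite DB if
// HISHTORY_SQLITE_DB is set, and otherwise a Datadog-traced Postgres connection.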
func OpenDB() (*gorm.DB, error) {
	if isTestEnvironment() {
		db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
		if err != nil {
			return nil, fmt.Errorf("failed to connect to the DB: %v", err)
		}
		db.AutoMigrate(&shared.EncHistoryEntry{})
		db.AutoMigrate(&shared.Device{})
		db.AutoMigrate(&UsageData{})
		db.AutoMigrate(&shared.DumpRequest{})
		db.AutoMigrate(&shared.DeletionRequest{})
		db.AutoMigrate(&shared.Feedback{})
		db.Exec("PRAGMA journal_mode = WAL")
		return db, nil
	}

	// The same as the default logger, except with a higher SlowThreshold
	customLogger := logger.New(log.New(os.Stdout, "\r\n", log.LstdFlags), logger.Config{
		SlowThreshold:             1000 * time.Millisecond,
		LogLevel:                  logger.Warn,
		IgnoreRecordNotFoundError: false,
		Colorful:                  true,
	})
	var sqliteDb string
	if os.Getenv("HISHTORY_SQLITE_DB") != "" {
		sqliteDb = os.Getenv("HISHTORY_SQLITE_DB")
	}
	var db *gorm.DB
	var err error
	if sqliteDb != "" {
		db, err = gorm.Open(sqlite.Open(sqliteDb), &gorm.Config{Logger: customLogger})
	} else {
		postgresDb := fmt.Sprintf(PostgresDb, os.Getenv("POSTGRESQL_PASSWORD"))
		if os.Getenv("HISHTORY_POSTGRES_DB") != "" {
			postgresDb = os.Getenv("HISHTORY_POSTGRES_DB")
		}
		sqltrace.Register("pgx", &stdlib.Driver{}, sqltrace.WithServiceName("hishtory-api"))
		// Use a distinct error variable here so the traced gorm.Open below assigns to the outer
		// err rather than a shadowed copy that would silently be ignored.
		sqlDb, openErr := sqltrace.Open("pgx", postgresDb)
		if openErr != nil {
			log.Fatal(openErr)
		}
		db, err = gormtrace.Open(postgres.New(postgres.Config{Conn: sqlDb}), &gorm.Config{Logger: customLogger})
	}
	if err != nil {
		return nil, fmt.Errorf("failed to connect to the DB: %v", err)
	}
	db.AutoMigrate(&shared.EncHistoryEntry{})
	db.AutoMigrate(&shared.Device{})
	db.AutoMigrate(&UsageData{})
	db.AutoMigrate(&shared.DumpRequest{})
	db.AutoMigrate(&shared.DeletionRequest{})
	db.AutoMigrate(&shared.Feedback{})
	return db, nil
}

func init() {
	if ReleaseVersion == "UNKNOWN" && !isTestEnvironment() {
		panic("server.go was built without a ReleaseVersion!")
	}
	InitDB()
	go runBackgroundJobs(context.Background())
}

func cron(ctx context.Context) error {
	err := updateReleaseVersion()
	if err != nil {
		fmt.Println(err)
	}
	err = cleanDatabase(ctx)
	if err != nil {
		fmt.Println(err)
	}
	return nil
}

func runBackgroundJobs(ctx context.Context) {
	time.Sleep(5 * time.Second)
	for {
		err := cron(ctx)
		if err != nil {
			fmt.Printf("Cron failure: %v\n", err)
		}
		time.Sleep(10 * time.Minute)
	}
}

func triggerCronHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	err := cron(ctx)
	if err != nil {
		panic(err)
	}
}

type releaseInfo struct {
	Name string `json:"name"`
}

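// updateReleaseVersion queries the GitHub API for the latest hishtory release and updates
// ReleaseVersion, falling back to an earlier tag if the latest one has no valid binaries.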
func updateReleaseVersion() error {
	resp, err := http.Get("https://api.github.com/repos/ddworken/hishtory/releases/latest")
	if err != nil {
		return fmt.Errorf("failed to get latest release version: %v", err)
	}
	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read github API response body: %v", err)
	}
	if resp.StatusCode == 403 && strings.Contains(string(respBody), "API rate limit exceeded for ") {
		return nil
	}
	if resp.StatusCode != 200 {
		return fmt.Errorf("failed to call github API, status_code=%d, body=%#v", resp.StatusCode, string(respBody))
	}
	var info releaseInfo
	err = json.Unmarshal(respBody, &info)
	if err != nil {
		return fmt.Errorf("failed to parse github API response: %v", err)
	}
	latestVersionTag := info.Name
	ReleaseVersion = decrementVersionIfInvalid(latestVersionTag)
	return nil
}

func decrementVersionIfInvalid(initialVersion string) string {
	// Decrements the version up to 5 times if the version doesn't have valid binaries yet.
	version := initialVersion
	for i := 0; i < 5; i++ {
		updateInfo := buildUpdateInfo(version)
		err := assertValidUpdate(updateInfo)
		if err == nil {
			fmt.Printf("Found a valid version: %v\n", version)
			return version
		}
		fmt.Printf("Found %s to be an invalid version: %v\n", version, err)
		version, err = decrementVersion(version)
		if err != nil {
			fmt.Printf("Failed to decrement version after finding the latest version was invalid: %v\n", err)
			return initialVersion
		}
	}
	fmt.Printf("Decremented the version 5 times and failed to find a valid version number, initial version number: %v, last checked version number: %v\n", initialVersion, version)
	return initialVersion
}

func assertValidUpdate(updateInfo shared.UpdateInfo) error {
	urls := []string{updateInfo.LinuxAmd64Url, updateInfo.LinuxAmd64AttestationUrl,
		updateInfo.DarwinAmd64Url, updateInfo.DarwinAmd64UnsignedUrl, updateInfo.DarwinAmd64AttestationUrl,
		updateInfo.DarwinArm64Url, updateInfo.DarwinArm64UnsignedUrl, updateInfo.DarwinArm64AttestationUrl}
	for _, url := range urls {
		resp, err := http.Get(url)
		if err != nil {
			return fmt.Errorf("failed to retrieve URL %#v: %v", url, err)
		}
		if resp.StatusCode == 404 {
			return fmt.Errorf("URL %#v returned 404", url)
		}
	}
	return nil
}

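// InitDB opens the global DB handle, verifies connectivity, and tunes the connection pool for
// the current environment.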
func InitDB() {
	var err error
	GLOBAL_DB, err = OpenDB()
	if err != nil {
		panic(err)
	}
	sqlDb, err := GLOBAL_DB.DB()
	if err != nil {
		panic(err)
	}
	err = sqlDb.Ping()
	if err != nil {
		panic(err)
	}
	if isProductionEnvironment() {
		sqlDb.SetMaxIdleConns(10)
	}
	if isTestEnvironment() {
		sqlDb.SetMaxIdleConns(1)
	}
}

func decrementVersion(version string) (string, error) {
	if version == "UNKNOWN" {
		return "", fmt.Errorf("cannot decrement UNKNOWN")
	}
	parts := strings.Split(version, ".")
	if len(parts) != 2 {
		return "", fmt.Errorf("invalid version: %s", version)
	}
	versionNumber, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", fmt.Errorf("invalid version: %s", version)
	}
	return parts[0] + "." + strconv.Itoa(versionNumber-1), nil
}

func buildUpdateInfo(version string) shared.UpdateInfo {
	return shared.UpdateInfo{
		LinuxAmd64Url:             fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-linux-amd64", version),
		LinuxAmd64AttestationUrl:  fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-linux-amd64.intoto.jsonl", version),
		DarwinAmd64Url:            fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-darwin-amd64", version),
		DarwinAmd64UnsignedUrl:    fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-darwin-amd64-unsigned", version),
		DarwinAmd64AttestationUrl: fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-darwin-amd64.intoto.jsonl", version),
		DarwinArm64Url:            fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-darwin-arm64", version),
		DarwinArm64UnsignedUrl:    fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-darwin-arm64-unsigned", version),
		DarwinArm64AttestationUrl: fmt.Sprintf("https://github.com/ddworken/hishtory/releases/download/%s/hishtory-darwin-arm64.intoto.jsonl", version),
		Version:                   version,
	}
}

func apiDownloadHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	updateInfo := buildUpdateInfo(ReleaseVersion)
	resp, err := json.Marshal(updateInfo)
	if err != nil {
		panic(err)
	}
	w.Write(resp)
}

func slsaStatusHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	// returns "OK" unless there is a current SLSA bug
	v := getHishtoryVersion(r)
	if !strings.Contains(v, "v0.") {
		w.Write([]byte("OK"))
		return
	}
	vNum, err := strconv.Atoi(strings.Split(v, ".")[1])
	if err != nil {
		w.Write([]byte("OK"))
		return
	}
	if vNum < 159 {
		w.Write([]byte("Sigstore deployed a broken change. See https://github.com/slsa-framework/slsa-github-generator/issues/1163"))
		return
	}
	w.Write([]byte("OK"))
}

func feedbackHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		panic(err)
	}
	var feedback shared.Feedback
	err = json.Unmarshal(data, &feedback)
	if err != nil {
		panic(fmt.Sprintf("feedbackHandler: body=%#v, err=%v", data, err))
	}
	fmt.Printf("feedbackHandler: received request containing feedback %#v\n", feedback)
	checkGormResult(GLOBAL_DB.WithContext(ctx).Create(&feedback))
	GLOBAL_STATSD.Incr("hishtory.uninstall", []string{}, 1.0)
}

type loggedResponseData struct {
	size int
}

type loggingResponseWriter struct {
	http.ResponseWriter
	responseData *loggedResponseData
}

func (r *loggingResponseWriter) Write(b []byte) (int, error) {
	size, err := r.ResponseWriter.Write(b)
	r.responseData.size += size
	return size, err
}

func (r *loggingResponseWriter) WriteHeader(statusCode int) {
	r.ResponseWriter.WriteHeader(statusCode)
}

func getFunctionName(temp interface{}) string {
	strs := strings.Split(runtime.FuncForPC(reflect.ValueOf(temp).Pointer()).Name(), ".")
	return strs[len(strs)-1]
}

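// withLogging wraps a handler so that every request is traced via Datadog, logged with its
// duration and response size, and counted in statsd.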
func withLogging(h func(context.Context, http.ResponseWriter, *http.Request)) http.Handler {
	logFn := func(rw http.ResponseWriter, r *http.Request) {
		var responseData loggedResponseData
		lrw := loggingResponseWriter{
			ResponseWriter: rw,
			responseData:   &responseData,
		}
		start := time.Now()
		span, ctx := tracer.StartSpanFromContext(
			context.Background(),
			getFunctionName(h),
			tracer.SpanType(ext.SpanTypeSQL),
			tracer.ServiceName("hishtory-api"),
		)
		defer span.Finish()

		h(ctx, &lrw, r)

		duration := time.Since(start)
		fmt.Printf("%s %s %#v %s %s %s\n", getRemoteAddr(r), r.Method, r.RequestURI, getHishtoryVersion(r), duration.String(), byteCountToString(responseData.size))
		GLOBAL_STATSD.Distribution("hishtory.request_duration", float64(duration.Microseconds())/1_000, []string{"HANDLER=" + getFunctionName(h)}, 1.0)
		GLOBAL_STATSD.Incr("hishtory.request", []string{}, 1.0)
	}
	return http.HandlerFunc(logFn)
}

func byteCountToString(b int) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMG"[exp])
}

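// cleanDatabase drops encrypted entries whose read_count exceeds 10 and deletion requests whose
// read_count exceeds 100, since every device has already seen them.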
func cleanDatabase(ctx context.Context) error {
	checkGormResult(GLOBAL_DB.WithContext(ctx).Exec("DELETE FROM enc_history_entries WHERE read_count > 10"))
	checkGormResult(GLOBAL_DB.WithContext(ctx).Exec("DELETE FROM deletion_requests WHERE read_count > 100"))
	// TODO(optimization): Clean the database by deleting entries for users that haven't been used in X amount of time
	return nil
}

func main() {
	if isProductionEnvironment() {
		err := profiler.Start(
			profiler.WithService("hishtory-api"),
			profiler.WithVersion(ReleaseVersion),
			profiler.WithAPIKey(os.Getenv("DD_API_KEY")),
			profiler.WithUDS("/var/run/datadog/apm.socket"),
			profiler.WithProfileTypes(
				profiler.CPUProfile,
				profiler.HeapProfile,
			),
		)
		if err != nil {
			fmt.Printf("Failed to start DataDog profiler: %v\n", err)
		}
		defer profiler.Stop()
		tracer.Start(
			tracer.WithRuntimeMetrics(),
			tracer.WithService("hishtory-api"),
			tracer.WithUDS("/var/run/datadog/apm.socket"),
		)
		defer tracer.Stop()
		ddStats, err := statsd.New("unix:///var/run/datadog/dsd.socket")
		if err != nil {
			fmt.Printf("Failed to start DataDog statsd: %v\n", err)
		}
		GLOBAL_STATSD = ddStats
	}
	mux := httptrace.NewServeMux()
	mux.Handle("/api/v1/submit", withLogging(apiSubmitHandler))
	mux.Handle("/api/v1/get-dump-requests", withLogging(apiGetPendingDumpRequestsHandler))
	mux.Handle("/api/v1/submit-dump", withLogging(apiSubmitDumpHandler))
	mux.Handle("/api/v1/query", withLogging(apiQueryHandler))
	mux.Handle("/api/v1/bootstrap", withLogging(apiBootstrapHandler))
	mux.Handle("/api/v1/register", withLogging(apiRegisterHandler))
	mux.Handle("/api/v1/banner", withLogging(apiBannerHandler))
	mux.Handle("/api/v1/download", withLogging(apiDownloadHandler))
	mux.Handle("/api/v1/trigger-cron", withLogging(triggerCronHandler))
	mux.Handle("/api/v1/get-deletion-requests", withLogging(getDeletionRequestsHandler))
	mux.Handle("/api/v1/add-deletion-request", withLogging(addDeletionRequestHandler))
	mux.Handle("/api/v1/slsa-status", withLogging(slsaStatusHandler))
	mux.Handle("/api/v1/feedback", withLogging(feedbackHandler))
	mux.Handle("/healthcheck", withLogging(healthCheckHandler))
	mux.Handle("/internal/api/v1/usage-stats", withLogging(usageStatsHandler))
	mux.Handle("/internal/api/v1/stats", withLogging(statsHandler))
	if isTestEnvironment() {
		mux.Handle("/api/v1/wipe-db", withLogging(wipeDbHandler))
		mux.Handle("/api/v1/get-num-connections", withLogging(getNumConnectionsHandler))
	}
	fmt.Println("Listening on :8080")
	log.Fatal(http.ListenAndServe(":8080", mux))
}

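// checkGormResult panics (with the caller's file and line) if a GORM operation returned an error.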
func checkGormResult(result *gorm.DB) {
	if result.Error != nil {
		_, filename, line, _ := runtime.Caller(1)
		panic(fmt.Sprintf("DB error at %s:%d: %v", filename, line, result.Error))
	}
}

// TODO(optimization): Maybe optimize the endpoints a bit to reduce the number of round trips required?