2021-09-11 00:00:04 +02:00
|
|
|
package sql
|
2021-07-12 06:56:30 +02:00
|
|
|
|
|
|
|
import (
|
|
|
|
"database/sql"
|
|
|
|
"errors"
|
2021-07-16 04:07:30 +02:00
|
|
|
"fmt"
|
2021-07-12 06:56:30 +02:00
|
|
|
"log"
|
2021-07-12 10:25:25 +02:00
|
|
|
"strings"
|
|
|
|
"time"
|
2021-07-12 06:56:30 +02:00
|
|
|
|
|
|
|
"github.com/TwinProduction/gatus/core"
|
2021-08-13 03:54:23 +02:00
|
|
|
"github.com/TwinProduction/gatus/storage/store/common"
|
|
|
|
"github.com/TwinProduction/gatus/storage/store/common/paging"
|
2021-07-12 06:56:30 +02:00
|
|
|
"github.com/TwinProduction/gatus/util"
|
2021-09-11 00:00:04 +02:00
|
|
|
_ "github.com/lib/pq"
|
2021-07-12 06:56:30 +02:00
|
|
|
_ "modernc.org/sqlite"
|
|
|
|
)
|
|
|
|
|
2021-07-14 04:59:43 +02:00
|
|
|
//////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Note that only exported functions in this file may create, commit, or rollback a transaction //
|
|
|
|
//////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
2021-07-12 10:25:25 +02:00
|
|
|
const (
|
2021-08-07 17:46:58 +02:00
|
|
|
// arraySeparator is the separator used to separate multiple strings in a single column.
|
|
|
|
// It's a dirty hack, but it's only used for persisting errors, and since this data will likely only ever be used
|
|
|
|
// for aesthetic purposes, I deemed it wasn't worth the performance impact of yet another one-to-many table.
|
2021-07-12 10:25:25 +02:00
|
|
|
arraySeparator = "|~|"
|
2021-07-14 07:40:27 +02:00
|
|
|
|
2021-08-13 03:54:23 +02:00
|
|
|
uptimeCleanUpThreshold = 10 * 24 * time.Hour // Maximum uptime age before triggering a clean up
|
|
|
|
eventsCleanUpThreshold = common.MaximumNumberOfEvents + 10 // Maximum number of events before triggering a clean up
|
|
|
|
resultsCleanUpThreshold = common.MaximumNumberOfResults + 10 // Maximum number of results before triggering a clean up
|
2021-07-14 07:40:27 +02:00
|
|
|
|
|
|
|
uptimeRetention = 7 * 24 * time.Hour
|
2021-07-12 10:25:25 +02:00
|
|
|
)
|
|
|
|
|
2021-07-12 06:56:30 +02:00
|
|
|
var (
|
|
|
|
// ErrFilePathNotSpecified is the error returned when path parameter passed in NewStore is blank
|
|
|
|
ErrFilePathNotSpecified = errors.New("file path cannot be empty")
|
|
|
|
|
|
|
|
// ErrDatabaseDriverNotSpecified is the error returned when the driver parameter passed in NewStore is blank
|
|
|
|
ErrDatabaseDriverNotSpecified = errors.New("database driver cannot be empty")
|
|
|
|
|
2021-08-13 03:54:23 +02:00
|
|
|
errNoRowsReturned = errors.New("expected a row to be returned, but none was")
|
2021-07-12 06:56:30 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// Store that leverages a database
|
|
|
|
type Store struct {
|
|
|
|
driver, file string
|
|
|
|
|
|
|
|
db *sql.DB
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewStore initializes the database and creates the schema if it doesn't already exist in the file specified
|
|
|
|
func NewStore(driver, path string) (*Store, error) {
|
|
|
|
if len(driver) == 0 {
|
|
|
|
return nil, ErrDatabaseDriverNotSpecified
|
|
|
|
}
|
|
|
|
if len(path) == 0 {
|
|
|
|
return nil, ErrFilePathNotSpecified
|
|
|
|
}
|
|
|
|
store := &Store{driver: driver, file: path}
|
|
|
|
var err error
|
|
|
|
if store.db, err = sql.Open(driver, path); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-07-13 04:53:14 +02:00
|
|
|
if driver == "sqlite" {
|
|
|
|
_, _ = store.db.Exec("PRAGMA foreign_keys=ON")
|
|
|
|
_, _ = store.db.Exec("PRAGMA journal_mode=WAL")
|
2021-07-17 02:17:02 +02:00
|
|
|
_, _ = store.db.Exec("PRAGMA synchronous=NORMAL")
|
2021-07-13 04:53:14 +02:00
|
|
|
// Prevents driver from running into "database is locked" errors
|
|
|
|
// This is because we're using WAL to improve performance
|
|
|
|
store.db.SetMaxOpenConns(1)
|
|
|
|
}
|
2021-07-12 06:56:30 +02:00
|
|
|
if err = store.createSchema(); err != nil {
|
|
|
|
_ = store.db.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return store, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// createSchema creates the schema required to perform all database operations.
|
|
|
|
func (s *Store) createSchema() error {
|
2021-09-11 00:00:04 +02:00
|
|
|
if s.driver == "sqlite" {
|
|
|
|
return s.createSQLiteSchema()
|
2021-07-12 06:56:30 +02:00
|
|
|
}
|
2021-09-11 00:00:04 +02:00
|
|
|
return s.createPostgresSchema()
|
2021-07-12 06:56:30 +02:00
|
|
|
}
|
|
|
|
|
2021-07-15 04:26:51 +02:00
|
|
|
// GetAllServiceStatuses returns all monitored core.ServiceStatus
|
2021-07-12 10:25:25 +02:00
|
|
|
// with a subset of core.Result defined by the page and pageSize parameters
|
2021-09-11 00:00:04 +02:00
|
|
|
func (s *Store) GetAllServiceStatuses(params *paging.ServiceStatusParams) ([]*core.ServiceStatus, error) {
|
2021-07-14 04:59:43 +02:00
|
|
|
tx, err := s.db.Begin()
|
|
|
|
if err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
return nil, err
|
2021-07-14 04:59:43 +02:00
|
|
|
}
|
|
|
|
keys, err := s.getAllServiceKeys(tx)
|
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
2021-09-11 00:00:04 +02:00
|
|
|
return nil, err
|
2021-07-14 04:59:43 +02:00
|
|
|
}
|
2021-09-02 20:57:50 +02:00
|
|
|
serviceStatuses := make([]*core.ServiceStatus, 0, len(keys))
|
2021-07-14 04:59:43 +02:00
|
|
|
for _, key := range keys {
|
2021-07-15 04:26:51 +02:00
|
|
|
serviceStatus, err := s.getServiceStatusByKey(tx, key, params)
|
2021-07-14 04:59:43 +02:00
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
2021-09-02 20:57:50 +02:00
|
|
|
serviceStatuses = append(serviceStatuses, serviceStatus)
|
2021-07-14 04:17:27 +02:00
|
|
|
}
|
2021-07-14 04:59:43 +02:00
|
|
|
if err = tx.Commit(); err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
}
|
2021-09-11 00:00:04 +02:00
|
|
|
return serviceStatuses, err
|
2021-07-12 06:56:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetServiceStatus returns the service status for a given service name in the given group
|
2021-09-11 00:00:04 +02:00
|
|
|
func (s *Store) GetServiceStatus(groupName, serviceName string, params *paging.ServiceStatusParams) (*core.ServiceStatus, error) {
|
2021-07-17 01:12:14 +02:00
|
|
|
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName), params)
|
2021-07-12 06:56:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetServiceStatusByKey returns the service status for a given key
|
2021-09-11 00:00:04 +02:00
|
|
|
func (s *Store) GetServiceStatusByKey(key string, params *paging.ServiceStatusParams) (*core.ServiceStatus, error) {
|
2021-07-14 04:59:43 +02:00
|
|
|
tx, err := s.db.Begin()
|
|
|
|
if err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
return nil, err
|
2021-07-14 04:59:43 +02:00
|
|
|
}
|
2021-07-15 04:26:51 +02:00
|
|
|
serviceStatus, err := s.getServiceStatusByKey(tx, key, params)
|
2021-07-14 04:59:43 +02:00
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
2021-09-11 00:00:04 +02:00
|
|
|
return nil, err
|
2021-07-14 04:59:43 +02:00
|
|
|
}
|
|
|
|
if err = tx.Commit(); err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
}
|
2021-09-11 00:00:04 +02:00
|
|
|
return serviceStatus, err
|
2021-07-12 06:56:30 +02:00
|
|
|
}
|
|
|
|
|
2021-08-13 03:54:23 +02:00
|
|
|
// GetUptimeByKey returns the uptime percentage during a time range
|
|
|
|
func (s *Store) GetUptimeByKey(key string, from, to time.Time) (float64, error) {
|
|
|
|
if from.After(to) {
|
|
|
|
return 0, common.ErrInvalidTimeRange
|
|
|
|
}
|
|
|
|
tx, err := s.db.Begin()
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
serviceID, _, _, err := s.getServiceIDGroupAndNameByKey(tx, key)
|
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
uptime, _, err := s.getServiceUptime(tx, serviceID, from, to)
|
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
if err = tx.Commit(); err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
}
|
|
|
|
return uptime, nil
|
|
|
|
}
|
|
|
|
|
2021-08-21 16:59:09 +02:00
|
|
|
// GetAverageResponseTimeByKey returns the average response time in milliseconds (value) during a time range
|
|
|
|
func (s *Store) GetAverageResponseTimeByKey(key string, from, to time.Time) (int, error) {
|
|
|
|
if from.After(to) {
|
|
|
|
return 0, common.ErrInvalidTimeRange
|
|
|
|
}
|
|
|
|
tx, err := s.db.Begin()
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
serviceID, _, _, err := s.getServiceIDGroupAndNameByKey(tx, key)
|
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
averageResponseTime, err := s.getServiceAverageResponseTime(tx, serviceID, from, to)
|
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
if err = tx.Commit(); err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
}
|
|
|
|
return averageResponseTime, nil
|
|
|
|
}
|
|
|
|
|
2021-08-20 05:07:21 +02:00
|
|
|
// GetHourlyAverageResponseTimeByKey returns a map of hourly (key) average response time in milliseconds (value) during a time range
|
|
|
|
func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time) (map[int64]int, error) {
|
|
|
|
if from.After(to) {
|
|
|
|
return nil, common.ErrInvalidTimeRange
|
|
|
|
}
|
|
|
|
tx, err := s.db.Begin()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
serviceID, _, _, err := s.getServiceIDGroupAndNameByKey(tx, key)
|
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
hourlyAverageResponseTimes, err := s.getServiceHourlyAverageResponseTimes(tx, serviceID, from, to)
|
|
|
|
if err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if err = tx.Commit(); err != nil {
|
|
|
|
_ = tx.Rollback()
|
|
|
|
}
|
|
|
|
return hourlyAverageResponseTimes, nil
|
|
|
|
}
|
|
|
|
|
2021-07-13 04:53:14 +02:00
|
|
|
// Insert adds the observed result for the specified service into the store.
//
// Everything happens inside a single transaction: the service row is created
// on first sight, state-change events are recorded, the result (and its
// condition results) is persisted, hourly uptime counters are updated, and
// old events/results/uptime entries are opportunistically cleaned up.
// Secondary failures (event bookkeeping, clean-ups, uptime update) are logged
// and ignored; only a failure to resolve/create the service or to insert the
// result aborts the transaction.
func (s *Store) Insert(service *core.Service, result *core.Result) error {
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}
	//start := time.Now()
	serviceID, err := s.getServiceID(tx, service)
	if err != nil {
		if err == common.ErrServiceNotFound {
			// Service doesn't exist in the database, insert it
			if serviceID, err = s.insertService(tx, service); err != nil {
				_ = tx.Rollback()
				log.Printf("[sql][Insert] Failed to create service with group=%s; service=%s: %s", service.Group, service.Name, err.Error())
				return err
			}
		} else {
			_ = tx.Rollback()
			log.Printf("[sql][Insert] Failed to retrieve id of service with group=%s; service=%s: %s", service.Group, service.Name, err.Error())
			return err
		}
	}
	// First, we need to check if we need to insert a new event.
	//
	// A new event must be added if either of the following cases happen:
	// 1. There is only 1 event. The total number of events for a service can only be 1 if the only existing event is
	//    of type EventStart, in which case we will have to create a new event of type EventHealthy or EventUnhealthy
	//    based on result.Success.
	// 2. The lastResult.Success != result.Success. This implies that the service went from healthy to unhealthy or
	//    vice-versa, in which case we will have to create a new event of type EventHealthy or EventUnhealthy
	//    based on result.Success.
	numberOfEvents, err := s.getNumberOfEventsByServiceID(tx, serviceID)
	if err != nil {
		// Non-fatal: without the count, numberOfEvents stays 0 and the EventStart branch below runs
		log.Printf("[sql][Insert] Failed to retrieve total number of events for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
	}
	if numberOfEvents == 0 {
		// There's no events yet, which means we need to add the EventStart and the first healthy/unhealthy event
		// The EventStart timestamp is nudged 50ms before the result so it sorts strictly before it
		err = s.insertEvent(tx, serviceID, &core.Event{
			Type:      core.EventStart,
			Timestamp: result.Timestamp.Add(-50 * time.Millisecond),
		})
		if err != nil {
			// Silently fail
			log.Printf("[sql][Insert] Failed to insert event=%s for group=%s; service=%s: %s", core.EventStart, service.Group, service.Name, err.Error())
		}
		event := core.NewEventFromResult(result)
		if err = s.insertEvent(tx, serviceID, event); err != nil {
			// Silently fail
			log.Printf("[sql][Insert] Failed to insert event=%s for group=%s; service=%s: %s", event.Type, service.Group, service.Name, err.Error())
		}
	} else {
		// Get the success value of the previous result
		var lastResultSuccess bool
		if lastResultSuccess, err = s.getLastServiceResultSuccessValue(tx, serviceID); err != nil {
			log.Printf("[sql][Insert] Failed to retrieve outcome of previous result for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
		} else {
			// If we managed to retrieve the outcome of the previous result, we'll compare it with the new result.
			// If the final outcome (success or failure) of the previous and the new result aren't the same, it means
			// that the service either went from Healthy to Unhealthy or Unhealthy -> Healthy, therefore, we'll add
			// an event to mark the change in state
			if lastResultSuccess != result.Success {
				event := core.NewEventFromResult(result)
				if err = s.insertEvent(tx, serviceID, event); err != nil {
					// Silently fail
					log.Printf("[sql][Insert] Failed to insert event=%s for group=%s; service=%s: %s", event.Type, service.Group, service.Name, err.Error())
				}
			}
		}
		// Clean up old events if there's more than twice the maximum number of events
		// This lets us both keep the table clean without impacting performance too much
		// (since we're only deleting MaximumNumberOfEvents at a time instead of 1)
		if numberOfEvents > eventsCleanUpThreshold {
			if err = s.deleteOldServiceEvents(tx, serviceID); err != nil {
				log.Printf("[sql][Insert] Failed to delete old events for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
			}
		}
	}
	// Second, we need to insert the result.
	if err = s.insertResult(tx, serviceID, result); err != nil {
		log.Printf("[sql][Insert] Failed to insert result for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
		_ = tx.Rollback() // If we can't insert the result, we'll rollback now since there's no point continuing
		return err
	}
	// Clean up old results
	numberOfResults, err := s.getNumberOfResultsByServiceID(tx, serviceID)
	if err != nil {
		log.Printf("[sql][Insert] Failed to retrieve total number of results for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
	} else {
		if numberOfResults > resultsCleanUpThreshold {
			if err = s.deleteOldServiceResults(tx, serviceID); err != nil {
				log.Printf("[sql][Insert] Failed to delete old results for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
			}
		}
	}
	// Finally, we need to insert the uptime data.
	// Because the uptime data significantly outlives the results, we can't rely on the results for determining the uptime
	if err = s.updateServiceUptime(tx, serviceID, result); err != nil {
		log.Printf("[sql][Insert] Failed to update uptime for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
	}
	// Clean up old uptime entries
	ageOfOldestUptimeEntry, err := s.getAgeOfOldestServiceUptimeEntry(tx, serviceID)
	if err != nil {
		log.Printf("[sql][Insert] Failed to retrieve oldest service uptime entry for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
	} else {
		if ageOfOldestUptimeEntry > uptimeCleanUpThreshold {
			if err = s.deleteOldUptimeEntries(tx, serviceID, time.Now().Add(-(uptimeRetention + time.Hour))); err != nil {
				log.Printf("[sql][Insert] Failed to delete old uptime entries for group=%s; service=%s: %s", service.Group, service.Name, err.Error())
			}
		}
	}
	//log.Printf("[sql][Insert] Successfully inserted result in duration=%dms", time.Since(start).Milliseconds())
	if err = tx.Commit(); err != nil {
		_ = tx.Rollback()
	}
	return err
}
|
|
|
|
|
|
|
|
// DeleteAllServiceStatusesNotInKeys removes all rows owned by a service whose key is not within the keys provided
|
|
|
|
func (s *Store) DeleteAllServiceStatusesNotInKeys(keys []string) int {
|
2021-07-18 06:34:22 +02:00
|
|
|
var err error
|
|
|
|
var result sql.Result
|
2021-07-16 04:07:30 +02:00
|
|
|
if len(keys) == 0 {
|
2021-07-18 06:34:22 +02:00
|
|
|
// Delete everything
|
|
|
|
result, err = s.db.Exec("DELETE FROM service")
|
|
|
|
} else {
|
|
|
|
args := make([]interface{}, 0, len(keys))
|
2021-09-11 00:00:04 +02:00
|
|
|
query := "DELETE FROM service WHERE service_key NOT IN ("
|
2021-07-18 06:34:22 +02:00
|
|
|
for i := range keys {
|
2021-09-11 00:00:04 +02:00
|
|
|
query += fmt.Sprintf("$%d,", i+1)
|
2021-07-18 06:34:22 +02:00
|
|
|
args = append(args, keys[i])
|
|
|
|
}
|
2021-09-11 00:00:04 +02:00
|
|
|
query = query[:len(query)-1] + ")"
|
|
|
|
result, err = s.db.Exec(query, args...)
|
2021-07-16 04:07:30 +02:00
|
|
|
}
|
|
|
|
if err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
log.Printf("[sql][DeleteAllServiceStatusesNotInKeys] Failed to delete rows that do not belong to any of keys=%v: %s", keys, err.Error())
|
2021-07-18 06:34:22 +02:00
|
|
|
return 0
|
2021-07-16 04:07:30 +02:00
|
|
|
}
|
2021-07-18 06:34:22 +02:00
|
|
|
rowsAffects, _ := result.RowsAffected()
|
|
|
|
return int(rowsAffects)
|
2021-07-13 04:53:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Clear deletes everything from the store.
// Only the service table is deleted directly; dependent rows (results, events,
// uptime) presumably go away via ON DELETE CASCADE foreign keys — confirm
// against the schema created in createSchema. Errors are intentionally ignored.
func (s *Store) Clear() {
	_, _ = s.db.Exec("DELETE FROM service")
}
|
|
|
|
|
|
|
|
// Save does nothing, because this store is immediately persistent:
// Insert commits its own transaction, so there is no in-memory state to flush.
func (s *Store) Save() error {
	return nil
}
|
|
|
|
|
|
|
|
// Close the database handle. The store must not be used after Close is called.
// The error from (*sql.DB).Close is intentionally discarded.
func (s *Store) Close() {
	_ = s.db.Close()
}
|
|
|
|
|
2021-07-14 05:23:14 +02:00
|
|
|
// insertService inserts a service in the store and returns the generated id of said service
|
|
|
|
func (s *Store) insertService(tx *sql.Tx, service *core.Service) (int64, error) {
|
2021-09-11 00:00:04 +02:00
|
|
|
//log.Printf("[sql][insertService] Inserting service with group=%s and name=%s", service.Group, service.Name)
|
|
|
|
var id int64
|
|
|
|
err := tx.QueryRow(
|
|
|
|
"INSERT INTO service (service_key, service_name, service_group) VALUES ($1, $2, $3) RETURNING service_id",
|
2021-07-14 05:23:14 +02:00
|
|
|
service.Key(),
|
|
|
|
service.Name,
|
|
|
|
service.Group,
|
2021-09-11 00:00:04 +02:00
|
|
|
).Scan(&id)
|
2021-07-14 05:23:14 +02:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2021-09-11 00:00:04 +02:00
|
|
|
return id, nil
|
2021-07-14 05:23:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// insertEvent inserts a service event in the store
|
|
|
|
func (s *Store) insertEvent(tx *sql.Tx, serviceID int64, event *core.Event) error {
|
|
|
|
_, err := tx.Exec(
|
|
|
|
"INSERT INTO service_event (service_id, event_type, event_timestamp) VALUES ($1, $2, $3)",
|
|
|
|
serviceID,
|
|
|
|
event.Type,
|
2021-09-11 00:00:04 +02:00
|
|
|
event.Timestamp.UTC(),
|
2021-07-14 05:23:14 +02:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// insertResult inserts a result in the store
|
|
|
|
func (s *Store) insertResult(tx *sql.Tx, serviceID int64, result *core.Result) error {
|
2021-09-11 00:00:04 +02:00
|
|
|
var serviceResultID int64
|
|
|
|
err := tx.QueryRow(
|
2021-07-14 05:23:14 +02:00
|
|
|
`
|
|
|
|
INSERT INTO service_result (service_id, success, errors, connected, status, dns_rcode, certificate_expiration, hostname, ip, duration, timestamp)
|
|
|
|
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
|
2021-09-11 00:00:04 +02:00
|
|
|
RETURNING service_result_id
|
2021-07-14 05:23:14 +02:00
|
|
|
`,
|
|
|
|
serviceID,
|
|
|
|
result.Success,
|
|
|
|
strings.Join(result.Errors, arraySeparator),
|
|
|
|
result.Connected,
|
|
|
|
result.HTTPStatus,
|
|
|
|
result.DNSRCode,
|
|
|
|
result.CertificateExpiration,
|
|
|
|
result.Hostname,
|
|
|
|
result.IP,
|
|
|
|
result.Duration,
|
2021-09-11 00:00:04 +02:00
|
|
|
result.Timestamp.UTC(),
|
|
|
|
).Scan(&serviceResultID)
|
2021-07-14 05:23:14 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return s.insertConditionResults(tx, serviceResultID, result.ConditionResults)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Store) insertConditionResults(tx *sql.Tx, serviceResultID int64, conditionResults []*core.ConditionResult) error {
|
|
|
|
var err error
|
|
|
|
for _, cr := range conditionResults {
|
|
|
|
_, err = tx.Exec("INSERT INTO service_result_condition (service_result_id, condition, success) VALUES ($1, $2, $3)",
|
|
|
|
serviceResultID,
|
|
|
|
cr.Condition,
|
|
|
|
cr.Success,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-07-14 07:40:27 +02:00
|
|
|
func (s *Store) updateServiceUptime(tx *sql.Tx, serviceID int64, result *core.Result) error {
|
|
|
|
unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
|
|
|
|
var successfulExecutions int
|
|
|
|
if result.Success {
|
|
|
|
successfulExecutions = 1
|
|
|
|
}
|
|
|
|
_, err := tx.Exec(
|
|
|
|
`
|
|
|
|
INSERT INTO service_uptime (service_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
|
|
|
|
VALUES ($1, $2, $3, $4, $5)
|
|
|
|
ON CONFLICT(service_id, hour_unix_timestamp) DO UPDATE SET
|
2021-09-11 00:00:04 +02:00
|
|
|
total_executions = excluded.total_executions + service_uptime.total_executions,
|
|
|
|
successful_executions = excluded.successful_executions + service_uptime.successful_executions,
|
|
|
|
total_response_time = excluded.total_response_time + service_uptime.total_response_time
|
2021-07-14 07:40:27 +02:00
|
|
|
`,
|
|
|
|
serviceID,
|
|
|
|
unixTimestampFlooredAtHour,
|
|
|
|
1,
|
|
|
|
successfulExecutions,
|
|
|
|
result.Duration.Milliseconds(),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-07-14 04:59:43 +02:00
|
|
|
func (s *Store) getAllServiceKeys(tx *sql.Tx) (keys []string, err error) {
|
2021-09-03 05:09:29 +02:00
|
|
|
rows, err := tx.Query("SELECT service_key FROM service ORDER BY service_key")
|
2021-07-14 04:17:27 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
for rows.Next() {
|
|
|
|
var key string
|
|
|
|
_ = rows.Scan(&key)
|
|
|
|
keys = append(keys, key)
|
|
|
|
}
|
|
|
|
_ = rows.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-07-15 04:26:51 +02:00
|
|
|
func (s *Store) getServiceStatusByKey(tx *sql.Tx, key string, parameters *paging.ServiceStatusParams) (*core.ServiceStatus, error) {
|
2021-07-16 04:07:30 +02:00
|
|
|
serviceID, serviceGroup, serviceName, err := s.getServiceIDGroupAndNameByKey(tx, key)
|
2021-07-14 04:17:27 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-07-14 07:53:14 +02:00
|
|
|
serviceStatus := core.NewServiceStatus(key, serviceGroup, serviceName)
|
2021-07-15 04:26:51 +02:00
|
|
|
if parameters.EventsPageSize > 0 {
|
|
|
|
if serviceStatus.Events, err = s.getEventsByServiceID(tx, serviceID, parameters.EventsPage, parameters.EventsPageSize); err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
log.Printf("[sql][getServiceStatusByKey] Failed to retrieve events for key=%s: %s", key, err.Error())
|
2021-07-14 04:17:27 +02:00
|
|
|
}
|
|
|
|
}
|
2021-07-15 04:26:51 +02:00
|
|
|
if parameters.ResultsPageSize > 0 {
|
|
|
|
if serviceStatus.Results, err = s.getResultsByServiceID(tx, serviceID, parameters.ResultsPage, parameters.ResultsPageSize); err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
log.Printf("[sql][getServiceStatusByKey] Failed to retrieve results for key=%s: %s", key, err.Error())
|
2021-07-14 04:17:27 +02:00
|
|
|
}
|
|
|
|
}
|
2021-08-13 06:38:39 +02:00
|
|
|
//if parameters.IncludeUptime {
|
|
|
|
// now := time.Now()
|
|
|
|
// serviceStatus.Uptime.LastHour, _, err = s.getServiceUptime(tx, serviceID, now.Add(-time.Hour), now)
|
|
|
|
// serviceStatus.Uptime.LastTwentyFourHours, _, err = s.getServiceUptime(tx, serviceID, now.Add(-24*time.Hour), now)
|
|
|
|
// serviceStatus.Uptime.LastSevenDays, _, err = s.getServiceUptime(tx, serviceID, now.Add(-7*24*time.Hour), now)
|
|
|
|
//}
|
2021-07-14 04:17:27 +02:00
|
|
|
return serviceStatus, nil
|
|
|
|
}
|
|
|
|
|
2021-07-14 04:59:43 +02:00
|
|
|
func (s *Store) getServiceIDGroupAndNameByKey(tx *sql.Tx, key string) (id int64, group, name string, err error) {
|
2021-09-11 00:00:04 +02:00
|
|
|
err = tx.QueryRow(
|
2021-07-18 05:59:17 +02:00
|
|
|
`
|
|
|
|
SELECT service_id, service_group, service_name
|
|
|
|
FROM service
|
|
|
|
WHERE service_key = $1
|
|
|
|
LIMIT 1
|
|
|
|
`,
|
|
|
|
key,
|
2021-09-11 00:00:04 +02:00
|
|
|
).Scan(&id, &group, &name)
|
2021-07-12 06:56:30 +02:00
|
|
|
if err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
if err == sql.ErrNoRows {
|
|
|
|
return 0, "", "", common.ErrServiceNotFound
|
|
|
|
}
|
2021-07-12 06:56:30 +02:00
|
|
|
return 0, "", "", err
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-07-14 04:59:43 +02:00
|
|
|
func (s *Store) getEventsByServiceID(tx *sql.Tx, serviceID int64, page, pageSize int) (events []*core.Event, err error) {
|
|
|
|
rows, err := tx.Query(
|
2021-07-14 04:17:27 +02:00
|
|
|
`
|
|
|
|
SELECT event_type, event_timestamp
|
|
|
|
FROM service_event
|
|
|
|
WHERE service_id = $1
|
2021-07-16 04:07:30 +02:00
|
|
|
ORDER BY service_event_id ASC
|
2021-07-14 04:17:27 +02:00
|
|
|
LIMIT $2 OFFSET $3
|
|
|
|
`,
|
|
|
|
serviceID,
|
|
|
|
pageSize,
|
|
|
|
(page-1)*pageSize,
|
|
|
|
)
|
2021-07-12 06:56:30 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
for rows.Next() {
|
|
|
|
event := &core.Event{}
|
|
|
|
_ = rows.Scan(&event.Type, &event.Timestamp)
|
|
|
|
events = append(events, event)
|
|
|
|
}
|
|
|
|
_ = rows.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-07-14 04:59:43 +02:00
|
|
|
// getResultsByServiceID returns a page of results for the given service,
// ordered from oldest to newest, with each result's condition results attached.
//
// The query fetches in DESC id order (cheapest way to get the latest page);
// each row is then PREPENDED to results, so the returned slice ends up in
// ascending (chronological) order. idResultMap lets the second pass attach
// condition results to the right *core.Result by its row id.
func (s *Store) getResultsByServiceID(tx *sql.Tx, serviceID int64, page, pageSize int) (results []*core.Result, err error) {
	rows, err := tx.Query(
		`
			SELECT service_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, hostname, ip, duration, timestamp
			FROM service_result
			WHERE service_id = $1
			ORDER BY service_result_id DESC -- Normally, we'd sort by timestamp, but sorting by service_result_id is faster
			LIMIT $2 OFFSET $3
		`,
		//`
		//	SELECT * FROM (
		//		SELECT service_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, hostname, ip, duration, timestamp
		//		FROM service_result
		//		WHERE service_id = $1
		//		ORDER BY service_result_id DESC -- Normally, we'd sort by timestamp, but sorting by service_result_id is faster
		//		LIMIT $2 OFFSET $3
		//	)
		//	ORDER BY service_result_id ASC -- Normally, we'd sort by timestamp, but sorting by service_result_id is faster
		//`,
		serviceID,
		pageSize,
		(page-1)*pageSize,
	)
	if err != nil {
		return nil, err
	}
	idResultMap := make(map[int64]*core.Result)
	for rows.Next() {
		result := &core.Result{}
		var id int64
		// errors are stored as a single column joined by arraySeparator; split below
		var joinedErrors string
		_ = rows.Scan(&id, &result.Success, &joinedErrors, &result.Connected, &result.HTTPStatus, &result.DNSRCode, &result.CertificateExpiration, &result.Hostname, &result.IP, &result.Duration, &result.Timestamp)
		if len(joinedErrors) != 0 {
			result.Errors = strings.Split(joinedErrors, arraySeparator)
		}
		//results = append(results, result)
		// This is faster than using a subselect
		// (prepending reverses the DESC query order back to chronological order)
		results = append([]*core.Result{result}, results...)
		idResultMap[id] = result
	}
	_ = rows.Close()
	// Get the conditionResults
	// NOTE: rows is reused for each per-result query; it is closed at the end of each iteration
	for serviceResultID, result := range idResultMap {
		rows, err = tx.Query(
			`
				SELECT condition, success
				FROM service_result_condition
				WHERE service_result_id = $1
			`,
			serviceResultID,
		)
		if err != nil {
			return
		}
		for rows.Next() {
			conditionResult := &core.ConditionResult{}
			if err = rows.Scan(&conditionResult.Condition, &conditionResult.Success); err != nil {
				return
			}
			result.ConditionResults = append(result.ConditionResults, conditionResult)
		}
		_ = rows.Close()
	}
	return
}
|
|
|
|
|
2021-07-14 07:40:27 +02:00
|
|
|
func (s *Store) getServiceUptime(tx *sql.Tx, serviceID int64, from, to time.Time) (uptime float64, avgResponseTime time.Duration, err error) {
|
|
|
|
rows, err := tx.Query(
|
|
|
|
`
|
|
|
|
SELECT SUM(total_executions), SUM(successful_executions), SUM(total_response_time)
|
|
|
|
FROM service_uptime
|
|
|
|
WHERE service_id = $1
|
|
|
|
AND hour_unix_timestamp >= $2
|
|
|
|
AND hour_unix_timestamp <= $3
|
|
|
|
`,
|
|
|
|
serviceID,
|
|
|
|
from.Unix(),
|
|
|
|
to.Unix(),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, 0, err
|
|
|
|
}
|
|
|
|
var totalExecutions, totalSuccessfulExecutions, totalResponseTime int
|
|
|
|
for rows.Next() {
|
|
|
|
_ = rows.Scan(&totalExecutions, &totalSuccessfulExecutions, &totalResponseTime)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
_ = rows.Close()
|
|
|
|
if totalExecutions > 0 {
|
|
|
|
uptime = float64(totalSuccessfulExecutions) / float64(totalExecutions)
|
|
|
|
avgResponseTime = time.Duration(float64(totalResponseTime)/float64(totalExecutions)) * time.Millisecond
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-08-21 16:59:09 +02:00
|
|
|
func (s *Store) getServiceAverageResponseTime(tx *sql.Tx, serviceID int64, from, to time.Time) (int, error) {
|
|
|
|
rows, err := tx.Query(
|
|
|
|
`
|
|
|
|
SELECT SUM(total_executions), SUM(total_response_time)
|
|
|
|
FROM service_uptime
|
|
|
|
WHERE service_id = $1
|
|
|
|
AND total_executions > 0
|
|
|
|
AND hour_unix_timestamp >= $2
|
|
|
|
AND hour_unix_timestamp <= $3
|
|
|
|
`,
|
|
|
|
serviceID,
|
|
|
|
from.Unix(),
|
|
|
|
to.Unix(),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
var totalExecutions, totalResponseTime int
|
|
|
|
for rows.Next() {
|
|
|
|
_ = rows.Scan(&totalExecutions, &totalResponseTime)
|
|
|
|
}
|
|
|
|
_ = rows.Close()
|
|
|
|
if totalExecutions == 0 {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
return int(float64(totalResponseTime) / float64(totalExecutions)), nil
|
|
|
|
}
|
|
|
|
|
2021-08-20 05:07:21 +02:00
|
|
|
func (s *Store) getServiceHourlyAverageResponseTimes(tx *sql.Tx, serviceID int64, from, to time.Time) (map[int64]int, error) {
|
|
|
|
rows, err := tx.Query(
|
|
|
|
`
|
|
|
|
SELECT hour_unix_timestamp, total_executions, total_response_time
|
|
|
|
FROM service_uptime
|
|
|
|
WHERE service_id = $1
|
|
|
|
AND total_executions > 0
|
|
|
|
AND hour_unix_timestamp >= $2
|
|
|
|
AND hour_unix_timestamp <= $3
|
|
|
|
`,
|
|
|
|
serviceID,
|
|
|
|
from.Unix(),
|
|
|
|
to.Unix(),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
var totalExecutions, totalResponseTime int
|
|
|
|
var unixTimestampFlooredAtHour int64
|
|
|
|
hourlyAverageResponseTimes := make(map[int64]int)
|
|
|
|
for rows.Next() {
|
|
|
|
_ = rows.Scan(&unixTimestampFlooredAtHour, &totalExecutions, &totalResponseTime)
|
|
|
|
hourlyAverageResponseTimes[unixTimestampFlooredAtHour] = int(float64(totalResponseTime) / float64(totalExecutions))
|
|
|
|
}
|
|
|
|
_ = rows.Close()
|
|
|
|
return hourlyAverageResponseTimes, nil
|
|
|
|
}
|
|
|
|
|
2021-07-12 08:54:16 +02:00
|
|
|
func (s *Store) getServiceID(tx *sql.Tx, service *core.Service) (int64, error) {
|
2021-09-11 00:00:04 +02:00
|
|
|
var id int64
|
|
|
|
err := tx.QueryRow("SELECT service_id FROM service WHERE service_key = $1", service.Key()).Scan(&id)
|
2021-07-12 06:56:30 +02:00
|
|
|
if err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
if err == sql.ErrNoRows {
|
|
|
|
return 0, common.ErrServiceNotFound
|
|
|
|
}
|
2021-07-12 06:56:30 +02:00
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
return id, nil
|
|
|
|
}
|
|
|
|
|
2021-07-12 08:54:16 +02:00
|
|
|
func (s *Store) getNumberOfEventsByServiceID(tx *sql.Tx, serviceID int64) (int64, error) {
|
2021-07-12 06:56:30 +02:00
|
|
|
var numberOfEvents int64
|
2021-09-11 00:00:04 +02:00
|
|
|
err := tx.QueryRow("SELECT COUNT(1) FROM service_event WHERE service_id = $1", serviceID).Scan(&numberOfEvents)
|
|
|
|
return numberOfEvents, err
|
2021-07-12 06:56:30 +02:00
|
|
|
}
|
|
|
|
|
2021-07-13 04:53:14 +02:00
|
|
|
func (s *Store) getNumberOfResultsByServiceID(tx *sql.Tx, serviceID int64) (int64, error) {
|
|
|
|
var numberOfResults int64
|
2021-09-11 00:00:04 +02:00
|
|
|
err := tx.QueryRow("SELECT COUNT(1) FROM service_result WHERE service_id = $1", serviceID).Scan(&numberOfResults)
|
|
|
|
return numberOfResults, err
|
2021-07-13 04:53:14 +02:00
|
|
|
}
|
|
|
|
|
2021-07-14 07:40:27 +02:00
|
|
|
func (s *Store) getAgeOfOldestServiceUptimeEntry(tx *sql.Tx, serviceID int64) (time.Duration, error) {
|
|
|
|
rows, err := tx.Query(
|
|
|
|
`
|
|
|
|
SELECT hour_unix_timestamp
|
|
|
|
FROM service_uptime
|
|
|
|
WHERE service_id = $1
|
|
|
|
ORDER BY hour_unix_timestamp
|
|
|
|
LIMIT 1
|
|
|
|
`,
|
|
|
|
serviceID,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
var oldestServiceUptimeUnixTimestamp int64
|
|
|
|
var found bool
|
|
|
|
for rows.Next() {
|
|
|
|
_ = rows.Scan(&oldestServiceUptimeUnixTimestamp)
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
_ = rows.Close()
|
|
|
|
if !found {
|
|
|
|
return 0, errNoRowsReturned
|
|
|
|
}
|
|
|
|
return time.Since(time.Unix(oldestServiceUptimeUnixTimestamp, 0)), nil
|
|
|
|
}
|
|
|
|
|
2021-07-12 08:54:16 +02:00
|
|
|
func (s *Store) getLastServiceResultSuccessValue(tx *sql.Tx, serviceID int64) (bool, error) {
|
2021-09-11 00:00:04 +02:00
|
|
|
var success bool
|
|
|
|
err := tx.QueryRow("SELECT success FROM service_result WHERE service_id = $1 ORDER BY service_result_id DESC LIMIT 1", serviceID).Scan(&success)
|
2021-07-12 06:56:30 +02:00
|
|
|
if err != nil {
|
2021-09-11 00:00:04 +02:00
|
|
|
if err == sql.ErrNoRows {
|
|
|
|
return false, errNoRowsReturned
|
|
|
|
}
|
2021-07-12 06:56:30 +02:00
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return success, nil
|
|
|
|
}
|
|
|
|
|
2021-07-13 05:11:33 +02:00
|
|
|
// deleteOldServiceEvents deletes old service events that are no longer needed
|
|
|
|
func (s *Store) deleteOldServiceEvents(tx *sql.Tx, serviceID int64) error {
|
2021-07-13 04:53:14 +02:00
|
|
|
_, err := tx.Exec(
|
2021-07-12 10:25:25 +02:00
|
|
|
`
|
|
|
|
DELETE FROM service_event
|
2021-07-19 05:07:24 +02:00
|
|
|
WHERE service_id = $1
|
|
|
|
AND service_event_id NOT IN (
|
|
|
|
SELECT service_event_id
|
|
|
|
FROM service_event
|
|
|
|
WHERE service_id = $1
|
|
|
|
ORDER BY service_event_id DESC
|
|
|
|
LIMIT $2
|
|
|
|
)
|
2021-07-12 10:25:25 +02:00
|
|
|
`,
|
|
|
|
serviceID,
|
2021-08-13 03:54:23 +02:00
|
|
|
common.MaximumNumberOfEvents,
|
2021-07-12 10:25:25 +02:00
|
|
|
)
|
2021-09-11 00:00:04 +02:00
|
|
|
return err
|
2021-07-12 10:25:25 +02:00
|
|
|
}
|
|
|
|
|
2021-07-13 05:11:33 +02:00
|
|
|
// deleteOldServiceResults deletes old service results that are no longer needed
|
|
|
|
func (s *Store) deleteOldServiceResults(tx *sql.Tx, serviceID int64) error {
|
2021-07-13 04:53:14 +02:00
|
|
|
_, err := tx.Exec(
|
|
|
|
`
|
|
|
|
DELETE FROM service_result
|
|
|
|
WHERE service_id = $1
|
2021-07-19 05:07:24 +02:00
|
|
|
AND service_result_id NOT IN (
|
|
|
|
SELECT service_result_id
|
|
|
|
FROM service_result
|
|
|
|
WHERE service_id = $1
|
|
|
|
ORDER BY service_result_id DESC
|
|
|
|
LIMIT $2
|
|
|
|
)
|
2021-07-13 04:53:14 +02:00
|
|
|
`,
|
|
|
|
serviceID,
|
2021-08-13 03:54:23 +02:00
|
|
|
common.MaximumNumberOfResults,
|
2021-07-13 04:53:14 +02:00
|
|
|
)
|
2021-09-11 00:00:04 +02:00
|
|
|
return err
|
2021-07-12 06:56:30 +02:00
|
|
|
}
|
2021-07-14 07:40:27 +02:00
|
|
|
|
|
|
|
func (s *Store) deleteOldUptimeEntries(tx *sql.Tx, serviceID int64, maxAge time.Time) error {
|
|
|
|
_, err := tx.Exec("DELETE FROM service_uptime WHERE service_id = $1 AND hour_unix_timestamp < $2", serviceID, maxAge.Unix())
|
|
|
|
return err
|
|
|
|
}
|