2021-09-11 00:00:04 +02:00
package sql
2021-07-12 06:56:30 +02:00
import (
2024-05-16 03:29:45 +02:00
"errors"
2024-08-12 04:40:19 +02:00
"fmt"
2021-07-12 06:56:30 +02:00
"testing"
"time"
2024-05-16 03:29:45 +02:00
"github.com/TwiN/gatus/v5/alerting/alert"
2024-05-10 04:56:16 +02:00
"github.com/TwiN/gatus/v5/config/endpoint"
2022-12-06 07:41:09 +01:00
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
2021-07-12 06:56:30 +02:00
)
var (
2024-05-10 04:56:16 +02:00
firstCondition = endpoint . Condition ( "[STATUS] == 200" )
secondCondition = endpoint . Condition ( "[RESPONSE_TIME] < 500" )
thirdCondition = endpoint . Condition ( "[CERTIFICATE_EXPIRATION] < 72h" )
2021-07-12 06:56:30 +02:00
2021-07-17 01:12:14 +02:00
now = time . Now ( )
2021-07-12 06:56:30 +02:00
2024-05-10 04:56:16 +02:00
testEndpoint = endpoint . Endpoint {
2021-07-12 06:56:30 +02:00
Name : "name" ,
Group : "group" ,
URL : "https://example.org/what/ever" ,
Method : "GET" ,
Body : "body" ,
Interval : 30 * time . Second ,
2024-05-10 04:56:16 +02:00
Conditions : [ ] endpoint . Condition { firstCondition , secondCondition , thirdCondition } ,
2021-07-12 06:56:30 +02:00
Alerts : nil ,
NumberOfFailuresInARow : 0 ,
NumberOfSuccessesInARow : 0 ,
}
2024-05-10 04:56:16 +02:00
testSuccessfulResult = endpoint . Result {
2021-07-12 06:56:30 +02:00
Hostname : "example.org" ,
IP : "127.0.0.1" ,
HTTPStatus : 200 ,
Errors : nil ,
Connected : true ,
Success : true ,
2021-07-17 01:12:14 +02:00
Timestamp : now ,
2021-07-12 06:56:30 +02:00
Duration : 150 * time . Millisecond ,
CertificateExpiration : 10 * time . Hour ,
2024-05-10 04:56:16 +02:00
ConditionResults : [ ] * endpoint . ConditionResult {
2021-07-12 06:56:30 +02:00
{
Condition : "[STATUS] == 200" ,
Success : true ,
} ,
{
Condition : "[RESPONSE_TIME] < 500" ,
Success : true ,
} ,
{
Condition : "[CERTIFICATE_EXPIRATION] < 72h" ,
Success : true ,
} ,
} ,
}
2024-05-10 04:56:16 +02:00
testUnsuccessfulResult = endpoint . Result {
2021-07-12 06:56:30 +02:00
Hostname : "example.org" ,
IP : "127.0.0.1" ,
HTTPStatus : 200 ,
Errors : [ ] string { "error-1" , "error-2" } ,
Connected : true ,
Success : false ,
2021-07-17 01:12:14 +02:00
Timestamp : now ,
2021-07-12 06:56:30 +02:00
Duration : 750 * time . Millisecond ,
CertificateExpiration : 10 * time . Hour ,
2024-05-10 04:56:16 +02:00
ConditionResults : [ ] * endpoint . ConditionResult {
2021-07-12 06:56:30 +02:00
{
Condition : "[STATUS] == 200" ,
Success : true ,
} ,
{
Condition : "[RESPONSE_TIME] < 500" ,
Success : false ,
} ,
{
Condition : "[CERTIFICATE_EXPIRATION] < 72h" ,
Success : false ,
} ,
} ,
}
)
func TestNewStore ( t * testing . T ) {
2024-05-16 03:29:45 +02:00
if _ , err := NewStore ( "" , t . TempDir ( ) + "/TestNewStore.db" , false ) ; ! errors . Is ( err , ErrDatabaseDriverNotSpecified ) {
2021-07-12 06:56:30 +02:00
t . Error ( "expected error due to blank driver parameter" )
}
2024-05-16 03:29:45 +02:00
if _ , err := NewStore ( "sqlite" , "" , false ) ; ! errors . Is ( err , ErrPathNotSpecified ) {
2021-07-12 06:56:30 +02:00
t . Error ( "expected error due to blank path parameter" )
}
2024-05-16 03:29:45 +02:00
if store , err := NewStore ( "sqlite" , t . TempDir ( ) + "/TestNewStore.db" , true ) ; err != nil {
2021-07-12 06:56:30 +02:00
t . Error ( "shouldn't have returned any error, got" , err . Error ( ) )
} else {
_ = store . db . Close ( )
}
}
2021-07-16 04:07:30 +02:00
func TestStore_InsertCleansUpOldUptimeEntriesProperly ( t * testing . T ) {
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestStore_InsertCleansUpOldUptimeEntriesProperly.db" , false )
2021-07-16 04:07:30 +02:00
defer store . Close ( )
2023-02-02 04:59:31 +01:00
now := time . Now ( ) . Truncate ( time . Hour )
2021-07-16 04:07:30 +02:00
now = time . Date ( now . Year ( ) , now . Month ( ) , now . Day ( ) , now . Hour ( ) , 0 , 0 , 0 , now . Location ( ) )
2024-05-10 04:56:16 +02:00
store . Insert ( & testEndpoint , & endpoint . Result { Timestamp : now . Add ( - 5 * time . Hour ) , Success : true } )
2021-07-16 04:07:30 +02:00
tx , _ := store . db . Begin ( )
2021-10-23 22:47:12 +02:00
oldest , _ := store . getAgeOfOldestEndpointUptimeEntry ( tx , 1 )
2021-07-16 04:07:30 +02:00
_ = tx . Commit ( )
if oldest . Truncate ( time . Hour ) != 5 * time . Hour {
2021-10-23 22:47:12 +02:00
t . Errorf ( "oldest endpoint uptime entry should've been ~5 hours old, was %s" , oldest )
2021-07-16 04:07:30 +02:00
}
// The oldest cache entry should remain at ~5 hours old, because this entry is more recent
2024-05-10 04:56:16 +02:00
store . Insert ( & testEndpoint , & endpoint . Result { Timestamp : now . Add ( - 3 * time . Hour ) , Success : true } )
2021-07-16 04:07:30 +02:00
tx , _ = store . db . Begin ( )
2021-10-23 22:47:12 +02:00
oldest , _ = store . getAgeOfOldestEndpointUptimeEntry ( tx , 1 )
2021-07-16 04:07:30 +02:00
_ = tx . Commit ( )
if oldest . Truncate ( time . Hour ) != 5 * time . Hour {
2021-10-23 22:47:12 +02:00
t . Errorf ( "oldest endpoint uptime entry should've been ~5 hours old, was %s" , oldest )
2021-07-16 04:07:30 +02:00
}
// The oldest cache entry should now become at ~8 hours old, because this entry is older
2024-05-10 04:56:16 +02:00
store . Insert ( & testEndpoint , & endpoint . Result { Timestamp : now . Add ( - 8 * time . Hour ) , Success : true } )
2021-07-16 04:07:30 +02:00
tx , _ = store . db . Begin ( )
2021-10-23 22:47:12 +02:00
oldest , _ = store . getAgeOfOldestEndpointUptimeEntry ( tx , 1 )
2021-07-16 04:07:30 +02:00
_ = tx . Commit ( )
if oldest . Truncate ( time . Hour ) != 8 * time . Hour {
2021-10-23 22:47:12 +02:00
t . Errorf ( "oldest endpoint uptime entry should've been ~8 hours old, was %s" , oldest )
2021-07-16 04:07:30 +02:00
}
// Since this is one hour before reaching the clean up threshold, the oldest entry should now be this one
2024-08-12 04:40:19 +02:00
store . Insert ( & testEndpoint , & endpoint . Result { Timestamp : now . Add ( - ( uptimeAgeCleanUpThreshold - time . Hour ) ) , Success : true } )
2021-07-16 04:07:30 +02:00
tx , _ = store . db . Begin ( )
2021-10-23 22:47:12 +02:00
oldest , _ = store . getAgeOfOldestEndpointUptimeEntry ( tx , 1 )
2021-07-16 04:07:30 +02:00
_ = tx . Commit ( )
2024-08-12 04:40:19 +02:00
if oldest . Truncate ( time . Hour ) != uptimeAgeCleanUpThreshold - time . Hour {
t . Errorf ( "oldest endpoint uptime entry should've been ~%s hours old, was %s" , uptimeAgeCleanUpThreshold - time . Hour , oldest )
2021-07-16 04:07:30 +02:00
}
2024-08-12 04:40:19 +02:00
// Since this entry is after the uptimeAgeCleanUpThreshold, both this entry as well as the previous
2021-07-16 04:07:30 +02:00
// one should be deleted since they both surpass uptimeRetention
2024-08-12 04:40:19 +02:00
store . Insert ( & testEndpoint , & endpoint . Result { Timestamp : now . Add ( - ( uptimeAgeCleanUpThreshold + time . Hour ) ) , Success : true } )
2021-07-16 04:07:30 +02:00
tx , _ = store . db . Begin ( )
2021-10-23 22:47:12 +02:00
oldest , _ = store . getAgeOfOldestEndpointUptimeEntry ( tx , 1 )
2021-07-16 04:07:30 +02:00
_ = tx . Commit ( )
if oldest . Truncate ( time . Hour ) != 8 * time . Hour {
2021-10-23 22:47:12 +02:00
t . Errorf ( "oldest endpoint uptime entry should've been ~8 hours old, was %s" , oldest )
2021-07-16 04:07:30 +02:00
}
}
2024-08-12 04:40:19 +02:00
// TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly verifies that
// once enough hourly uptime entries accumulate, they get merged so the total entry
// count stays bounded.
func TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly.db", false)
	defer store.Close()
	now := time.Now().Truncate(time.Hour)
	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
	scenarios := []struct {
		numberOfHours            int
		expectedMaxUptimeEntries int64
	}{
		{numberOfHours: 1, expectedMaxUptimeEntries: 1},
		{numberOfHours: 10, expectedMaxUptimeEntries: 10},
		{numberOfHours: 50, expectedMaxUptimeEntries: 50},
		{numberOfHours: 75, expectedMaxUptimeEntries: 75},
		{numberOfHours: 99, expectedMaxUptimeEntries: 99},
		{numberOfHours: 150, expectedMaxUptimeEntries: 100},
		{numberOfHours: 300, expectedMaxUptimeEntries: 100},
		{numberOfHours: 768, expectedMaxUptimeEntries: 100}, // 32 days (in hours), which means anything beyond that won't be persisted anyway
		{numberOfHours: 1000, expectedMaxUptimeEntries: 100},
	}
	// Note that this is not technically an accurate real world representation, because uptime entries are
	// always added in the present, while this test is inserting results from the past to simulate long term
	// uptime entries. Since we want to test the behavior and not the test itself, this is a "best effort" approach.
	for _, scenario := range scenarios {
		t.Run(fmt.Sprintf("num-hours-%d-expected-max-entries-%d", scenario.numberOfHours, scenario.expectedMaxUptimeEntries), func(t *testing.T) {
			// Create one uptime entry per hour, oldest first.
			for hoursAgo := scenario.numberOfHours; hoursAgo > 0; hoursAgo-- {
				if err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-time.Duration(hoursAgo) * time.Hour), Success: true}); err != nil {
					t.Log(err)
				}
			}
			// Count how many uptime entries remain for the endpoint after merging.
			tx, _ := store.db.Begin()
			entryCount, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
			if err != nil {
				t.Log(err)
			}
			_ = tx.Commit()
			if scenario.expectedMaxUptimeEntries < entryCount {
				t.Errorf("expected %d (uptime entries) to be smaller than %d", entryCount, scenario.expectedMaxUptimeEntries)
			}
			store.Clear()
		})
	}
}
// TestStore_getEndpointUptime inserts 32 days of hourly results and checks that
// uptime and average response time are computed correctly over a 30-day window,
// including after an unsuccessful result is added.
func TestStore_getEndpointUptime(t *testing.T) {
	// Fixed: the database file name previously referenced a different test
	// (TestStore_InsertCleansUpEventsAndResultsProperly).
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_getEndpointUptime.db", false)
	defer store.Clear()
	defer store.Close()
	// Add 768 hourly entries (32 days)
	// Daily entries should be merged from hourly entries automatically
	for i := 768; i > 0; i-- {
		err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now().Add(-time.Duration(i) * time.Hour), Duration: time.Second, Success: true})
		if err != nil {
			t.Log(err)
		}
	}
	// Check the number of uptime entries
	tx, _ := store.db.Begin()
	numberOfUptimeEntriesForEndpoint, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
	if err != nil {
		t.Log(err)
	}
	if numberOfUptimeEntriesForEndpoint < 20 || numberOfUptimeEntriesForEndpoint > 200 {
		t.Errorf("expected number of uptime entries to be between 20 and 200, got %d", numberOfUptimeEntriesForEndpoint)
	}
	// Retrieve uptime for the past 30d
	uptime, avgResponseTime, err := store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now())
	if err != nil {
		t.Log(err)
	}
	_ = tx.Commit()
	if avgResponseTime != time.Second {
		t.Errorf("expected average response time to be %s, got %s", time.Second, avgResponseTime)
	}
	if uptime != 1 {
		t.Errorf("expected uptime to be 1, got %f", uptime)
	}
	// Add a new unsuccessful result, which should impact the uptime
	err = store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now(), Duration: time.Second, Success: false})
	if err != nil {
		t.Log(err)
	}
	// Retrieve uptime for the past 30d
	tx, _ = store.db.Begin()
	uptime, _, err = store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now())
	if err != nil {
		t.Log(err)
	}
	_ = tx.Commit()
	if uptime == 1 {
		t.Errorf("expected uptime to be less than 1, got %f", uptime)
	}
	// Retrieve uptime for the past 30d, but excluding the last 24h
	// This is not a real use case as there is no way for users to exclude the last 24h, but this is a great way
	// to ensure that hourly merging works as intended
	tx, _ = store.db.Begin()
	uptimeExcludingLast24h, _, err := store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now().Add(-24*time.Hour))
	if err != nil {
		t.Log(err)
	}
	_ = tx.Commit()
	if uptimeExcludingLast24h == uptime {
		// Fixed: message previously read "to to be different from uptime, got" with nothing after "got".
		t.Errorf("expected uptimeExcludingLast24h (%f) to be different from uptime (%f)", uptimeExcludingLast24h, uptime)
	}
}
2021-07-16 04:07:30 +02:00
func TestStore_InsertCleansUpEventsAndResultsProperly ( t * testing . T ) {
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestStore_InsertCleansUpEventsAndResultsProperly.db" , false )
2024-08-12 04:40:19 +02:00
defer store . Clear ( )
2021-07-16 04:07:30 +02:00
defer store . Close ( )
for i := 0 ; i < resultsCleanUpThreshold + eventsCleanUpThreshold ; i ++ {
2021-10-23 22:47:12 +02:00
store . Insert ( & testEndpoint , & testSuccessfulResult )
store . Insert ( & testEndpoint , & testUnsuccessfulResult )
ss , _ := store . GetEndpointStatusByKey ( testEndpoint . Key ( ) , paging . NewEndpointStatusParams ( ) . WithResults ( 1 , common . MaximumNumberOfResults * 5 ) . WithEvents ( 1 , common . MaximumNumberOfEvents * 5 ) )
2021-07-16 04:07:30 +02:00
if len ( ss . Results ) > resultsCleanUpThreshold + 1 {
t . Errorf ( "number of results shouldn't have exceeded %d, reached %d" , resultsCleanUpThreshold , len ( ss . Results ) )
}
if len ( ss . Events ) > eventsCleanUpThreshold + 1 {
t . Errorf ( "number of events shouldn't have exceeded %d, reached %d" , eventsCleanUpThreshold , len ( ss . Events ) )
}
}
}
2021-07-18 03:06:15 +02:00
2024-05-16 03:29:45 +02:00
// TestStore_InsertWithCaching exercises the write-through cache: inserts must be
// visible through reads, and Clear must wipe both the database and the cache.
func TestStore_InsertWithCaching(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertWithCaching.db", true)
	defer store.Close()
	// Add 2 results
	store.Insert(&testEndpoint, &testSuccessfulResult)
	store.Insert(&testEndpoint, &testSuccessfulResult)
	// Verify that they exist
	statuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
	if len(statuses) != 1 {
		t.Fatalf("expected 1 EndpointStatus, got %d", len(statuses))
	}
	if len(statuses[0].Results) != 2 {
		t.Fatalf("expected 2 results, got %d", len(statuses[0].Results))
	}
	// Add 2 more results
	store.Insert(&testEndpoint, &testUnsuccessfulResult)
	store.Insert(&testEndpoint, &testUnsuccessfulResult)
	// Verify that they exist
	statuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
	if len(statuses) != 1 {
		t.Fatalf("expected 1 EndpointStatus, got %d", len(statuses))
	}
	if len(statuses[0].Results) != 4 {
		t.Fatalf("expected 4 results, got %d", len(statuses[0].Results))
	}
	// Clear the store, which should also clear the cache
	store.Clear()
	// Verify that they no longer exist
	statuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
	if len(statuses) != 0 {
		t.Fatalf("expected 0 EndpointStatus, got %d", len(statuses))
	}
}
2021-07-18 03:06:15 +02:00
func TestStore_Persistence ( t * testing . T ) {
2021-11-05 02:40:05 +01:00
path := t . TempDir ( ) + "/TestStore_Persistence.db"
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , path , false )
2021-10-23 22:47:12 +02:00
store . Insert ( & testEndpoint , & testSuccessfulResult )
store . Insert ( & testEndpoint , & testUnsuccessfulResult )
if uptime , _ := store . GetUptimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; uptime != 0.5 {
2021-08-13 06:38:39 +02:00
t . Errorf ( "the uptime over the past 1h should've been 0.5, got %f" , uptime )
}
2021-10-23 22:47:12 +02:00
if uptime , _ := store . GetUptimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour * 24 ) , time . Now ( ) ) ; uptime != 0.5 {
2021-08-13 06:38:39 +02:00
t . Errorf ( "the uptime over the past 24h should've been 0.5, got %f" , uptime )
}
2021-10-23 22:47:12 +02:00
if uptime , _ := store . GetUptimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour * 24 * 7 ) , time . Now ( ) ) ; uptime != 0.5 {
2021-08-13 06:38:39 +02:00
t . Errorf ( "the uptime over the past 7d should've been 0.5, got %f" , uptime )
}
2024-08-12 04:40:19 +02:00
if uptime , _ := store . GetUptimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour * 24 * 30 ) , time . Now ( ) ) ; uptime != 0.5 {
t . Errorf ( "the uptime over the past 30d should've been 0.5, got %f" , uptime )
}
2021-10-23 22:47:12 +02:00
ssFromOldStore , _ := store . GetEndpointStatus ( testEndpoint . Group , testEndpoint . Name , paging . NewEndpointStatusParams ( ) . WithResults ( 1 , common . MaximumNumberOfResults ) . WithEvents ( 1 , common . MaximumNumberOfEvents ) )
2021-08-13 06:38:39 +02:00
if ssFromOldStore == nil || ssFromOldStore . Group != "group" || ssFromOldStore . Name != "name" || len ( ssFromOldStore . Events ) != 3 || len ( ssFromOldStore . Results ) != 2 {
2021-07-18 03:06:15 +02:00
store . Close ( )
t . Fatal ( "sanity check failed" )
}
store . Close ( )
2022-08-12 02:47:29 +02:00
store , _ = NewStore ( "sqlite" , path , false )
2021-07-18 03:06:15 +02:00
defer store . Close ( )
2021-10-23 22:47:12 +02:00
ssFromNewStore , _ := store . GetEndpointStatus ( testEndpoint . Group , testEndpoint . Name , paging . NewEndpointStatusParams ( ) . WithResults ( 1 , common . MaximumNumberOfResults ) . WithEvents ( 1 , common . MaximumNumberOfEvents ) )
2021-08-13 06:38:39 +02:00
if ssFromNewStore == nil || ssFromNewStore . Group != "group" || ssFromNewStore . Name != "name" || len ( ssFromNewStore . Events ) != 3 || len ( ssFromNewStore . Results ) != 2 {
2021-07-18 03:06:15 +02:00
t . Fatal ( "failed sanity check" )
}
if ssFromNewStore == ssFromOldStore {
t . Fatal ( "ss from the old and new store should have a different memory address" )
}
for i := range ssFromNewStore . Events {
if ssFromNewStore . Events [ i ] . Timestamp != ssFromOldStore . Events [ i ] . Timestamp {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Events [ i ] . Type != ssFromOldStore . Events [ i ] . Type {
t . Error ( "new and old should've been the same" )
}
}
for i := range ssFromOldStore . Results {
if ssFromNewStore . Results [ i ] . Timestamp != ssFromOldStore . Results [ i ] . Timestamp {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Results [ i ] . Success != ssFromOldStore . Results [ i ] . Success {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Results [ i ] . Connected != ssFromOldStore . Results [ i ] . Connected {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Results [ i ] . IP != ssFromOldStore . Results [ i ] . IP {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Results [ i ] . Hostname != ssFromOldStore . Results [ i ] . Hostname {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Results [ i ] . HTTPStatus != ssFromOldStore . Results [ i ] . HTTPStatus {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Results [ i ] . DNSRCode != ssFromOldStore . Results [ i ] . DNSRCode {
t . Error ( "new and old should've been the same" )
}
if len ( ssFromNewStore . Results [ i ] . Errors ) != len ( ssFromOldStore . Results [ i ] . Errors ) {
t . Error ( "new and old should've been the same" )
} else {
for j := range ssFromOldStore . Results [ i ] . Errors {
if ssFromNewStore . Results [ i ] . Errors [ j ] != ssFromOldStore . Results [ i ] . Errors [ j ] {
t . Error ( "new and old should've been the same" )
}
}
}
if len ( ssFromNewStore . Results [ i ] . ConditionResults ) != len ( ssFromOldStore . Results [ i ] . ConditionResults ) {
t . Error ( "new and old should've been the same" )
} else {
for j := range ssFromOldStore . Results [ i ] . ConditionResults {
if ssFromNewStore . Results [ i ] . ConditionResults [ j ] . Condition != ssFromOldStore . Results [ i ] . ConditionResults [ j ] . Condition {
t . Error ( "new and old should've been the same" )
}
if ssFromNewStore . Results [ i ] . ConditionResults [ j ] . Success != ssFromOldStore . Results [ i ] . ConditionResults [ j ] . Success {
t . Error ( "new and old should've been the same" )
}
}
}
}
}
2021-07-18 05:59:17 +02:00
2021-07-18 06:34:22 +02:00
func TestStore_Save ( t * testing . T ) {
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestStore_Save.db" , false )
2021-07-18 06:34:22 +02:00
defer store . Close ( )
if store . Save ( ) != nil {
t . Error ( "Save shouldn't do anything for this store" )
}
}
2021-07-19 05:13:19 +02:00
// Note that there are much more extensive tests in /storage/store/store_test.go.
2021-07-19 05:02:27 +02:00
// This test is simply an extra sanity check
2021-07-19 05:13:19 +02:00
func TestStore_SanityCheck ( t * testing . T ) {
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestStore_SanityCheck.db" , false )
2021-07-19 05:02:27 +02:00
defer store . Close ( )
2021-10-23 22:47:12 +02:00
store . Insert ( & testEndpoint , & testSuccessfulResult )
endpointStatuses , _ := store . GetAllEndpointStatuses ( paging . NewEndpointStatusParams ( ) )
if numberOfEndpointStatuses := len ( endpointStatuses ) ; numberOfEndpointStatuses != 1 {
t . Fatalf ( "expected 1 EndpointStatus, got %d" , numberOfEndpointStatuses )
}
store . Insert ( & testEndpoint , & testUnsuccessfulResult )
// Both results inserted are for the same endpoint, therefore, the count shouldn't have increased
endpointStatuses , _ = store . GetAllEndpointStatuses ( paging . NewEndpointStatusParams ( ) )
if numberOfEndpointStatuses := len ( endpointStatuses ) ; numberOfEndpointStatuses != 1 {
t . Fatalf ( "expected 1 EndpointStatus, got %d" , numberOfEndpointStatuses )
}
if hourlyAverageResponseTime , err := store . GetHourlyAverageResponseTimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - 24 * time . Hour ) , time . Now ( ) ) ; err != nil {
2021-08-20 05:07:21 +02:00
t . Errorf ( "expected no error, got %v" , err )
} else if len ( hourlyAverageResponseTime ) != 1 {
t . Errorf ( "expected 1 hour to have had a result in the past 24 hours, got %d" , len ( hourlyAverageResponseTime ) )
}
2021-10-23 22:47:12 +02:00
if uptime , _ := store . GetUptimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - 24 * time . Hour ) , time . Now ( ) ) ; uptime != 0.5 {
2021-08-21 17:17:39 +02:00
t . Errorf ( "expected uptime of last 24h to be 0.5, got %f" , uptime )
}
2021-10-23 22:47:12 +02:00
if averageResponseTime , _ := store . GetAverageResponseTimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - 24 * time . Hour ) , time . Now ( ) ) ; averageResponseTime != 450 {
2021-08-21 17:17:39 +02:00
t . Errorf ( "expected average response time of last 24h to be 450, got %d" , averageResponseTime )
}
2021-10-23 22:47:12 +02:00
ss , _ := store . GetEndpointStatus ( testEndpoint . Group , testEndpoint . Name , paging . NewEndpointStatusParams ( ) . WithResults ( 1 , 20 ) . WithEvents ( 1 , 20 ) )
2021-07-19 05:02:27 +02:00
if ss == nil {
2021-10-23 22:47:12 +02:00
t . Fatalf ( "Store should've had key '%s', but didn't" , testEndpoint . Key ( ) )
2021-07-19 05:02:27 +02:00
}
if len ( ss . Events ) != 3 {
2021-10-23 22:47:12 +02:00
t . Errorf ( "Endpoint '%s' should've had 3 events, got %d" , ss . Name , len ( ss . Events ) )
2021-07-19 05:02:27 +02:00
}
if len ( ss . Results ) != 2 {
2021-10-23 22:47:12 +02:00
t . Errorf ( "Endpoint '%s' should've had 2 results, got %d" , ss . Name , len ( ss . Results ) )
2021-07-19 05:13:19 +02:00
}
2021-10-23 22:47:12 +02:00
if deleted := store . DeleteAllEndpointStatusesNotInKeys ( [ ] string { "invalid-key-which-means-everything-should-get-deleted" } ) ; deleted != 1 {
2021-07-19 05:13:19 +02:00
t . Errorf ( "%d entries should've been deleted, got %d" , 1 , deleted )
2021-07-19 05:02:27 +02:00
}
2021-10-23 22:47:12 +02:00
if deleted := store . DeleteAllEndpointStatusesNotInKeys ( [ ] string { } ) ; deleted != 0 {
2021-09-11 00:52:09 +02:00
t . Errorf ( "There should've been no entries left to delete, got %d" , deleted )
}
2021-07-19 05:02:27 +02:00
}
2021-07-18 05:59:17 +02:00
// TestStore_InvalidTransaction tests what happens if an invalid transaction is passed as parameter
func TestStore_InvalidTransaction ( t * testing . T ) {
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestStore_InvalidTransaction.db" , false )
2021-07-18 05:59:17 +02:00
defer store . Close ( )
tx , _ := store . db . Begin ( )
tx . Commit ( )
2021-10-23 22:47:12 +02:00
if _ , err := store . insertEndpoint ( tx , & testEndpoint ) ; err == nil {
2021-07-18 06:34:22 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2024-05-10 04:56:16 +02:00
if err := store . insertEndpointEvent ( tx , 1 , endpoint . NewEventFromResult ( & testSuccessfulResult ) ) ; err == nil {
2021-07-18 06:05:22 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if err := store . insertEndpointResult ( tx , 1 , & testSuccessfulResult ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
if err := store . insertConditionResults ( tx , 1 , testSuccessfulResult . ConditionResults ) ; err == nil {
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if err := store . updateEndpointUptime ( tx , 1 , & testSuccessfulResult ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getAllEndpointKeys ( tx ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getEndpointStatusByKey ( tx , testEndpoint . Key ( ) , paging . NewEndpointStatusParams ( ) . WithResults ( 1 , 20 ) ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getEndpointEventsByEndpointID ( tx , 1 , 1 , 50 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getEndpointResultsByEndpointID ( tx , 1 , 1 , 50 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if err := store . deleteOldEndpointEvents ( tx , 1 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if err := store . deleteOldEndpointResults ( tx , 1 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , _ , err := store . getEndpointUptime ( tx , 1 , time . Now ( ) , time . Now ( ) ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getEndpointID ( tx , & testEndpoint ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getNumberOfEventsByEndpointID ( tx , 1 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getNumberOfResultsByEndpointID ( tx , 1 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getAgeOfOldestEndpointUptimeEntry ( tx , 1 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . getLastEndpointResultSuccessValue ( tx , 1 ) ; err == nil {
2021-07-18 05:59:17 +02:00
t . Error ( "should've returned an error, because the transaction was already committed" )
}
}
func TestStore_NoRows ( t * testing . T ) {
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestStore_NoRows.db" , false )
2021-07-18 05:59:17 +02:00
defer store . Close ( )
tx , _ := store . db . Begin ( )
defer tx . Rollback ( )
2024-05-16 03:29:45 +02:00
if _ , err := store . getLastEndpointResultSuccessValue ( tx , 1 ) ; ! errors . Is ( err , errNoRowsReturned ) {
2021-07-18 05:59:17 +02:00
t . Errorf ( "should've %v, got %v" , errNoRowsReturned , err )
}
2024-05-16 03:29:45 +02:00
if _ , err := store . getAgeOfOldestEndpointUptimeEntry ( tx , 1 ) ; ! errors . Is ( err , errNoRowsReturned ) {
2021-07-18 05:59:17 +02:00
t . Errorf ( "should've %v, got %v" , errNoRowsReturned , err )
}
}
2021-09-11 00:52:09 +02:00
// This tests very unlikely cases where a table is deleted.
func TestStore_BrokenSchema ( t * testing . T ) {
2022-08-12 02:47:29 +02:00
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestStore_BrokenSchema.db" , false )
2021-09-11 00:52:09 +02:00
defer store . Close ( )
2021-10-23 22:47:12 +02:00
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetAverageResponseTimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetAllEndpointStatuses ( paging . NewEndpointStatusParams ( ) ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
// Break
2021-10-23 22:47:12 +02:00
_ , _ = store . db . Exec ( "DROP TABLE endpoints" )
2022-08-12 02:47:29 +02:00
// And now we'll try to insert something in our broken schema
2021-10-23 22:47:12 +02:00
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetAverageResponseTimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetHourlyAverageResponseTimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetAllEndpointStatuses ( paging . NewEndpointStatusParams ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetUptimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetEndpointStatusByKey ( testEndpoint . Key ( ) , paging . NewEndpointStatusParams ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
// Repair
if err := store . createSchema ( ) ; err != nil {
t . Fatal ( "schema should've been repaired" )
}
store . Clear ( )
2021-10-23 22:47:12 +02:00
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
// Break
2021-10-23 22:47:12 +02:00
_ , _ = store . db . Exec ( "DROP TABLE endpoint_events" )
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, because this should silently fails, got" , err . Error ( ) )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetAllEndpointStatuses ( paging . NewEndpointStatusParams ( ) . WithResults ( 1 , 1 ) . WithEvents ( 1 , 1 ) ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, because this should silently fail, got" , err . Error ( ) )
}
// Repair
if err := store . createSchema ( ) ; err != nil {
t . Fatal ( "schema should've been repaired" )
}
store . Clear ( )
2021-10-23 22:47:12 +02:00
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
// Break
2021-10-23 22:47:12 +02:00
_ , _ = store . db . Exec ( "DROP TABLE endpoint_results" )
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetAllEndpointStatuses ( paging . NewEndpointStatusParams ( ) . WithResults ( 1 , 1 ) . WithEvents ( 1 , 1 ) ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, because this should silently fail, got" , err . Error ( ) )
}
// Repair
if err := store . createSchema ( ) ; err != nil {
t . Fatal ( "schema should've been repaired" )
}
store . Clear ( )
2021-10-23 22:47:12 +02:00
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
// Break
2021-10-23 22:47:12 +02:00
_ , _ = store . db . Exec ( "DROP TABLE endpoint_result_conditions" )
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
// Repair
if err := store . createSchema ( ) ; err != nil {
t . Fatal ( "schema should've been repaired" )
}
store . Clear ( )
2021-10-23 22:47:12 +02:00
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
// Break
2021-10-23 22:47:12 +02:00
_ , _ = store . db . Exec ( "DROP TABLE endpoint_uptimes" )
if err := store . Insert ( & testEndpoint , & testSuccessfulResult ) ; err != nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected no error, because this should silently fails, got" , err . Error ( ) )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetAverageResponseTimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetHourlyAverageResponseTimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
2021-10-23 22:47:12 +02:00
if _ , err := store . GetUptimeByKey ( testEndpoint . Key ( ) , time . Now ( ) . Add ( - time . Hour ) , time . Now ( ) ) ; err == nil {
2021-09-11 00:52:09 +02:00
t . Fatal ( "expected an error" )
}
}
2022-08-12 02:47:29 +02:00
func TestCacheKey ( t * testing . T ) {
scenarios := [ ] struct {
endpointKey string
params paging . EndpointStatusParams
overrideCacheKey string
expectedCacheKey string
wantErr bool
} {
{
endpointKey : "simple" ,
params : paging . EndpointStatusParams { EventsPage : 1 , EventsPageSize : 2 , ResultsPage : 3 , ResultsPageSize : 4 } ,
expectedCacheKey : "simple-1-2-3-4" ,
wantErr : false ,
} ,
{
endpointKey : "with-hyphen" ,
params : paging . EndpointStatusParams { EventsPage : 0 , EventsPageSize : 0 , ResultsPage : 1 , ResultsPageSize : 20 } ,
expectedCacheKey : "with-hyphen-0-0-1-20" ,
wantErr : false ,
} ,
{
endpointKey : "with-multiple-hyphens" ,
params : paging . EndpointStatusParams { EventsPage : 0 , EventsPageSize : 0 , ResultsPage : 2 , ResultsPageSize : 20 } ,
expectedCacheKey : "with-multiple-hyphens-0-0-2-20" ,
wantErr : false ,
} ,
{
overrideCacheKey : "invalid-a-2-3-4" ,
wantErr : true ,
} ,
{
overrideCacheKey : "invalid-1-a-3-4" ,
wantErr : true ,
} ,
{
overrideCacheKey : "invalid-1-2-a-4" ,
wantErr : true ,
} ,
{
overrideCacheKey : "invalid-1-2-3-a" ,
wantErr : true ,
} ,
{
overrideCacheKey : "notenoughhyphen1-2-3-4" ,
wantErr : true ,
} ,
}
for _ , scenario := range scenarios {
t . Run ( scenario . expectedCacheKey + scenario . overrideCacheKey , func ( t * testing . T ) {
var cacheKey string
if len ( scenario . overrideCacheKey ) > 0 {
cacheKey = scenario . overrideCacheKey
} else {
cacheKey = generateCacheKey ( scenario . endpointKey , & scenario . params )
if cacheKey != scenario . expectedCacheKey {
t . Errorf ( "expected %s, got %s" , scenario . expectedCacheKey , cacheKey )
}
}
extractedEndpointKey , extractedParams , err := extractKeyAndParamsFromCacheKey ( cacheKey )
if ( err != nil ) != scenario . wantErr {
t . Errorf ( "expected error %v, got %v" , scenario . wantErr , err )
return
}
if err != nil {
// If there's an error, we don't need to check the extracted values
return
}
if extractedEndpointKey != scenario . endpointKey {
t . Errorf ( "expected endpointKey %s, got %s" , scenario . endpointKey , extractedEndpointKey )
}
if extractedParams . EventsPage != scenario . params . EventsPage {
t . Errorf ( "expected EventsPage %d, got %d" , scenario . params . EventsPage , extractedParams . EventsPage )
}
if extractedParams . EventsPageSize != scenario . params . EventsPageSize {
t . Errorf ( "expected EventsPageSize %d, got %d" , scenario . params . EventsPageSize , extractedParams . EventsPageSize )
}
if extractedParams . ResultsPage != scenario . params . ResultsPage {
t . Errorf ( "expected ResultsPage %d, got %d" , scenario . params . ResultsPage , extractedParams . ResultsPage )
}
if extractedParams . ResultsPageSize != scenario . params . ResultsPageSize {
t . Errorf ( "expected ResultsPageSize %d, got %d" , scenario . params . ResultsPageSize , extractedParams . ResultsPageSize )
}
} )
}
}
2024-05-16 03:29:45 +02:00
func TestTriggeredEndpointAlertsPersistence ( t * testing . T ) {
store , _ := NewStore ( "sqlite" , t . TempDir ( ) + "/TestTriggeredEndpointAlertsPersistence.db" , false )
defer store . Close ( )
yes , desc := false , "description"
ep := testEndpoint
ep . NumberOfSuccessesInARow = 0
alrt := & alert . Alert {
Type : alert . TypePagerDuty ,
Enabled : & yes ,
FailureThreshold : 4 ,
SuccessThreshold : 2 ,
Description : & desc ,
SendOnResolved : & yes ,
Triggered : true ,
ResolveKey : "1234567" ,
}
// Alert just triggered, so NumberOfSuccessesInARow is 0
if err := store . UpsertTriggeredEndpointAlert ( & ep , alrt ) ; err != nil {
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
exists , resolveKey , numberOfSuccessesInARow , err := store . GetTriggeredEndpointAlert ( & ep , alrt )
if err != nil {
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
if ! exists {
t . Error ( "expected triggered alert to exist" )
}
if resolveKey != alrt . ResolveKey {
t . Errorf ( "expected resolveKey %s, got %s" , alrt . ResolveKey , resolveKey )
}
if numberOfSuccessesInARow != ep . NumberOfSuccessesInARow {
t . Errorf ( "expected persisted NumberOfSuccessesInARow to be %d, got %d" , ep . NumberOfSuccessesInARow , numberOfSuccessesInARow )
}
// Endpoint just had a successful evaluation, so NumberOfSuccessesInARow is now 1
ep . NumberOfSuccessesInARow ++
if err := store . UpsertTriggeredEndpointAlert ( & ep , alrt ) ; err != nil {
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
exists , resolveKey , numberOfSuccessesInARow , err = store . GetTriggeredEndpointAlert ( & ep , alrt )
if err != nil {
t . Error ( "expected no error, got" , err . Error ( ) )
}
if ! exists {
t . Error ( "expected triggered alert to exist" )
}
if resolveKey != alrt . ResolveKey {
t . Errorf ( "expected resolveKey %s, got %s" , alrt . ResolveKey , resolveKey )
}
if numberOfSuccessesInARow != ep . NumberOfSuccessesInARow {
t . Errorf ( "expected persisted NumberOfSuccessesInARow to be %d, got %d" , ep . NumberOfSuccessesInARow , numberOfSuccessesInARow )
}
// Simulate the endpoint having another successful evaluation, which means the alert is now resolved,
// and we should delete the triggered alert from the store
ep . NumberOfSuccessesInARow ++
if err := store . DeleteTriggeredEndpointAlert ( & ep , alrt ) ; err != nil {
t . Fatal ( "expected no error, got" , err . Error ( ) )
}
exists , _ , _ , err = store . GetTriggeredEndpointAlert ( & ep , alrt )
if err != nil {
t . Error ( "expected no error, got" , err . Error ( ) )
}
if exists {
t . Error ( "expected triggered alert to no longer exist as it has been deleted" )
}
}
// TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint verifies that
// triggered alerts whose checksums are no longer part of an endpoint's
// configuration are purged, while alerts belonging to other endpoints are left
// untouched.
func TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(t *testing.T) {
	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint.db", false)
	defer store.Close()
	// NOTE: this variable was previously named "yes" despite holding false,
	// which made the field assignments below misleading; renamed to "no".
	no, desc := false, "description"
	ep1 := testEndpoint
	ep1.Name = "ep1"
	ep2 := testEndpoint
	ep2.Name = "ep2"
	alert1 := alert.Alert{
		Type:             alert.TypePagerDuty,
		Enabled:          &no,
		FailureThreshold: 4,
		SuccessThreshold: 2,
		Description:      &desc,
		SendOnResolved:   &no,
		Triggered:        true,
		ResolveKey:       "1234567",
	}
	alert2 := alert1
	alert2.Type, alert2.ResolveKey = alert.TypeSlack, ""
	alert3 := alert2 // same configuration as alert2, but attached to ep2
	if err := store.UpsertTriggeredEndpointAlert(&ep1, &alert1); err != nil {
		t.Fatal("expected no error, got", err.Error())
	}
	if err := store.UpsertTriggeredEndpointAlert(&ep1, &alert2); err != nil {
		t.Fatal("expected no error, got", err.Error())
	}
	if err := store.UpsertTriggeredEndpointAlert(&ep2, &alert3); err != nil {
		t.Fatal("expected no error, got", err.Error())
	}
	// All three alerts should exist right after being upserted.
	// FIX: the first message used to say "expected alert1 to have been deleted",
	// which contradicted the !exists check it guards.
	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert1); !exists {
		t.Error("expected alert1 to exist for ep1")
	}
	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert2); !exists {
		t.Error("expected alert2 to exist for ep1")
	}
	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
		t.Error("expected alert3 to exist for ep2")
	}
	// Now we simulate the alert configuration being updated, and the alert being resolved
	if deleted := store.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(&ep1, []string{alert2.Checksum()}); deleted != 1 {
		t.Errorf("expected 1 triggered alert to be deleted, got %d", deleted)
	}
	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert1); exists {
		t.Error("expected alert1 to have been deleted")
	}
	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert2); !exists {
		t.Error("expected alert2 to exist for ep1")
	}
	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
		t.Error("expected alert3 to exist for ep2")
	}
	// Now let's just assume all alerts for ep1 were removed
	if deleted := store.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(&ep1, []string{}); deleted != 1 {
		t.Errorf("expected 1 triggered alert to be deleted, got %d", deleted)
	}
	// Make sure the alert for ep2 still exists
	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
		t.Error("expected alert3 to exist for ep2")
	}
}