Major fixes and improvements

TwinProduction 2021-07-15 22:07:30 -04:00 committed by Chris
parent fed32d3909
commit d3a81a2d57
14 changed files with 378 additions and 158 deletions

View File

@ -176,7 +176,9 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
func validateStorageConfig(config *Config) error {
if config.Storage == nil {
config.Storage = &storage.Config{}
config.Storage = &storage.Config{
Type: storage.TypeInMemory,
}
}
err := storage.Initialize(config.Storage)
if err != nil {

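In practice, a configuration with no storage section now falls back to the in-memory store. The fallback is equivalent to the user having written the following (a sketch restating the diff above, not additional code from the commit):

// Default applied by validateStorageConfig when config.Storage is nil:
config.Storage = &storage.Config{
    Type: storage.TypeInMemory, // no File set, so nothing is persisted
}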
View File

@ -1028,6 +1028,49 @@ services:
}
}
func TestParseAndValidateConfigBytesWithInvalidServiceName(t *testing.T) {
_, err := parseAndValidateConfigBytes([]byte(`
services:
- name: ""
url: https://twinnation.org/health
conditions:
- "[STATUS] == 200"
`))
if err != core.ErrServiceWithNoName {
t.Error("should've returned an error")
}
}
func TestParseAndValidateConfigBytesWithInvalidStorageConfig(t *testing.T) {
_, err := parseAndValidateConfigBytes([]byte(`
storage:
type: sqlite
services:
- name: example
url: https://example.org
conditions:
- "[STATUS] == 200"
`))
if err == nil {
t.Error("should've returned an error, because a file must be specified when the storage type is sqlite")
}
}
func TestParseAndValidateConfigBytesWithInvalidYAML(t *testing.T) {
_, err := parseAndValidateConfigBytes([]byte(`
storage:
invalid yaml
services:
- name: example
url: https://example.org
conditions:
- "[STATUS] == 200"
`))
if err == nil {
t.Error("should've returned an error")
}
}
func TestParseAndValidateConfigBytesWithInvalidSecurityConfig(t *testing.T) {
_, err := parseAndValidateConfigBytes([]byte(`
security:
@ -1041,7 +1084,7 @@ services:
- "[STATUS] == 200"
`))
if err == nil {
t.Error("Function should've returned an error")
t.Error("should've returned an error")
}
}
@ -1173,7 +1216,7 @@ kubernetes:
target-path: "/health"
`))
if err == nil {
t.Error("Function should've returned an error because providing a service-template is mandatory")
t.Error("should've returned an error because providing a service-template is mandatory")
}
}
@ -1192,7 +1235,7 @@ kubernetes:
target-path: "/health"
`))
if err == nil {
t.Error("Function should've returned an error because testing with ClusterModeIn isn't supported")
t.Error("should've returned an error because testing with ClusterModeIn isn't supported")
}
}

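A valid sqlite storage configuration should parse without error; a hypothetical positive-case test in the same style as the ones above (the test name is an assumption, it is not part of this commit):

func TestParseAndValidateConfigBytesWithValidStorageConfig(t *testing.T) {
    _, err := parseAndValidateConfigBytes([]byte(`
storage:
  type: sqlite
  file: ` + t.TempDir() + `/gatus.db
services:
  - name: example
    url: https://example.org
    conditions:
      - "[STATUS] == 200"
`))
    if err != nil {
        t.Error("expected no error, got", err)
    }
}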
View File

@ -39,11 +39,11 @@ var (
server *http.Server
)
func init() {
if err := cache.StartJanitor(); err != nil {
log.Fatal("[controller][init] Failed to start cache janitor:", err.Error())
}
}
//func init() { // XXX: There's probably no value in using the janitor, since the cache max size is so small
// if err := cache.StartJanitor(); err != nil {
// log.Fatal("[controller][init] Failed to start cache janitor:", err.Error())
// }
//}
// Handle creates the router and starts the server
func Handle(securityConfig *security.Config, webConfig *config.WebConfig, enableMetrics bool) {

View File

@ -42,6 +42,7 @@ func start(cfg *config.Config) {
func stop() {
watchdog.Shutdown()
storage.Get().Close()
controller.Shutdown()
}

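controller.Shutdown is called here, but its body isn't shown in this diff. A minimal sketch of what it is expected to do, assuming it reuses the package-level server variable declared in the controller package above and the "context" import (the actual implementation may differ):

// Shutdown stops the HTTP server started by Handle.
func Shutdown() {
    if server != nil {
        _ = server.Shutdown(context.Background())
        server = nil
    }
}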
View File

@ -1,8 +1,12 @@
package storage
// Config is the configuration for alerting providers
// Config is the configuration for storage
type Config struct {
// File is the path of the file to use for persistence
// If blank, persistence is disabled.
// If blank, persistence is disabled
File string `yaml:"file"`
// Type of store
// If blank, uses the default in-memory store
Type Type `yaml:"type"`
}

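Taken together, Type and File combine as follows (usage sketch; the file paths are examples):

inMemory := &storage.Config{}                                                      // default: in-memory, nothing persisted
persisted := &storage.Config{Type: storage.TypeInMemory, File: "/data/gatus.bin"}  // in-memory, periodically saved to File
sqlite := &storage.Config{Type: storage.TypeSQLite, File: "/data/gatus.db"}        // SQLite-backed; File is required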
View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/TwinProduction/gatus/storage/store"
"github.com/TwinProduction/gatus/storage/store/database"
"github.com/TwinProduction/gatus/storage/store/memory"
)
@ -38,36 +39,52 @@ func Initialize(cfg *Config) error {
initialized = true
var err error
if cancelFunc != nil {
// Stop the active autoSave task
// Stop the active autoSaveStore task, if there's already one
cancelFunc()
}
if cfg == nil || len(cfg.File) == 0 {
log.Println("[storage][Initialize] Creating storage provider")
provider, _ = memory.NewStore("")
if cfg == nil {
cfg = &Config{}
}
if len(cfg.File) == 0 {
log.Printf("[storage][Initialize] Creating storage provider with type=%s", cfg.Type)
} else {
log.Printf("[storage][Initialize] Creating storage provider with type=%s and file=%s", cfg.Type, cfg.File)
}
ctx, cancelFunc = context.WithCancel(context.Background())
log.Printf("[storage][Initialize] Creating storage provider with file=%s", cfg.File)
switch cfg.Type {
case TypeSQLite:
provider, err = database.NewStore(string(cfg.Type), cfg.File)
if err != nil {
return err
}
case TypeInMemory:
fallthrough
default:
if len(cfg.File) > 0 {
provider, err = memory.NewStore(cfg.File)
if err != nil {
return err
}
go autoSave(7*time.Minute, ctx)
go autoSaveStore(ctx, provider, 7*time.Minute)
} else {
provider, _ = memory.NewStore("")
}
}
return nil
}
// autoSave automatically calls the SaveFunc function of the provider at every interval
func autoSave(interval time.Duration, ctx context.Context) {
// autoSaveStore automatically calls the Save function of the provider at every interval
func autoSaveStore(ctx context.Context, provider store.Store, interval time.Duration) {
for {
select {
case <-ctx.Done():
log.Printf("[storage][autoSave] Stopping active job")
log.Printf("[storage][autoSaveStore] Stopping active job")
return
case <-time.After(interval):
log.Printf("[storage][autoSave] Saving")
log.Printf("[storage][autoSaveStore] Saving")
err := provider.Save()
if err != nil {
log.Println("[storage][autoSave] Save failed:", err.Error())
log.Println("[storage][autoSaveStore] Save failed:", err.Error())
}
}
}

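A typical initialization sequence, for reference (usage sketch; storage.Get is the accessor already used by main.go's stop() above, and the file path and log message are examples):

if err := storage.Initialize(&storage.Config{Type: storage.TypeSQLite, File: "/data/gatus.db"}); err != nil {
    log.Fatalf("[main] Failed to initialize storage: %s", err.Error())
}
defer storage.Get().Close()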
View File

@ -1,15 +1,59 @@
package storage
import (
"testing"
"time"
"github.com/TwinProduction/gatus/storage/store/database"
)
func TestInitialize(t *testing.T) {
file := t.TempDir() + "/test.db"
err := Initialize(&Config{File: file})
type Scenario struct {
Name string
Cfg *Config
ExpectedErr error
}
scenarios := []Scenario{
{
Name: "nil",
Cfg: nil,
ExpectedErr: nil,
},
{
Name: "blank",
Cfg: &Config{},
ExpectedErr: nil,
},
{
Name: "inmemory-no-file",
Cfg: &Config{Type: TypeInMemory},
ExpectedErr: nil,
},
{
Name: "inmemory-with-file",
Cfg: &Config{Type: TypeInMemory, File: t.TempDir() + "/TestInitialize_inmemory-with-file.db"},
ExpectedErr: nil,
},
{
Name: "sqlite-no-file",
Cfg: &Config{Type: TypeSQLite},
ExpectedErr: database.ErrFilePathNotSpecified,
},
{
Name: "sqlite-with-file",
Cfg: &Config{Type: TypeSQLite, File: t.TempDir() + "/TestInitialize_sqlite-with-file.db"},
ExpectedErr: nil,
},
}
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
err := Initialize(scenario.Cfg)
if err != scenario.ExpectedErr {
t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
}
if err != nil {
t.Fatal("shouldn't have returned an error")
return
}
if cancelFunc == nil {
t.Error("cancelFunc shouldn't have been nil")
@ -17,21 +61,29 @@ func TestInitialize(t *testing.T) {
if ctx == nil {
t.Error("ctx shouldn't have been nil")
}
// Try to initialize it again
err = Initialize(&Config{File: file})
if err != nil {
t.Fatal("shouldn't have returned an error")
if provider == nil {
t.Fatal("provider shouldn't have been nil")
}
provider.Close()
// Try to initialize it again
err = Initialize(scenario.Cfg)
if err != scenario.ExpectedErr {
t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
return
}
provider.Close()
provider = nil
})
}
cancelFunc()
}
func TestAutoSave(t *testing.T) {
file := t.TempDir() + "/test.db"
file := t.TempDir() + "/TestAutoSave.db"
if err := Initialize(&Config{File: file}); err != nil {
t.Fatal("shouldn't have returned an error")
}
go autoSave(3*time.Millisecond, ctx)
go autoSaveStore(ctx, provider, 3*time.Millisecond)
time.Sleep(15 * time.Millisecond)
cancelFunc()
time.Sleep(5 * time.Millisecond)
time.Sleep(10 * time.Millisecond)
}

View File

@ -3,6 +3,7 @@ package database
import (
"database/sql"
"errors"
"fmt"
"log"
"strings"
"time"
@ -306,7 +307,18 @@ func (s *Store) Insert(service *core.Service, result *core.Result) {
// DeleteAllServiceStatusesNotInKeys removes all rows owned by a service whose key is not within the keys provided
func (s *Store) DeleteAllServiceStatusesNotInKeys(keys []string) int {
panic("implement me")
if len(keys) == 0 {
return 0
}
args := make([]interface{}, 0, len(keys))
for i := range keys {
args = append(args, keys[i])
}
result, err := s.db.Exec(fmt.Sprintf("DELETE FROM service WHERE service_key NOT IN (%s)", strings.Trim(strings.Repeat("?,", len(keys)), ",")), args...)
if err != nil {
log.Printf("[database][DeleteAllServiceStatusesNotInKeys] Failed to delete rows: %s", err.Error())
return 0
}
rowsAffected, _ := result.RowsAffected()
return int(rowsAffected)
}
// Clear deletes everything from the store
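For clarity, the placeholder construction in DeleteAllServiceStatusesNotInKeys above expands like this (illustrative only; the keys are made up):

keys := []string{"core_frontend", "core_backend"}
placeholders := strings.Trim(strings.Repeat("?,", len(keys)), ",") // "?,?"
query := fmt.Sprintf("DELETE FROM service WHERE service_key NOT IN (%s)", placeholders)
// query == "DELETE FROM service WHERE service_key NOT IN (?,?)"
// The keys themselves are passed as query arguments, so values are never spliced into the SQL string.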
@ -439,7 +451,7 @@ func (s *Store) getAllServiceKeys(tx *sql.Tx) (keys []string, err error) {
}
func (s *Store) getServiceStatusByKey(tx *sql.Tx, key string, parameters *paging.ServiceStatusParams) (*core.ServiceStatus, error) {
serviceID, serviceName, serviceGroup, err := s.getServiceIDGroupAndNameByKey(tx, key)
serviceID, serviceGroup, serviceName, err := s.getServiceIDGroupAndNameByKey(tx, key)
if err != nil {
return nil, err
}
@ -484,7 +496,7 @@ func (s *Store) getEventsByServiceID(tx *sql.Tx, serviceID int64, page, pageSize
SELECT event_type, event_timestamp
FROM service_event
WHERE service_id = $1
ORDER BY service_event_id DESC
ORDER BY service_event_id ASC
LIMIT $2 OFFSET $3
`,
serviceID,
@ -509,7 +521,7 @@ func (s *Store) getResultsByServiceID(tx *sql.Tx, serviceID int64, page, pageSiz
SELECT service_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, hostname, ip, duration, timestamp
FROM service_result
WHERE service_id = $1
ORDER BY timestamp DESC
ORDER BY timestamp ASC
LIMIT $2 OFFSET $3
`,
serviceID,
@ -525,7 +537,9 @@ func (s *Store) getResultsByServiceID(tx *sql.Tx, serviceID int64, page, pageSiz
var id int64
var joinedErrors string
_ = rows.Scan(&id, &result.Success, &joinedErrors, &result.Connected, &result.HTTPStatus, &result.DNSRCode, &result.CertificateExpiration, &result.Hostname, &result.IP, &result.Duration, &result.Timestamp)
if len(joinedErrors) != 0 {
result.Errors = strings.Split(joinedErrors, arraySeparator)
}
results = append(results, result)
idResultMap[id] = result
}
@ -534,7 +548,7 @@ func (s *Store) getResultsByServiceID(tx *sql.Tx, serviceID int64, page, pageSiz
for serviceResultID, result := range idResultMap {
rows, err = tx.Query(
`
SELECT service_result_id, condition, success
SELECT condition, success
FROM service_result_condition
WHERE service_result_id = $1
`,
@ -545,7 +559,9 @@ func (s *Store) getResultsByServiceID(tx *sql.Tx, serviceID int64, page, pageSiz
}
for rows.Next() {
conditionResult := &core.ConditionResult{}
_ = rows.Scan(&conditionResult.Condition, &conditionResult.Success)
if err = rows.Scan(&conditionResult.Condition, &conditionResult.Success); err != nil {
return
}
result.ConditionResults = append(result.ConditionResults, conditionResult)
}
_ = rows.Close()

View File

@ -97,7 +97,7 @@ func TestNewStore(t *testing.T) {
func TestStore_Insert(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_Insert.db")
defer store.db.Close()
defer store.Close()
store.Insert(&testService, &testSuccessfulResult)
store.Insert(&testService, &testUnsuccessfulResult)
@ -147,102 +147,9 @@ func TestStore_Insert(t *testing.T) {
}
}
func TestStore_GetServiceStatus(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetServiceStatus.db")
defer store.db.Close()
store.Insert(&testService, &testSuccessfulResult)
store.Insert(&testService, &testUnsuccessfulResult)
serviceStatus := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus == nil {
t.Fatalf("serviceStatus shouldn't have been nil")
}
if serviceStatus.Uptime == nil {
t.Fatalf("serviceStatus.Uptime shouldn't have been nil")
}
if serviceStatus.Uptime.LastHour != 0.5 {
t.Errorf("serviceStatus.Uptime.LastHour should've been 0.5")
}
if serviceStatus.Uptime.LastTwentyFourHours != 0.5 {
t.Errorf("serviceStatus.Uptime.LastTwentyFourHours should've been 0.5")
}
if serviceStatus.Uptime.LastSevenDays != 0.5 {
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
}
}
func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetServiceStatusForMissingStatusReturnsNil.db")
defer store.db.Close()
store.Insert(&testService, &testSuccessfulResult)
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname", paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus != nil {
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, testService.Name)
}
serviceStatus = store.GetServiceStatus(testService.Group, "nonexistantname", paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus != nil {
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, "nonexistantname")
}
serviceStatus = store.GetServiceStatus("nonexistantgroup", testService.Name, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus != nil {
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", testService.Name)
}
}
func TestStore_GetServiceStatusByKey(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetServiceStatusByKey.db")
defer store.db.Close()
store.Insert(&testService, &testSuccessfulResult)
store.Insert(&testService, &testUnsuccessfulResult)
serviceStatus := store.GetServiceStatusByKey(testService.Key(), paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus == nil {
t.Fatalf("serviceStatus shouldn't have been nil")
}
if serviceStatus.Uptime == nil {
t.Fatalf("serviceStatus.Uptime shouldn't have been nil")
}
if serviceStatus.Uptime.LastHour != 0.5 {
t.Errorf("serviceStatus.Uptime.LastHour should've been 0.5")
}
if serviceStatus.Uptime.LastTwentyFourHours != 0.5 {
t.Errorf("serviceStatus.Uptime.LastTwentyFourHours should've been 0.5")
}
if serviceStatus.Uptime.LastSevenDays != 0.5 {
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
}
}
func TestStore_GetAllServiceStatuses(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetAllServiceStatuses.db")
defer store.db.Close()
firstResult := &testSuccessfulResult
secondResult := &testUnsuccessfulResult
store.Insert(&testService, firstResult)
store.Insert(&testService, secondResult)
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
firstResult.Timestamp = time.Time{}
secondResult.Timestamp = time.Time{}
serviceStatuses := store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20))
if len(serviceStatuses) != 1 {
t.Fatal("expected 1 service status")
}
actual, exists := serviceStatuses[testService.Key()]
if !exists {
t.Fatal("expected service status to exist")
}
if len(actual.Results) != 2 {
t.Error("expected 2 results, got", len(actual.Results))
}
if len(actual.Events) != 0 {
t.Error("expected 0 events, got", len(actual.Events))
}
}
func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpOldUptimeEntriesProperly.db")
defer store.db.Close()
defer store.Close()
now := time.Now().Round(time.Minute)
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
@ -297,9 +204,9 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
}
}
func TestStore_InsertCleansUpProperly(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_deleteOldServiceResults.db")
defer store.db.Close()
func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db")
defer store.Close()
for i := 0; i < resultsCleanUpThreshold+eventsCleanUpThreshold; i++ {
store.Insert(&testService, &testSuccessfulResult)
store.Insert(&testService, &testUnsuccessfulResult)
@ -311,4 +218,137 @@ func TestStore_InsertCleansUpProperly(t *testing.T) {
t.Errorf("number of events shouldn't have exceeded %d, reached %d", eventsCleanUpThreshold, len(ss.Events))
}
}
store.Clear()
}
func TestStore_GetServiceStatus(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetServiceStatus.db")
defer store.Close()
firstResult := testSuccessfulResult
firstResult.Timestamp = timestamp.Add(-time.Minute)
secondResult := testUnsuccessfulResult
secondResult.Timestamp = timestamp
store.Insert(&testService, &firstResult)
store.Insert(&testService, &secondResult)
serviceStatus := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus == nil {
t.Fatalf("serviceStatus shouldn't have been nil")
}
if serviceStatus.Uptime == nil {
t.Fatalf("serviceStatus.Uptime shouldn't have been nil")
}
if len(serviceStatus.Results) != 2 {
t.Fatalf("serviceStatus.Results should've had 2 entries")
}
if serviceStatus.Results[0].Timestamp.After(serviceStatus.Results[1].Timestamp) {
t.Fatalf("The result at index 0 should've been older than the result at index 1")
}
if serviceStatus.Uptime.LastHour != 0.5 {
t.Errorf("serviceStatus.Uptime.LastHour should've been 0.5")
}
if serviceStatus.Uptime.LastTwentyFourHours != 0.5 {
t.Errorf("serviceStatus.Uptime.LastTwentyFourHours should've been 0.5")
}
if serviceStatus.Uptime.LastSevenDays != 0.5 {
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
}
}
func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetServiceStatusForMissingStatusReturnsNil.db")
defer store.Close()
store.Insert(&testService, &testSuccessfulResult)
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname", paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus != nil {
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", "nonexistantname")
}
serviceStatus = store.GetServiceStatus(testService.Group, "nonexistantname", paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus != nil {
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, "nonexistantname")
}
serviceStatus = store.GetServiceStatus("nonexistantgroup", testService.Name, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus != nil {
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", testService.Name)
}
}
func TestStore_GetServiceStatusByKey(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetServiceStatusByKey.db")
defer store.Close()
store.Insert(&testService, &testSuccessfulResult)
store.Insert(&testService, &testUnsuccessfulResult)
serviceStatus := store.GetServiceStatusByKey(testService.Key(), paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
if serviceStatus == nil {
t.Fatalf("serviceStatus shouldn't have been nil")
}
if serviceStatus.Name != testService.Name {
t.Fatalf("serviceStatus.Name should've been %s, got %s", testService.Name, serviceStatus.Name)
}
if serviceStatus.Group != testService.Group {
t.Fatalf("serviceStatus.Group should've been %s, got %s", testService.Group, serviceStatus.Group)
}
if serviceStatus.Uptime == nil {
t.Fatalf("serviceStatus.Uptime shouldn't have been nil")
}
if serviceStatus.Uptime.LastHour != 0.5 {
t.Errorf("serviceStatus.Uptime.LastHour should've been 0.5")
}
if serviceStatus.Uptime.LastTwentyFourHours != 0.5 {
t.Errorf("serviceStatus.Uptime.LastTwentyFourHours should've been 0.5")
}
if serviceStatus.Uptime.LastSevenDays != 0.5 {
t.Errorf("serviceStatus.Uptime.LastSevenDays should've been 0.5")
}
}
func TestStore_GetAllServiceStatuses(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetAllServiceStatuses.db")
defer store.Close()
firstResult := &testSuccessfulResult
secondResult := &testUnsuccessfulResult
store.Insert(&testService, firstResult)
store.Insert(&testService, secondResult)
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
firstResult.Timestamp = time.Time{}
secondResult.Timestamp = time.Time{}
serviceStatuses := store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20))
if len(serviceStatuses) != 1 {
t.Fatal("expected 1 service status")
}
actual, exists := serviceStatuses[testService.Key()]
if !exists {
t.Fatal("expected service status to exist")
}
if len(actual.Results) != 2 {
t.Error("expected 2 results, got", len(actual.Results))
}
if len(actual.Events) != 0 {
t.Error("expected 0 events, got", len(actual.Events))
}
}
func TestStore_DeleteAllServiceStatusesNotInKeys(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_DeleteAllServiceStatusesNotInKeys.db")
defer store.Close()
firstService := core.Service{Name: "service-1", Group: "group"}
secondService := core.Service{Name: "service-2", Group: "group"}
result := &testSuccessfulResult
store.Insert(&firstService, result)
store.Insert(&secondService, result)
if store.GetServiceStatusByKey(firstService.Key(), paging.NewServiceStatusParams()) == nil {
t.Fatal("firstService should exist")
}
if store.GetServiceStatusByKey(secondService.Key(), paging.NewServiceStatusParams()) == nil {
t.Fatal("secondService should exist")
}
store.DeleteAllServiceStatusesNotInKeys([]string{firstService.Key()})
if store.GetServiceStatusByKey(firstService.Key(), paging.NewServiceStatusParams()) == nil {
t.Error("firstService should still exist")
}
if store.GetServiceStatusByKey(secondService.Key(), paging.NewServiceStatusParams()) != nil {
t.Error("secondService should've been deleted")
}
}

View File

@ -112,3 +112,8 @@ func (s *Store) Save() error {
}
return nil
}
// Close does nothing, because there's nothing to close
func (s *Store) Close() {
}

View File

@ -37,6 +37,9 @@ func ShallowCopyServiceStatus(ss *core.ServiceStatus, params *paging.ServiceStat
}
func getStartAndEndIndex(numberOfResults int, page, pageSize int) (int, int) {
if page < 1 || pageSize < 0 {
return -1, -1
}
start := numberOfResults - (page * pageSize)
end := numberOfResults - ((page - 1) * pageSize)
if start > numberOfResults {

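To make the index math above concrete, a worked example (not part of the diff) with 25 stored results, page 2 and page size 10:

// start = numberOfResults - (page * pageSize)        = 25 - 20 = 5
// end   = numberOfResults - ((page - 1) * pageSize)  = 25 - 10 = 15
// The copy therefore covers results[5:15]: the ten results just before the most recent page,
// which matches TestShallowCopyServiceStatus's expectation of 10 results for page 2 below.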
View File

@ -11,45 +11,69 @@ import (
func TestAddResult(t *testing.T) {
service := &core.Service{Name: "name", Group: "group"}
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
for i := 0; i < core.MaximumNumberOfResults+10; i++ {
AddResult(serviceStatus, &core.Result{Timestamp: time.Now()})
for i := 0; i < (core.MaximumNumberOfResults+core.MaximumNumberOfEvents)*2; i++ {
AddResult(serviceStatus, &core.Result{Success: i%2 == 0, Timestamp: time.Now()})
}
if len(serviceStatus.Results) != core.MaximumNumberOfResults {
t.Errorf("expected serviceStatus.Results to not exceed a length of %d", core.MaximumNumberOfResults)
}
if len(serviceStatus.Events) != core.MaximumNumberOfEvents {
t.Errorf("expected serviceStatus.Events to not exceed a length of %d", core.MaximumNumberOfEvents)
}
// Try to add nil serviceStatus
AddResult(nil, &core.Result{Timestamp: time.Now()})
}
func TestShallowCopyServiceStatus(t *testing.T) {
service := &core.Service{Name: "name", Group: "group"}
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
ts := time.Now().Add(-25 * time.Hour)
for i := 0; i < 25; i++ {
AddResult(serviceStatus, &core.Result{Timestamp: time.Now()})
AddResult(serviceStatus, &core.Result{Success: i%2 == 0, Timestamp: ts})
ts = ts.Add(time.Hour)
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(-1, -1)).Results) != 0 {
t.Error("expected to have 0 result")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 1)).Results) != 1 {
t.Errorf("expected to have 1 result")
t.Error("expected to have 1 result")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(5, 0)).Results) != 0 {
t.Errorf("expected to have 0 results")
t.Error("expected to have 0 results")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(-1, 20)).Results) != 0 {
t.Errorf("expected to have 0 result, because the page was invalid")
t.Error("expected to have 0 result, because the page was invalid")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, -1)).Results) != 0 {
t.Errorf("expected to have 0 result, because the page size was invalid")
t.Error("expected to have 0 result, because the page size was invalid")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 10)).Results) != 10 {
t.Errorf("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
t.Error("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(2, 10)).Results) != 10 {
t.Errorf("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
t.Error("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(3, 10)).Results) != 5 {
t.Errorf("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
t.Error("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(4, 10)).Results) != 0 {
t.Errorf("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
t.Error("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
}
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 50)).Results) != 25 {
t.Errorf("expected to have 25 results, because there's only 25 results")
t.Error("expected to have 25 results, because there's only 25 results")
}
uptime := ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithUptime()).Uptime
if uptime == nil {
t.Error("expected uptime to not be nil")
} else {
if uptime.LastHour != 1 {
t.Error("expected uptime.LastHour to be 1, got", uptime.LastHour)
}
if uptime.LastTwentyFourHours != 0.5 {
t.Error("expected uptime.LastTwentyFourHours to be 0.5, got", uptime.LastTwentyFourHours)
}
if uptime.LastSevenDays != 0.52 {
t.Error("expected uptime.LastSevenDays to be 0.52, got", uptime.LastSevenDays)
}
}
}

View File

@ -32,6 +32,10 @@ type Store interface {
// Save persists the data if and where it needs to be persisted
Save() error
// Close terminates all connections and closes the store, if applicable.
// Should only be used before stopping the application.
Close()
}
// TODO: add method to check state of store (by keeping track of silent errors)

storage/type.go Normal file
View File

@ -0,0 +1,9 @@
package storage
// Type of the store.
type Type string
const (
TypeInMemory Type = "inmemory" // In-memory store
TypeSQLite Type = "sqlite" // SQLite store
)