mirror of https://github.com/TwiN/gatus.git (synced 2024-11-25 01:13:40 +01:00)

commit 3204a79eb6 (parent e9ac115a95)

    Lazily retry triggered alerts in case of failure
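In short, a triggered alert is now re-attempted on every subsequent failed check until the provider accepts it, instead of only being attempted once at the exact failure threshold. A minimal, self-contained sketch of that idea follows; the type and function names are illustrative and do not match the actual gatus API one-to-one.

package main

import (
	"errors"
	"fmt"
)

// alert is a simplified, hypothetical model of the change: an alert is only
// marked as triggered once the provider's Send call succeeds, so a failed
// send is retried "lazily" on the next failed health check.
type alert struct {
	FailureThreshold int
	Triggered        bool
}

func handleFailure(a *alert, failuresInARow int, send func() error) {
	// Before this commit the threshold check used "!=", so a send that failed
	// at the exact threshold was never attempted again; ">" keeps retrying.
	if a.FailureThreshold > failuresInARow || a.Triggered {
		return
	}
	if err := send(); err != nil {
		return // Triggered stays false; the next failing check retries
	}
	a.Triggered = true
}

func main() {
	a := &alert{FailureThreshold: 2}
	attempts := 0
	flakySend := func() error {
		attempts++
		if attempts == 1 {
			return errors.New("provider unavailable")
		}
		return nil
	}
	handleFailure(a, 2, flakySend) // threshold reached, but the send fails
	handleFailure(a, 3, flakySend) // retried on the next failure and succeeds
	fmt.Println(a.Triggered)       // true
}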
@@ -2,6 +2,7 @@ package custom
 
 import (
     "bytes"
+    "errors"
     "fmt"
     "io/ioutil"
     "net/http"
@@ -76,6 +77,9 @@ func (provider *AlertProvider) buildHTTPRequest(serviceName, alertDescription st
 // Send a request to the alert provider and return the body
 func (provider *AlertProvider) Send(serviceName, alertDescription string, resolved bool) ([]byte, error) {
     if os.Getenv("MOCK_ALERT_PROVIDER") == "true" {
+        if os.Getenv("MOCK_ALERT_PROVIDER_ERROR") == "true" {
+            return nil, errors.New("error")
+        }
         return []byte("{}"), nil
     }
     request := provider.buildHTTPRequest(serviceName, alertDescription, resolved)
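The mocked branch above is driven entirely by two environment variables, which the rewritten tests further down toggle around HandleAlerting calls. A standalone sketch of how the two variables interact, assuming a stand-in sendMock function rather than the real AlertProvider.Send:

package main

import (
	"errors"
	"fmt"
	"os"
)

// sendMock is for illustration only; it mirrors the mocked branch added above
// but is not the actual AlertProvider.Send implementation.
func sendMock() ([]byte, error) {
	if os.Getenv("MOCK_ALERT_PROVIDER") == "true" {
		if os.Getenv("MOCK_ALERT_PROVIDER_ERROR") == "true" {
			return nil, errors.New("error")
		}
		return []byte("{}"), nil
	}
	return nil, errors.New("not mocked in this sketch")
}

func main() {
	_ = os.Setenv("MOCK_ALERT_PROVIDER", "true")
	_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
	_, err := sendMock()
	fmt.Println("with MOCK_ALERT_PROVIDER_ERROR=true:", err) // error
	_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
	body, err := sendMock()
	fmt.Println("with MOCK_ALERT_PROVIDER_ERROR=false:", string(body), err) // {} <nil>
}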
@@ -26,6 +26,12 @@ type Alert struct {
 
     // Triggered is used to determine whether an alert has been triggered. When an alert is resolved, this value
     // should be set back to false. It is used to prevent the same alert from going out twice.
+    //
+    // This value should only be modified if the provider.AlertProvider's Send function does not return an error for an
+    // alert that hasn't been triggered yet. This doubles as a lazy retry. The reason why this behavior isn't also
+    // applied for alerts that are already triggered and has become "healthy" again is to prevent a case where, for
+    // some reason, the alert provider always returns errors when trying to send the resolved notification
+    // (SendOnResolved).
     Triggered bool
 }
 
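The comment describes an intentional asymmetry: send failures are retried lazily while an alert is being triggered, but not while it is being resolved. A hypothetical sketch of the resolve side, with illustrative names only:

package main

import (
	"errors"
	"fmt"
)

// resolvableAlert is illustrative only; it is not the actual core.Alert type.
type resolvableAlert struct {
	SuccessThreshold int
	Triggered        bool
}

// handleSuccess clears Triggered even when the provider returns an error,
// otherwise a provider that permanently fails on the resolved notification
// would keep the alert stuck in the triggered state (the case the comment
// above warns about).
func handleSuccess(a *resolvableAlert, successesInARow int, sendResolved func() error) {
	if !a.Triggered || a.SuccessThreshold > successesInARow {
		return
	}
	a.Triggered = false // cleared unconditionally; no lazy retry on this path
	if err := sendResolved(); err != nil {
		fmt.Println("resolved notification failed, not retried:", err)
	}
}

func main() {
	a := &resolvableAlert{SuccessThreshold: 1, Triggered: true}
	alwaysFailing := func() error { return errors.New("provider unavailable") }
	handleSuccess(a, 1, alwaysFailing)
	fmt.Println(a.Triggered) // false, despite the send error
}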
@@ -26,18 +26,18 @@ func handleAlertsToTrigger(service *core.Service, result *core.Result, cfg *conf
     service.NumberOfFailuresInARow++
     for _, alert := range service.Alerts {
         // If the alert hasn't been triggered, move to the next one
-        if !alert.Enabled || alert.FailureThreshold != service.NumberOfFailuresInARow {
+        if !alert.Enabled || alert.FailureThreshold > service.NumberOfFailuresInARow {
             continue
         }
         if alert.Triggered {
             if cfg.Debug {
-                log.Printf("[watchdog][handleAlertsToTrigger] Alert with description='%s' has already been TRIGGERED, skipping", alert.Description)
+                log.Printf("[watchdog][handleAlertsToTrigger] Alert for service=%s with description='%s' has already been TRIGGERED, skipping", service.Name, alert.Description)
             }
             continue
         }
         alertProvider := config.GetAlertingProviderByAlertType(cfg, alert.Type)
         if alertProvider != nil && alertProvider.IsValid() {
-            log.Printf("[watchdog][handleAlertsToTrigger] Sending %s alert because alert with description='%s' has been TRIGGERED", alert.Type, alert.Description)
+            log.Printf("[watchdog][handleAlertsToTrigger] Sending %s alert because alert for service=%s with description='%s' has been TRIGGERED", alert.Type, service.Name, alert.Description)
             customAlertProvider := alertProvider.ToCustomAlertProvider(service, alert, result, false)
             // TODO: retry on error
             var err error
@@ -47,7 +47,7 @@ func handleAlertsToTrigger(service *core.Service, result *core.Result, cfg *conf
             if body, err = customAlertProvider.Send(service.Name, alert.Description, false); err == nil {
                 var response pagerDutyResponse
                 if err = json.Unmarshal(body, &response); err != nil {
-                    log.Printf("[watchdog][handleAlertsToTrigger] Ran into error unmarshaling pager duty response: %s", err.Error())
+                    log.Printf("[watchdog][handleAlertsToTrigger] Ran into error unmarshaling pagerduty response: %s", err.Error())
                 } else {
                     alert.ResolveKey = response.DedupKey
                 }
@@ -57,7 +57,7 @@ func handleAlertsToTrigger(service *core.Service, result *core.Result, cfg *conf
                 _, err = customAlertProvider.Send(service.Name, alert.Description, false)
             }
             if err != nil {
-                log.Printf("[watchdog][handleAlertsToTrigger] Ran into error sending an alert: %s", err.Error())
+                log.Printf("[watchdog][handleAlertsToTrigger] Failed to send an alert for service=%s: %s", service.Name, err.Error())
             } else {
                 alert.Triggered = true
             }
@@ -73,18 +73,20 @@ func handleAlertsToResolve(service *core.Service, result *core.Result, cfg *conf
         if !alert.Enabled || !alert.Triggered || alert.SuccessThreshold > service.NumberOfSuccessesInARow {
             continue
         }
+        // Even if the alert provider returns an error, we still set the alert's Triggered variable to false.
+        // Further explanation can be found on Alert's Triggered field.
         alert.Triggered = false
         if !alert.SendOnResolved {
             continue
         }
         alertProvider := config.GetAlertingProviderByAlertType(cfg, alert.Type)
         if alertProvider != nil && alertProvider.IsValid() {
-            log.Printf("[watchdog][handleAlertsToResolve] Sending %s alert because alert with description='%s' has been RESOLVED", alert.Type, alert.Description)
+            log.Printf("[watchdog][handleAlertsToResolve] Sending %s alert because alert for service=%s with description='%s' has been RESOLVED", alert.Type, service.Name, alert.Description)
             customAlertProvider := alertProvider.ToCustomAlertProvider(service, alert, result, true)
             // TODO: retry on error
             _, err := customAlertProvider.Send(service.Name, alert.Description, true)
             if err != nil {
-                log.Printf("[watchdog][handleAlertsToResolve] Ran into error sending an alert: %s", err.Error())
+                log.Printf("[watchdog][handleAlertsToResolve] Failed to send an alert for service=%s: %s", service.Name, err.Error())
             } else {
                 if alert.Type == core.PagerDutyAlert {
                     alert.ResolveKey = ""
@@ -39,103 +39,23 @@ func TestHandleAlerting(t *testing.T) {
         },
     }
 
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've started at 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've started at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't start triggered")
-    }
-
+    verify(t, service, 0, 0, false, "The alert shouldn't start triggered")
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 1 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 0 to 1, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't have triggered")
-    }
-
+    verify(t, service, 1, 0, false, "The alert shouldn't have triggered")
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 2 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 1 to 2, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if !service.Alerts[0].Triggered {
-        t.Fatal("The alert should've triggered")
-    }
-
+    verify(t, service, 2, 0, true, "The alert should've triggered")
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 3 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 2 to 3, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if !service.Alerts[0].Triggered {
-        t.Fatal("The alert should still show as triggered")
-    }
-
+    verify(t, service, 3, 0, true, "The alert should still be triggered")
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 4 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 3 to 4, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if !service.Alerts[0].Triggered {
-        t.Fatal("The alert should still show as triggered")
-    }
-
+    verify(t, service, 4, 0, true, "The alert should still be triggered")
     HandleAlerting(service, &core.Result{Success: true})
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've reset to 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 1 {
-        t.Fatal("service.NumberOfSuccessesInARow should've increased from 0 to 1, got", service.NumberOfSuccessesInARow)
-    }
-    if !service.Alerts[0].Triggered {
-        t.Fatal("The alert should still be triggered (because service.Alerts[0].SuccessThreshold is 3)")
-    }
-
+    verify(t, service, 0, 1, true, "The alert should still be triggered (because service.Alerts[0].SuccessThreshold is 3)")
     HandleAlerting(service, &core.Result{Success: true})
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've stayed at 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 2 {
-        t.Fatal("service.NumberOfSuccessesInARow should've increased from 1 to 2, got", service.NumberOfSuccessesInARow)
-    }
-    if !service.Alerts[0].Triggered {
-        t.Fatal("The alert should still be triggered")
-    }
-
+    verify(t, service, 0, 2, true, "The alert should still be triggered (because service.Alerts[0].SuccessThreshold is 3)")
     HandleAlerting(service, &core.Result{Success: true})
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've stayed at 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 3 {
-        t.Fatal("service.NumberOfSuccessesInARow should've increased from 2 to 3, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert should not be triggered")
-    }
-
+    verify(t, service, 0, 3, false, "The alert should've been resolved")
     HandleAlerting(service, &core.Result{Success: true})
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've stayed at 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 4 {
-        t.Fatal("service.NumberOfSuccessesInARow should've increased from 3 to 4, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert should no longer be triggered")
-    }
+    verify(t, service, 0, 4, false, "The alert should no longer be triggered")
 }
 
 func TestHandleAlertingWhenAlertingConfigIsNil(t *testing.T) {
@@ -172,92 +92,11 @@ func TestHandleAlertingWithBadAlertProvider(t *testing.T) {
         },
     }
 
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've started at 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've started at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't start triggered")
-    }
-
+    verify(t, service, 0, 0, false, "The alert shouldn't start triggered")
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 1 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 0 to 1, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't have triggered")
-    }
-
+    verify(t, service, 1, 0, false, "The alert shouldn't have triggered")
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 2 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 1 to 2, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't have triggered, because the provider wasn't configured properly")
-    }
+    verify(t, service, 2, 0, false, "The alert shouldn't have triggered, because the provider wasn't configured properly")
-}
-
-func TestHandleAlertingWithoutSendingAlertOnResolve(t *testing.T) {
-    _ = os.Setenv("MOCK_ALERT_PROVIDER", "true")
-    defer os.Clearenv()
-
-    cfg := &config.Config{
-        Alerting: &alerting.Config{},
-    }
-    config.Set(cfg)
-    service := &core.Service{
-        URL: "http://example.com",
-        Alerts: []*core.Alert{
-            {
-                Type: core.CustomAlert,
-                Enabled: true,
-                FailureThreshold: 1,
-                SuccessThreshold: 1,
-                SendOnResolved: false,
-                Triggered: false,
-            },
-        },
-    }
-
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've started at 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've started at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't start triggered")
-    }
-
-    HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 1 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 0 to 1, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't have triggered")
-    }
-
-    HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 2 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 1 to 2, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't have triggered, because the provider wasn't configured properly")
-    }
 }
 
 func TestHandleAlertingWhenTriggeredAlertIsAlmostResolvedButServiceStartFailingAgain(t *testing.T) {
@@ -291,15 +130,7 @@ func TestHandleAlertingWhenTriggeredAlertIsAlmostResolvedButServiceStartFailingA
 
     // This test simulate an alert that was already triggered
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 2 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 1 to 2, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if !service.Alerts[0].Triggered {
-        t.Fatal("The alert was already triggered at the beginning of this test")
-    }
+    verify(t, service, 2, 0, true, "The alert was already triggered at the beginning of this test")
 }
 
 func TestHandleAlertingWhenTriggeredAlertIsResolvedButSendOnResolvedIsFalse(t *testing.T) {
@@ -332,15 +163,7 @@ func TestHandleAlertingWhenTriggeredAlertIsResolvedButSendOnResolvedIsFalse(t *t
     }
 
     HandleAlerting(service, &core.Result{Success: true})
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've decreased from 1 to 0, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 1 {
-        t.Fatal("service.NumberOfSuccessesInARow should've increased from 0 to 1, got", service.NumberOfSuccessesInARow)
-    }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't be triggered anymore")
-    }
+    verify(t, service, 0, 1, false, "The alert should've been resolved")
 }
 
 func TestHandleAlertingWhenTriggeredAlertIsResolvedPagerDuty(t *testing.T) {
@@ -372,24 +195,138 @@ func TestHandleAlertingWhenTriggeredAlertIsResolvedPagerDuty(t *testing.T) {
     }
 
     HandleAlerting(service, &core.Result{Success: false})
-    if service.NumberOfFailuresInARow != 1 {
-        t.Fatal("service.NumberOfFailuresInARow should've increased from 0 to 1, got", service.NumberOfFailuresInARow)
-    }
-    if service.NumberOfSuccessesInARow != 0 {
-        t.Fatal("service.NumberOfSuccessesInARow should've stayed at 0, got", service.NumberOfSuccessesInARow)
-    }
-    if !service.Alerts[0].Triggered {
-        t.Fatal("The alert should've been triggered")
-    }
-
+    verify(t, service, 1, 0, true, "")
     HandleAlerting(service, &core.Result{Success: true})
-    if service.NumberOfFailuresInARow != 0 {
-        t.Fatal("service.NumberOfFailuresInARow should've decreased from 1 to 0, got", service.NumberOfFailuresInARow)
+    verify(t, service, 0, 1, false, "The alert should've been resolved")
+}
+
+func TestHandleAlertingWithProviderThatReturnsAnError(t *testing.T) {
+    _ = os.Setenv("MOCK_ALERT_PROVIDER", "true")
+    defer os.Clearenv()
+
+    cfg := &config.Config{
+        Debug: true,
+        Alerting: &alerting.Config{
+            Custom: &custom.AlertProvider{
+                URL: "https://twinnation.org/health",
+                Method: "GET",
+            },
+        },
     }
-    if service.NumberOfSuccessesInARow != 1 {
-        t.Fatal("service.NumberOfSuccessesInARow should've increased from 0 to 1, got", service.NumberOfSuccessesInARow)
+    config.Set(cfg)
+    service := &core.Service{
+        URL: "http://example.com",
+        Alerts: []*core.Alert{
+            {
+                Type: core.CustomAlert,
+                Enabled: true,
+                FailureThreshold: 2,
+                SuccessThreshold: 2,
+                SendOnResolved: true,
+                Triggered: false,
+            },
+        },
     }
-    if service.Alerts[0].Triggered {
-        t.Fatal("The alert shouldn't be triggered anymore")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 1, 0, false, "")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 2, 0, false, "The alert should have failed to trigger, because the alert provider is returning an error")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 3, 0, false, "The alert should still not be triggered, because the alert provider is still returning an error")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 4, 0, false, "The alert should still not be triggered, because the alert provider is still returning an error")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 5, 0, true, "The alert should've been triggered because the alert provider is no longer returning an error")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 1, true, "The alert should've still been triggered")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 2, false, "The alert should've been resolved DESPITE THE ALERT PROVIDER RETURNING AN ERROR. See Alert.Triggered for further explanation.")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
+
+    // Make sure that everything's working as expected after a rough patch
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 1, 0, false, "")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 2, 0, true, "The alert should have triggered")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 1, true, "The alert should still be triggered")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 2, false, "The alert should have been resolved")
+}
+
+func TestHandleAlertingWithProviderThatOnlyReturnsErrorOnResolve(t *testing.T) {
+    _ = os.Setenv("MOCK_ALERT_PROVIDER", "true")
+    defer os.Clearenv()
+
+    cfg := &config.Config{
+        Debug: true,
+        Alerting: &alerting.Config{
+            Custom: &custom.AlertProvider{
+                URL: "https://twinnation.org/health",
+                Method: "GET",
+            },
+        },
+    }
+    config.Set(cfg)
+    service := &core.Service{
+        URL: "http://example.com",
+        Alerts: []*core.Alert{
+            {
+                Type: core.CustomAlert,
+                Enabled: true,
+                FailureThreshold: 1,
+                SuccessThreshold: 1,
+                SendOnResolved: true,
+                Triggered: false,
+            },
+        },
+    }
+
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 1, 0, true, "")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 1, false, "")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 1, 0, true, "")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 1, true, "")
+    _ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
+
+    // Make sure that everything's working as expected after a rough patch
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 1, 0, false, "")
+    HandleAlerting(service, &core.Result{Success: false})
+    verify(t, service, 2, 0, true, "The alert should have triggered")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 1, true, "The alert should still be triggered")
+    HandleAlerting(service, &core.Result{Success: true})
+    verify(t, service, 0, 2, false, "The alert should have been resolved")
+}
+
+func verify(t *testing.T, service *core.Service, expectedNumberOfFailuresInARow, expectedNumberOfSuccessInARow int, expectedTriggered bool, expectedTriggeredReason string) {
+    if service.NumberOfFailuresInARow != expectedNumberOfFailuresInARow {
+        t.Fatalf("service.NumberOfFailuresInARow should've been %d, got %d", expectedNumberOfFailuresInARow, service.NumberOfFailuresInARow)
+    }
+    if service.NumberOfSuccessesInARow != expectedNumberOfSuccessInARow {
+        t.Fatalf("service.NumberOfSuccessesInARow should've been %d, got %d", expectedNumberOfSuccessInARow, service.NumberOfSuccessesInARow)
+    }
+    if service.Alerts[0].Triggered != expectedTriggered {
+        if len(expectedTriggeredReason) != 0 {
+            t.Fatal(expectedTriggeredReason)
+        } else {
+            if expectedTriggered {
+                t.Fatal("The alert should've been triggered")
+            } else {
+                t.Fatal("The alert shouldn't have been triggered")
+            }
+        }
     }
 }