2019-09-05 01:37:13 +02:00
|
|
|
package watchdog
|
2019-09-07 03:59:50 +02:00
|
|
|
|
|
|
|
import (
|
2020-09-05 03:31:28 +02:00
|
|
|
"encoding/json"
|
2020-07-24 22:45:51 +02:00
|
|
|
"fmt"
|
2019-09-09 03:07:08 +02:00
|
|
|
"log"
|
2019-09-07 03:59:50 +02:00
|
|
|
"sync"
|
|
|
|
"time"
|
2020-10-30 16:30:03 +01:00
|
|
|
|
|
|
|
"github.com/TwinProduction/gatus/config"
|
|
|
|
"github.com/TwinProduction/gatus/core"
|
|
|
|
"github.com/TwinProduction/gatus/metric"
|
2019-09-07 03:59:50 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// serviceStatuses maps "<group>_<name>" to the status history of each
	// monitored service; entries are created lazily by UpdateServiceStatuses.
	serviceStatuses = make(map[string]*core.ServiceStatus)

	// serviceStatusesMutex is used to prevent concurrent map access
	serviceStatusesMutex sync.RWMutex

	// monitoringMutex is used to prevent multiple services from being evaluated at the same time.
	// Without this, conditions using response time may become inaccurate.
	monitoringMutex sync.Mutex
)
|
|
|
|
|
2020-11-27 00:09:01 +01:00
|
|
|
// GetJSONEncodedServiceStatuses returns a list of core.ServiceStatus for each services encoded using json.Marshal.
|
2020-09-05 03:31:28 +02:00
|
|
|
// The reason why the encoding is done here is because we use a mutex to prevent concurrent map access.
|
2020-11-27 00:09:01 +01:00
|
|
|
func GetJSONEncodedServiceStatuses() ([]byte, error) {
|
|
|
|
serviceStatusesMutex.RLock()
|
|
|
|
data, err := json.Marshal(serviceStatuses)
|
|
|
|
serviceStatusesMutex.RUnlock()
|
2020-09-05 03:31:28 +02:00
|
|
|
return data, err
|
2019-09-07 03:59:50 +02:00
|
|
|
}
|
|
|
|
|
2020-12-30 02:22:17 +01:00
|
|
|
// GetUptimeByServiceGroupAndName returns the uptime of a service based on its group and name
|
|
|
|
func GetUptimeByServiceGroupAndName(group, name string) *core.Uptime {
|
|
|
|
key := fmt.Sprintf("%s_%s", group, name)
|
2020-12-30 03:04:07 +01:00
|
|
|
serviceStatusesMutex.RLock()
|
2020-12-30 02:22:17 +01:00
|
|
|
serviceStatus, exists := serviceStatuses[key]
|
2020-12-30 03:04:07 +01:00
|
|
|
serviceStatusesMutex.RUnlock()
|
2020-12-30 02:22:17 +01:00
|
|
|
if !exists {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return serviceStatus.Uptime
|
|
|
|
}
|
|
|
|
|
2020-08-18 02:25:29 +02:00
|
|
|
// Monitor loops over each services and starts a goroutine to monitor each services separately
|
2019-12-04 22:44:35 +01:00
|
|
|
func Monitor(cfg *config.Config) {
|
|
|
|
for _, service := range cfg.Services {
|
2020-04-07 00:58:13 +02:00
|
|
|
go monitor(service)
|
|
|
|
// To prevent multiple requests from running at the same time
|
2020-04-15 01:20:00 +02:00
|
|
|
time.Sleep(1111 * time.Millisecond)
|
2020-04-07 00:58:13 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-18 02:25:29 +02:00
|
|
|
// monitor monitors a single service in a loop
|
2020-04-07 00:58:13 +02:00
|
|
|
func monitor(service *core.Service) {
|
2020-09-05 03:31:28 +02:00
|
|
|
cfg := config.Get()
|
2020-04-07 00:58:13 +02:00
|
|
|
for {
|
2020-10-17 05:07:14 +02:00
|
|
|
if !cfg.DisableMonitoringLock {
|
|
|
|
// By placing the lock here, we prevent multiple services from being monitored at the exact same time, which
|
|
|
|
// could cause performance issues and return inaccurate results
|
|
|
|
monitoringMutex.Lock()
|
|
|
|
}
|
2020-09-05 03:31:28 +02:00
|
|
|
if cfg.Debug {
|
|
|
|
log.Printf("[watchdog][monitor] Monitoring serviceName=%s", service.Name)
|
|
|
|
}
|
2020-09-25 01:49:32 +02:00
|
|
|
result := service.EvaluateHealth()
|
2020-04-07 00:58:13 +02:00
|
|
|
metric.PublishMetricsForService(service, result)
|
2020-11-27 00:09:01 +01:00
|
|
|
UpdateServiceStatuses(service, result)
|
2020-07-24 22:45:51 +02:00
|
|
|
var extra string
|
|
|
|
if !result.Success {
|
|
|
|
extra = fmt.Sprintf("responseBody=%s", result.Body)
|
|
|
|
}
|
2020-04-07 00:58:13 +02:00
|
|
|
log.Printf(
|
2020-09-05 03:31:28 +02:00
|
|
|
"[watchdog][monitor] Monitored serviceName=%s; success=%v; errors=%d; requestDuration=%s; %s",
|
2020-04-07 00:58:13 +02:00
|
|
|
service.Name,
|
2020-09-05 03:31:28 +02:00
|
|
|
result.Success,
|
2020-04-07 00:58:13 +02:00
|
|
|
len(result.Errors),
|
|
|
|
result.Duration.Round(time.Millisecond),
|
2020-07-24 22:45:51 +02:00
|
|
|
extra,
|
2020-04-07 00:58:13 +02:00
|
|
|
)
|
2020-09-19 22:22:12 +02:00
|
|
|
HandleAlerting(service, result)
|
2020-09-05 03:31:28 +02:00
|
|
|
if cfg.Debug {
|
|
|
|
log.Printf("[watchdog][monitor] Waiting for interval=%s before monitoring serviceName=%s again", service.Interval, service.Name)
|
|
|
|
}
|
2020-10-17 05:07:14 +02:00
|
|
|
if !cfg.DisableMonitoringLock {
|
|
|
|
monitoringMutex.Unlock()
|
|
|
|
}
|
2020-09-05 00:23:56 +02:00
|
|
|
time.Sleep(service.Interval)
|
|
|
|
}
|
|
|
|
}
|
2020-11-27 00:09:01 +01:00
|
|
|
|
|
|
|
// UpdateServiceStatuses updates the slice of service statuses
|
|
|
|
func UpdateServiceStatuses(service *core.Service, result *core.Result) {
|
2020-11-30 14:44:58 +01:00
|
|
|
key := fmt.Sprintf("%s_%s", service.Group, service.Name)
|
2020-11-27 00:09:01 +01:00
|
|
|
serviceStatusesMutex.Lock()
|
2020-11-30 14:44:58 +01:00
|
|
|
serviceStatus, exists := serviceStatuses[key]
|
2020-11-27 00:09:01 +01:00
|
|
|
if !exists {
|
|
|
|
serviceStatus = core.NewServiceStatus(service)
|
2020-11-30 14:44:58 +01:00
|
|
|
serviceStatuses[key] = serviceStatus
|
2020-11-27 00:09:01 +01:00
|
|
|
}
|
|
|
|
serviceStatus.AddResult(result)
|
|
|
|
serviceStatusesMutex.Unlock()
|
|
|
|
}
|