Mirror of https://github.com/TwiN/gatus.git (synced 2024-11-24 08:53:48 +01:00)

Commit 7d028f5a5f: Merge branch 'master' into RawUptimeData
.examples/docker-compose-mtls/certs/client/client.crt (new file, 29 lines)
@@ -0,0 +1,29 @@
-----BEGIN CERTIFICATE-----
MIIFBjCCAu6gAwIBAgIUHJXHAqywj2v25AgX7pDSZ+LX4iAwDQYJKoZIhvcNAQEL
BQAwEjEQMA4GA1UEAwwHZXhhbXBsZTAeFw0yNDA0MjUwMTQ1MDFaFw0yOTA0MjQw
MTQ1MDFaMBExDzANBgNVBAMMBmNsaWVudDCCAiIwDQYJKoZIhvcNAQEBBQADggIP
ADCCAgoCggIBANTmRlS5BNG82mOdrhtRPIBD5U40nEW4CVFm85ZJ4Bge4Ty86juf
aoCnI6AEfwpVnJhXPzjUsMBxJFMbiCB+QTJRpxTphtK7orpbwRHjaDZNaLr1MrUO
ieADGiHw93zVDikD8FP5vG+2XWWA56hY84Ac0TR9GqPjsW0nobMgBNgsRtbYUD0B
T5QOItK180xQRn4jbys5jRnr161S+Sbg6mglz1LBFBCLmZnhZFZ8FAn87gumbnWN
etSnu9kX6iOXBIaB+3nuHOL4xmAan8tAyen6mPfkXrE5ogovjqFFMTUJOKQoJVp3
zzm/0XYANxoItFGtdjGMTl5IgI220/6kfpn6PYN7y1kYn5EI+UbobD/CuAhd94p6
aQwOXU53/l+eNH/XnTsL/32QQ6qdq8sYqevlslk1M39kKNewWYCeRzYlCVscQk14
O3fkyXrtRkz30xrzfjvJQ/VzMi+e5UlemsCuCXTVZ5YyBnuWyY+mI6lZICltZSSX
VinKzpz+t4Jl7glhKiGHaNAkBX2oLddyf280zw4Cx7nDMPs4uOHONYpm90IxEOJe
zgJ9YxPK9aaKv2AoYLbvhYyKrVT+TFqoEsbQk4vK0t0Gc1j5z4dET31CSOuxVnnU
LYwtbILFc0uZrbuOAbEbXtjPpw2OGqWagD0QpkE8TjN0Hd0ibyXyUuz5AgMBAAGj
VTBTMBEGA1UdEQQKMAiCBmNsaWVudDAdBgNVHQ4EFgQUleILTHG5lT2RhSe9H4fV
xUh0bNUwHwYDVR0jBBgwFoAUbh9Tg4oxxnHJTSaa0WLBTesYwxEwDQYJKoZIhvcN
AQELBQADggIBABq8zjRrDaljl867MXAlmbV7eJkSnaWRFct+N//jCVNnKMYaxyQm
+UG12xYP0U9Zr9vhsqwyTZTQFx/ZFiiz2zfXPtUAppV3AjE67IlKRbec3qmUhj0H
Rv20eNNWXTl1XTX5WDV5887TF+HLZm/4W2ZSBbS3V89cFhBLosy7HnBGrP0hACne
ZbdQWnnLHJMDKXkZey1H1ZLQQCQdAKGS147firj29M8uzSRHgrR6pvsNQnRT0zDL
TlTJoxyGTMaoj+1IZvRsAYMZCRb8Yct/v2i/ukIykFWUJZ+1Z3UZhGrX+gdhLfZM
jAP4VQ+vFgwD6NEXAA2DatoRqxbN1ZGJQkvnobWJdZDiYu4hBCs8ugKUTE+0iXWt
hSyrAVUspFCIeDN4xsXT5b0j2Ps4bpSAiGx+aDDTPUnd881I6JGCiIavgvdFMLCW
yOXJOZvXcNQwsndkob5fZAEqetjrARsHhQuygEq/LnPc6lWsO8O6UzYArEiKWTMx
N/5hx12Pb7aaQd1f4P3gmmHMb/YiCQK1Qy5d4v68POeqyrLvAHbvCwEMhBAbnLvw
gne3psql8s5wxhnzwYltcBUmmAw1t33CwzRBGEKifRdLGtA9pbua4G/tomcDDjVS
ChsHGebJvNxOnsQqoGgozqM2x8ScxmJzIflGxrKmEA8ybHpU0d02Xp3b
-----END CERTIFICATE-----
.examples/docker-compose-mtls/certs/client/client.key (new file, 51 lines)
@@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEA1OZGVLkE0bzaY52uG1E8gEPlTjScRbgJUWbzlkngGB7hPLzq
O59qgKcjoAR/ClWcmFc/ONSwwHEkUxuIIH5BMlGnFOmG0ruiulvBEeNoNk1ouvUy
tQ6J4AMaIfD3fNUOKQPwU/m8b7ZdZYDnqFjzgBzRNH0ao+OxbSehsyAE2CxG1thQ
PQFPlA4i0rXzTFBGfiNvKzmNGevXrVL5JuDqaCXPUsEUEIuZmeFkVnwUCfzuC6Zu
dY161Ke72RfqI5cEhoH7ee4c4vjGYBqfy0DJ6fqY9+ResTmiCi+OoUUxNQk4pCgl
WnfPOb/RdgA3Ggi0Ua12MYxOXkiAjbbT/qR+mfo9g3vLWRifkQj5RuhsP8K4CF33
inppDA5dTnf+X540f9edOwv/fZBDqp2ryxip6+WyWTUzf2Qo17BZgJ5HNiUJWxxC
TXg7d+TJeu1GTPfTGvN+O8lD9XMyL57lSV6awK4JdNVnljIGe5bJj6YjqVkgKW1l
JJdWKcrOnP63gmXuCWEqIYdo0CQFfagt13J/bzTPDgLHucMw+zi44c41imb3QjEQ
4l7OAn1jE8r1poq/YChgtu+FjIqtVP5MWqgSxtCTi8rS3QZzWPnPh0RPfUJI67FW
edQtjC1sgsVzS5mtu44BsRte2M+nDY4apZqAPRCmQTxOM3Qd3SJvJfJS7PkCAwEA
AQKCAgAPwAALUStib3aMkLlfpfve1VGyc8FChcySrBYbKS3zOt2Y27T3DOJuesRE
7fA5Yyn+5H1129jo87XR5s3ZnDLV4SUw2THd3H8RCwFWgcdPinHUBZhnEpial5V9
q1DzzY3gSj1OSRcVVfLE3pYaEIflvhFasQ1L0JLAq4I9OSzX5+FPEEOnWmB5Ey6k
/fbuJLDXsLwPAOadDfiFBwgNm0KxdRKdtvugBGPW9s4Fzo9rnxLmjmfKOdmQv96Y
FI/Vat0Cgmfd661RZpbDvKnTpIsLdzw3zTpAIYOzqImvCT+3AmP2qPhSdV3sPMeR
047qqyLZOVxEFXLQFiGvL4uxYUPy8k0ZI9xkgOfZ/uASozMWsHkaD04+UDi1+kw5
nfasZLvOWBW/WE/E1Rfz8IiYTeZbgTnY4CraiLrIRc0LGgD1Df4gNr25+P+LKLyK
/WW89dl6/397HOFnA7CHi7DaA8+9uZAjOWhoCNDdqAVa3QpDD/3/iRiih26bjJfH
2+sarxU8GovDZFxWd59BUP3jkukCFH+CliQy72JtLXiuPNPAWeGV9UXxtIu40sRX
Sax/TQytYi2J9NJFZFMTwVueIfzsWc8dyM+IPAYJQxN94xYKQU4+Rb/wqqHgUfjT
1ZQJb8Cmg56IDY/0EPJWQ0qgnE7TZbY2BOEYbpOzdccwUbcEjQKCAQEA8kVyw4Hw
nqcDWXjzMhOOoRoF8CNwXBvE2KBzpuAioivGcSkjkm8vLGfQYAbDOVMPFt3xlZS0
0lQm894176Kk8BiMqtyPRWWOsv4vYMBTqbehKn09Kbh6lM7d7jO7sh5iWf4jt3Bw
Sk4XhZ9oQ/kpnEKiHPymHQY3pVYEyFCGJ8mdS6g/TWiYmjMjkQDVFA4xkiyJ0S5J
NGYxI+YXtHVTVNSePKvY0h51EqTxsexAphGjXnQ3xoe6e3tVGBkeEkcZlESFD/91
0iqdc5VtKQOwy6Tj4Awk7oK5/u3tfpyIyo31LQIqreTqMO534838lpyp3CbRdvCF
QdCNpKFX1gZgmwKCAQEA4Pa9VKO3Aw95fpp0T81xNi+Js/NhdsvQyv9NI9xOKKQU
hiWxmYmyyna3zliDGlqtlw113JFTNQYl1k1yi4JQPu2gnj8te9nB0yv0RVxvbTOq
u8K1j9Xmj8XVpcKftusQsZ2xu52ONj3ZOOf22wE4Y6mdQcps+rN6XTHRBn7a5b0v
ZCvWf4CIttdIh51pZUIbZKHTU51uU7AhTCY/wEUtiHwYTT9Wiy9Lmay5Lh2s2PCz
yPE5Y970nOzlSCUl3bVgY1t0xbQtaO5AJ/iuw/vNw+YAiAIPNDUcbcK5njb//+0E
uTEtDA6SHeYfsNXGDzxipueKXFHfJLCTXnnT5/1v+wKCAQEA0pF78uNAQJSGe8B9
F3waDnmwyYvzv4q/J00l19edIniLrJUF/uM2DBFa8etOyMchKU3UCJ9MHjbX+EOd
e19QngGoWWUD/VwMkBQPF7dxv+QDZwudGmLl3+qAx+Uc8O4pq3AQmQJYBq0jEpd/
Jv0rpk3f2vPYaQebW8+MrpIWWASK+1QLWPtdD0D9W61uhVTkzth5HF9vbuSXN01o
Mwd6WxPFSJRQCihAtui3zV26vtw7sv+t7pbPhT2nsx85nMdBOzXmtQXi4Lz7RpeM
XgaAJi91g6jqfIcQo7smHVJuLib9/pWQhL2estLBTzUcocced2Mh0Y+xMofSZFF7
J2E5mwKCAQAO9npbUdRPYM0c7ZsE385C42COVobKBv5pMhfoZbPRIjC3R3SLmMwK
iWDqWZrGuvdGz79iH0xgf3suyNHwk4dQ2C9RtzQIQ9CPgiHqJx7GLaSSfn3jBkAi
me7+6nYDDZl7pth2eSFHXE/BaDRUFr2wa0ypXpRnDF78Kd8URoW6uB2Z1QycSGlP
d/w8AO1Mrdvykozix9rZuCJO1VByMme350EaijbwZQHrQ8DBX3nqp//dQqYljWPJ
uDv703S0TWcO1LtslvJaQ1aDEhhVsr7Z48dvRGvMdifg6Q29hzz5wcMJqkqrvaBc
Wr0K3v0gcEzDey0JvOxRnWj/5KyChqnXAoIBAQDq6Dsks6BjVP4Y1HaA/NWcZxUU
EZfNCTA19jIHSUiPbWzWHNdndrUq33HkPorNmFaEIrTqd/viqahr2nXpYiY/7E+V
cpn9eSxot5J8DB4VI92UG9kixxY4K7QTMKvV43Rt6BLosW/cHxW5XTNhB4JDK+TO
NlHH48fUp2qJh7/qwSikDG130RVHKwK/5Fv3NQyXTw1/n9bhnaC4eSvV39CNSeb5
rWNEZcnc9zHT2z1UespzVTxVy4hscrkssXxcCq4bOF4bnDFjfblE43o/KrVr2/Ub
jzpXQrAwXNq7pAkIpin0v40lCeTMosSgQLFqMWmtmlCpBVkyEAc9ZYXc3Vs0
-----END RSA PRIVATE KEY-----
.examples/docker-compose-mtls/certs/server/ca.crt (new file, 29 lines)
@@ -0,0 +1,29 @@
-----BEGIN CERTIFICATE-----
MIIE9DCCAtygAwIBAgIUCXgA3IbeA2mn8DQ0E5IxaKBLtf8wDQYJKoZIhvcNAQEL
BQAwEjEQMA4GA1UEAwwHZXhhbXBsZTAeFw0yNDA0MjUwMTE5MzRaFw0zNDA0MjMw
MTE5MzRaMBIxEDAOBgNVBAMMB2V4YW1wbGUwggIiMA0GCSqGSIb3DQEBAQUAA4IC
DwAwggIKAoICAQDLE4aTrVJrAVYksFJt5fIVhEJT5T0cLqvtDRf9hXA5Gowremsl
VJPBm4qbdImzJZCfCcbVjFEBw8h9xID1JUqRWjJ8BfTnpa4qc1e+xRtnvC+OsUeT
CCgZvK3TZ5vFsaEbRoNGuiaNq9WSTfjLwTxkK6C3Xogm9uDx73PdRob1TNK5A9mE
Ws3ZyV91+g1phKdlNMRaK+wUrjUjEMLgr0t5A5t6WKefsGrFUDaT3sye3ZxDYuEa
ljt+F8hLVyvkDBAhh6B4S5dQILjp7L3VgOsG7Hx9py1TwCbpWXZEuee/1/2OD8tA
ALsxkvRE1w4AZzLPYRL/dOMllLjROQ4VugU8GVpNU7saK5SeWBw3XHyJ9m8vne3R
cPWaZTfkwfj8NjCgi9BzBPW8/uw7XZMmQFyTj494OKM3T5JQ5jZ5XD97ONm9h+C/
oOmkcWHz6IwEUu7XV5IESxiFlrq8ByAYF98XPhn2wMMrm2OvHMOwrfw2+5U8je5C
z70p9kpiGK8qCyjbOl9im975jwFCbl7LSj3Y+0+vRlTG/JA4jNZhXsMJcAxeJpvr
pmm/IzN+uXNQzmKzBHVDw+mTUMPziRsUq4q6WrcuQFZa6kQFGNYWI/eWV8o4AAvp
HtrOGdSyU19w0QqPW0wHmhsV2XFcn6H/E1Qg6sxWpl45YWJFhNaITxm1EQIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAgQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
bh9Tg4oxxnHJTSaa0WLBTesYwxEwDQYJKoZIhvcNAQELBQADggIBAKvOh81Gag0r
0ipYS9aK6rp58b6jPpF6shr3xFiJQVovgSvxNS3aWolh+ZupTCC3H2Q1ZUgatak0
VyEJVO4a7Tz+1XlA6KErhnORC6HB/fgr5KEGraO3Q1uWonPal5QU8xHFStbRaXfx
hl/k4LLhIdJqcJE+XX/AL8ekZ3NPDtf9+k4V+RBuarLGuKgOtBB8+1qjSpClmW2B
DaWPlrLPOr2Sd29WOeWHifwVc6kBGpwM3g5VGdDsNX4Ba5eIG3lX2kUzJ8wNGEf0
bZxcVbTBY+D4JaV4WXoeFmajjK3EdizRpJRZw3fM0ZIeqVYysByNu/TovYLJnBPs
5AybnO4RzYONKJtZ1GtQgJyG+80/VffDJeBmHKEiYvE6mvOFEBAcU4VLU6sfwfT1
y1dZq5G9Km72Fg5kCuYDXTT+PB5VAV3Z6k819tG3TyI4hPlEphpoidRbZ+QS9tK5
RgHah9EJoM7tDAN/mUVHJHQhhLJDBn+iCBYgSJVLwoE+F39NO9oFPD/ZxhJkbk9b
LkFnpjrVbwD1CNnawX3I2Eytg1IbbzyviQIbpSAEpotk9pCLMAxTR3a08wrVMwst
2XVSrgK0uUKsZhCIc+q21k98aeNIINor15humizngyBWYOk8SqV84ZNcD6VlM3Qv
ShSKoAkdKxcGG1+MKPt5b7zqvTo8BBPM
-----END CERTIFICATE-----
.examples/docker-compose-mtls/certs/server/server.crt (new file, 30 lines)
@@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFDjCCAvagAwIBAgITc5Ejz7RzBJ2/PcUMsVhj41RtQDANBgkqhkiG9w0BAQsF
ADASMRAwDgYDVQQDDAdleGFtcGxlMB4XDTI0MDQyNTAxNDQ1N1oXDTI5MDQyNDAx
NDQ1N1owEDEOMAwGA1UEAwwFbmdpbngwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
ggIKAoICAQCgbLBnVrBdRkBF2XmJgDTiRqWFPQledzCrkHF4eiUvtEytJhkpoRv2
+SiRPsjCo3XjwcgQIgSy1sHUV8Sazn7V5ux/XBRovhdhUivzI8JSRYj6qwqdUnOy
dG1ZEy/VRLsIVfoFB0jKJrZCXMT256xkYTlsgPePDsduO7IPPrTN0/I/qBvINFet
zgWCl2qlZgF4c/MHljo2TR1KlBv0RJUZbfXPwemUazyMrh/MfQHaHE5pfrmMWFGA
6yLYHEhG+fy5d3F/1+4J24D2j7deIFmmuJMPSlAPt1UjDm7M/bmoTxDG+1MRXSnN
647EzzS0TFZspHe2+yBbw6j0MMiWMzNZX2iXGVcswXwrphe7ro6OITynM76gDTuM
ISYXKYHayqW0rHFRlKxMcnmrpf5tBuK7XKyoQv/LbFKI1e+j1bNVe7OZtC88EWRc
SD8WDLqo/3rsxJkRXRW/49hO1nynHrknXJEpZeRnTyglS+VCzXYD0XzwzPKN7CyN
CHpYpOcWrAMF+EJnE4WRVyJAAt4C1pGhiwn0yCvLEGXXedI/rR5zmUBKitSe7oMT
J82H/VaGtwH0lOD9Jjsv9cb+s1c3tChPDKvgGGDaFnlehKg9TM7p+xc9mnEsitfv
ovSGzYHk29nQu/S4QrPfWuCNwM2vP9OQ+VJyzDzSyH8iuPPmkfmK5wIDAQABo18w
XTAbBgNVHREEFDASggVuZ2lueIIJbG9jYWxob3N0MB0GA1UdDgQWBBT89oboWPBC
oNsSbaNquzrjTza6xDAfBgNVHSMEGDAWgBRuH1ODijHGcclNJprRYsFN6xjDETAN
BgkqhkiG9w0BAQsFAAOCAgEAeg8QwBTne1IGZMDvIGgs95lifzuTXGVQWEid7VVp
MmXGRYsweb0MwTUq3gSUc+3OPibR0i5HCJRR04H4U+cIjR6em1foIV/bW6nTaSls
xQAj92eMmzOo/KtOYqMnk//+Da5NvY0myWa/8FgJ7rK1tOZYiTZqFOlIsaiQMHgp
/PEkZBP5V57h0PY7T7tEj4SCw3DJ6qzzIdpD8T3+9kXd9dcrrjbivBkkJ23agcG5
wBcI862ELNJOD7p7+OFsv7IRsoXXYrydaDg8OJQovh4RccRqVEQu3hZdi7cPb8xJ
G7Gxn8SfSVcPg/UObiggydMl8E8QwqWAzJHvl1KUECd5QG6eq984JTR7zQB2iGb6
1qq+/d9uciuB2YY2h/0rl3Fjy6J6k3fpQK577TlJjZc0F4WH8fW5bcsyGTszxQLI
jQ6FuSOr55lZ9O3R3+95tAdJTrWsxX7j7xMIAXSYrfNt5HM91XNhqISF4SIZOBB6
enVrrJ/oCFqVSbYf6RVQz3XmPEEMh+k9KdwvIvwoS9NivLD3QH0RjhTyzHbf+LlR
rWM46XhmBwajlpnIuuMp6jZcXnbhTO1SheoRVMdijcnW+zrmx5oyn3peCfPqOVLz
95YfJUIFCt+0p/87/0Mm76uVemK6kFKZJQPnfbAdsKF7igPZfUQx6wZZP1qK9ZEU
eOk=
-----END CERTIFICATE-----
.examples/docker-compose-mtls/certs/server/server.key (new file, 51 lines)
@@ -0,0 +1,51 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAoGywZ1awXUZARdl5iYA04kalhT0JXncwq5BxeHolL7RMrSYZ
KaEb9vkokT7IwqN148HIECIEstbB1FfEms5+1ebsf1wUaL4XYVIr8yPCUkWI+qsK
nVJzsnRtWRMv1US7CFX6BQdIyia2QlzE9uesZGE5bID3jw7HbjuyDz60zdPyP6gb
yDRXrc4FgpdqpWYBeHPzB5Y6Nk0dSpQb9ESVGW31z8HplGs8jK4fzH0B2hxOaX65
jFhRgOsi2BxIRvn8uXdxf9fuCduA9o+3XiBZpriTD0pQD7dVIw5uzP25qE8QxvtT
EV0pzeuOxM80tExWbKR3tvsgW8Oo9DDIljMzWV9olxlXLMF8K6YXu66OjiE8pzO+
oA07jCEmFymB2sqltKxxUZSsTHJ5q6X+bQbiu1ysqEL/y2xSiNXvo9WzVXuzmbQv
PBFkXEg/Fgy6qP967MSZEV0Vv+PYTtZ8px65J1yRKWXkZ08oJUvlQs12A9F88Mzy
jewsjQh6WKTnFqwDBfhCZxOFkVciQALeAtaRoYsJ9MgryxBl13nSP60ec5lASorU
nu6DEyfNh/1WhrcB9JTg/SY7L/XG/rNXN7QoTwyr4Bhg2hZ5XoSoPUzO6fsXPZpx
LIrX76L0hs2B5NvZ0Lv0uEKz31rgjcDNrz/TkPlScsw80sh/Irjz5pH5iucCAwEA
AQKCAgADiEEeFV+OvjQ+FXrCl0sSzGFqnJxvMwqkTGrjLzVQZpTlnxggvYZjGrtU
71/2QSkgWazxBf66fVYJOeF/Uxqh1RLR/xIH+F+FagzDrr7hltxcQJXcPuuDO2MI
+g4skPXZSiNWJwHoSY/ryCUiFpnKIAXmqLRKtxWXDMNv6H6MpaUI18e80cI4dnfS
l0jm2Wcg4tSwDxO7DFmfwcEX0MbDp5Mo/ukIto+/vTnAA+Sdi9ACLKMjPvKUdxju
TzkcLvbskn+yQ+ve1bFyPFnaPbYboKbESGuY3P2H5xJzewayeQMyjmgW0slP2mbr
WHCdo6ynebuVENR2kMlQjx5riDcSMMX5TLGPgNL7ZBf2b52mUgFyQb27eO2WXeyH
YLtInlKA44bdi76sDK+s8zYywZnxsUy7xrKhHE5rqz964EfoLRcY/fCm7XnMo6uK
VviBtdPebsMqkZOUKSaYSRpUgXILTud5FD+m68FeVjUvQFQqHYEa3gx+rAIjKBIn
082NzfDZSHVsvG+iB5q+37R8C0/YUzSb3TXys5pA82YsjIFeQiVE4hrV1yeNIZf6
2iaPD/r5H3vt0rFEDINZafC+6bTTRQoq8TOCZFh/Lu+ynXKOPrVUF8/y3sd8+T2v
kRDOL37reUotjE1lbO4RhLgHbeWHlT/PPnF7RDKCe6/erg2MqQKCAQEAy3f8B6I8
7CP4CZmMDWwHWsjMS/HGZgvPPbmWhaeZZmFyYi7I8MruJPhlhlw6YoUIV9Vvp8zE
eLtDvZ5WXuL38aRElWzNyrhrU1/vH4pkaFk+OgRcaleGUof+go0lE8BIYnWoWovo
/F7lQMQmHY4SuwF4oj6dpus7jMm41PQqDTsjofdLgwVAGy30LIkVt8qYha77sL8N
0ohXomDGik0nVa+i2mOJ0UuooGYF8WhujzVcELcerYvvg9kFDqJaEXdfTx4DRwiz
6f5gSbZHME7moqEkcJRtwj8TXSJYRHTI8ngS0xzyV0u2RL3FOxTcgikJIkmU6W3L
IcbP6XVlrCdoswKCAQEAydfBcsYcS2mMqCOdKkGVj6zBriT78/5dtPYeId9WkrnX
1vz6ErjHQ8vZkduvCm3KkijQvva+DFV0sv24qTyA2BIoDUJdk7cY962nR4Q9FHTX
Dkn1kgeKg4TtNdgo2KsIUn7bCibKASCExo6rO3PWiQyF+jTJVDD3rXx7+7N7WJaz
zTVt6BNOWoIjTufdXfRWt3wi0H6sSkqvRWoIAaguXkKXH7oBx0gKs+oAVovFvg7A
LLEtTszsv2LmbpGWaiT3Ny215mA0ZGI9T4utK7oUgd+DlV0+vj5tFfsye4COpCyG
V/ZQ7CBbxHDDak3R3fYy5pOwmh6814wHMyKKfdGm/QKCAQEAiW4Pk3BnyfA5lvJZ
gK9ZAF7kbt9tbHvJjR2Pp9Meb+KeCecj3lCTLfGBUZF19hl5GyqU8jgC9LE3/hm2
qPyREGwtzufg0G5kP7pqn1kwnLK6ryFG8qUPmys0IyYGxyJ3QdnKzu31fpDyNB7I
x+mwiRNjUeMNRTNZ06xk5aHNzYYGeV25aVPgivstE++79ZooDxOz+Rvy0CM7XfgT
4lJeoSeyzeOxsOZzjXObzAUHuD8IYlntpLcCHoI1Qj8yqt2ASMYy3IXqT8B7dQ5j
YyPH8Ez7efcnc656+8s453QiTnP/8wx4O7Jt+FxdnZxnnJrvCnO82zZHoBbTVBLx
i6hKtQKCAQA0j3SWmLRBhwjTuAJzQITb1xbQbF0X2oM4XmbWVzxKFQ75swLD4U4y
f2D2tIhOZOy9RtelAsfWmmI7QgrWNyUuHvxDB6cqkiF0Tcoju3HUY+CknenOzxvo
x7KltNZeJZuTL+mGKTetN3Sb6Ab7Al05bwNsdlZ/EAlPKf13O/PAy+2iYGlwZ6ad
twnOwF5K2xfBzBecx3/CENS3dLcFB3CbpyeHYX6ZEE+JLkRMRTWHGnw8px6vSHnW
FMEAxfSvS1T9D3Awv5ilE1f34N2FZ31znGq9eHygOc1aTgGFW6LJabbKLSBBfOOo
sdyRUBZ4gGYc2RTB7YMrdhFh5Xq+7NtZAoIBAQCOJ3CLecp/rS+lGy7oyx4f6QDd
zH/30Y/uvXLPUj+Ljg9bMTG9chjaKfyApXv6rcQI0d6wrqAunNl1b3opBQjsGCSt
bpBV/rGg3sl752og6KU1PCZ2KkVYPjugNhqPGonNh8tlw+1xFyBdt0c68g/auIHq
WaT5tWVfP01Ri43RjyCgNtJ2TJUzbA40BteDHPWKeM1lZ6e92fJTp5IjQ/Okc41u
Elr7p22fx/N04JTX9G6oGdxM7Gh2Uf4i4PnNOi+C3xqLrtUEi/OLof2UHlatypt9
pix0bXJtZE7WfFfesQIxGffVBhgN3UgqhAf2wquHgm1O17JXrmkR6JSYNpKc
-----END RSA PRIVATE KEY-----
.examples/docker-compose-mtls/config/config.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
endpoints:
  - name: example
    url: https://nginx
    interval: 30s
    conditions:
      - "[STATUS] == 200"
    client:
      # mtls
      insecure: true
      tls:
        certificate-file: /certs/client.crt
        private-key-file: /certs/client.key
        renegotiation: once
.examples/docker-compose-mtls/docker-compose.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
version: "3.9"
services:
  nginx:
    image: nginx:stable
    volumes:
      - ./certs/server:/etc/nginx/certs
      - ./nginx:/etc/nginx/conf.d
    ports:
      - "8443:443"
    networks:
      - mtls

  gatus:
    image: twinproduction/gatus:latest
    restart: always
    ports:
      - "8080:8080"
    volumes:
      - ./config:/config
      - ./certs/client:/certs
    environment:
      - GATUS_CONFIG_PATH=/config
    networks:
      - mtls

networks:
  mtls:
.examples/docker-compose-mtls/nginx/default.conf (new file, 16 lines)
@@ -0,0 +1,16 @@
server {
    listen 443 ssl;

    ssl_certificate /etc/nginx/certs/server.crt;
    ssl_certificate_key /etc/nginx/certs/server.key;
    ssl_client_certificate /etc/nginx/certs/ca.crt;
    ssl_verify_client on;

    location / {
        if ($ssl_client_verify != SUCCESS) {
            return 403;
        }
        root /usr/share/nginx/html;
        index index.html index.htm;
    }
}
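The client side of this example (config/config.yaml above) is just a standard mutual-TLS HTTP client. As a rough Go sketch of what those `client.tls` settings amount to (the certificate paths and the `https://nginx` URL come from the example files; everything else here is illustrative and not part of the commit):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Load the client certificate and key mounted at /certs, as in config.yaml.
	clientCert, err := tls.LoadX509KeyPair("/certs/client.crt", "/certs/client.key")
	if err != nil {
		panic(err)
	}
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates:       []tls.Certificate{clientCert},
				InsecureSkipVerify: true,                          // mirrors `insecure: true` (the example server cert is self-signed)
				Renegotiation:      tls.RenegotiateOnceAsClient,   // mirrors `renegotiation: once`
			},
		},
	}
	resp, err := client.Get("https://nginx") // the nginx service name from docker-compose.yml
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// nginx returns 403 unless the presented client certificate verifies against ca.crt.
	fmt.Println(resp.Status)
}
```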
.github/assets/gitea-alerts.png (new binary file, 638 KiB; not shown)
.github/workflows/benchmark.yml (vendored, 2 lines changed)
@@ -22,7 +22,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v5
         with:
-          go-version: 1.19
+          go-version: 1.22.2
          repository: "${{ github.event.inputs.repository || 'TwiN/gatus' }}"
          ref: "${{ github.event.inputs.ref || 'master' }}"
       - uses: actions/checkout@v4
.github/workflows/publish-experimental.yml (vendored, 2 lines changed)
@@ -18,7 +18,7 @@ jobs:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           platforms: linux/amd64
           pull: true
.github/workflows/publish-latest-to-ghcr.yml (vendored, 4 lines changed)
@@ -30,9 +30,9 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
-          platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
+          platforms: linux/amd64,linux/arm/v7,linux/arm64
           pull: true
           push: true
           tags: ${{ env.IMAGE_REPOSITORY }}:latest
.github/workflows/publish-latest.yml (vendored, 4 lines changed)
@@ -26,9 +26,9 @@ jobs:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
-          platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
+          platforms: linux/amd64,linux/arm/v7,linux/arm64
           pull: true
           push: true
           tags: ${{ env.IMAGE_REPOSITORY }}:latest
@@ -26,9 +26,9 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
-          platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
+          platforms: linux/amd64,linux/arm/v7,linux/arm64
           pull: true
           push: true
           tags: |
.github/workflows/publish-release.yml (vendored, 4 lines changed)
@@ -23,9 +23,9 @@ jobs:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
-          platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
+          platforms: linux/amd64,linux/arm/v7,linux/arm64
           pull: true
           push: true
           tags: |
.github/workflows/test.yml (vendored, 4 lines changed)
@@ -18,7 +18,7 @@ jobs:
     steps:
       - uses: actions/setup-go@v5
         with:
-          go-version: 1.21
+          go-version: 1.22.2
       - uses: actions/checkout@v4
       - name: Build binary to make sure it works
         run: go build
@@ -28,7 +28,7 @@ jobs:
         # was configured by the "Set up Go" step (otherwise, it'd use sudo's "go" executable)
         run: sudo env "PATH=$PATH" "GOROOT=$GOROOT" go test ./... -race -coverprofile=coverage.txt -covermode=atomic
       - name: Codecov
-        uses: codecov/codecov-action@v4.3.0
+        uses: codecov/codecov-action@v4.5.0
        with:
           files: ./coverage.txt
           token: ${{ secrets.CODECOV_TOKEN }}
README.md (139 lines changed)
@@ -7,7 +7,6 @@
 [![Docker pulls](https://img.shields.io/docker/pulls/twinproduction/gatus.svg)](https://cloud.docker.com/repository/docker/twinproduction/gatus)
 [![Follow TwiN](https://img.shields.io/github/followers/TwiN?label=Follow&style=social)](https://github.com/TwiN)

 Gatus is a developer-oriented health dashboard that gives you the ability to monitor your services using HTTP, ICMP, TCP, and even DNS
 queries as well as evaluate the result of said queries by using a list of conditions on values like the status code,
 the response time, the certificate expiration, the body and many others. The icing on top is that each of these health
@@ -54,6 +53,7 @@ Have any feedback or questions? [Create a discussion](https://github.com/TwiN/ga
 - [Alerting](#alerting)
 - [Configuring Discord alerts](#configuring-discord-alerts)
 - [Configuring Email alerts](#configuring-email-alerts)
+- [Configuring Gitea alerts](#configuring-gitea-alerts)
 - [Configuring GitHub alerts](#configuring-github-alerts)
 - [Configuring GitLab alerts](#configuring-gitlab-alerts)
 - [Configuring Google Chat alerts](#configuring-google-chat-alerts)
@@ -72,6 +72,7 @@ Have any feedback or questions? [Create a discussion](https://github.com/TwiN/ga
 - [Configuring Twilio alerts](#configuring-twilio-alerts)
 - [Configuring AWS SES alerts](#configuring-aws-ses-alerts)
 - [Configuring custom alerts](#configuring-custom-alerts)
+- [Configuring Zulip alerts](#configuring-zulip-alerts)
 - [Setting a default alert](#setting-a-default-alert)
 - [Maintenance](#maintenance)
 - [Security](#security)
@@ -109,6 +110,7 @@ Have any feedback or questions? [Create a discussion](https://github.com/TwiN/ga
 - [Configuring a startup delay](#configuring-a-startup-delay)
 - [Keeping your configuration small](#keeping-your-configuration-small)
 - [Proxy client configuration](#proxy-client-configuration)
+- [How to fix 431 Request Header Fields Too Large error](#how-to-fix-431-request-header-fields-too-large-error)
 - [Badges](#badges)
 - [Uptime](#uptime)
 - [Health](#health)
@@ -279,7 +281,7 @@ This allows you to monitor anything you want, even when what you want to check l

 For instance:
 - You can create your own agent that lives in a private network and pushes the status of your services to a publicly-exposed Gatus instance
 - You can monitor services that are not supported by Gatus
 - You can implement your own monitoring system while using Gatus as the dashboard

 | Parameter | Description | Default |
@@ -305,12 +307,13 @@ external-endpoints:

 To push the status of an external endpoint, the request would have to look like this:
 ```
-POST /api/v1/endpoints/{key}/external?success={success}
+POST /api/v1/endpoints/{key}/external?success={success}&error={error}
 ```
 Where:
 - `{key}` has the pattern `<GROUP_NAME>_<ENDPOINT_NAME>` in which both variables have ` `, `/`, `_`, `,` and `.` replaced by `-`.
 - Using the example configuration above, the key would be `core_ext-ep-test`.
 - `{success}` is a boolean (`true` or `false`) value indicating whether the health check was successful or not.
+- `{error}`: a string describing the reason for a failed health check. If {success} is false, this should contain the error message; if the check is successful, it can be omitted or left empty.

 You must also pass the token as a `Bearer` token in the `Authorization` header.
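As a rough illustration of this push API (the Gatus base URL and token value below are hypothetical; the key `core_ext-ep-test` is the one derived in the example above):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// pushExternalEndpointStatus reports the outcome of a health check to a Gatus instance.
func pushExternalEndpointStatus(baseURL, token, key string, success bool, errMsg string) error {
	target := fmt.Sprintf("%s/api/v1/endpoints/%s/external?success=%t", baseURL, key, success)
	if !success && errMsg != "" {
		target += "&error=" + url.QueryEscape(errMsg)
	}
	req, err := http.NewRequest(http.MethodPost, target, nil)
	if err != nil {
		return err
	}
	// The token configured on the external endpoint is passed as a Bearer token.
	req.Header.Set("Authorization", "Bearer "+token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// Hypothetical instance URL and token; the key matches the example configuration.
	err := pushExternalEndpointStatus("https://status.example.org", "hypothetical-token", "core_ext-ep-test", false, "tcp: dial timeout")
	if err != nil {
		fmt.Println(err)
	}
}
```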
@@ -660,8 +663,45 @@ endpoints:

 > ⚠ Some mail servers are painfully slow.

+#### Configuring Gitea alerts
+
+| Parameter | Description | Default |
+|:---------------------------------|:-----------------------------------------------------------------------------------------------------------|:--------------|
+| `alerting.gitea` | Configuration for alerts of type `gitea` | `{}` |
+| `alerting.gitea.repository-url` | Gitea repository URL (e.g. `https://gitea.com/TwiN/example`) | Required `""` |
+| `alerting.gitea.token` | Personal access token to use for authentication. <br />Must have at least RW on issues and RO on metadata. | Required `""` |
+| `alerting.gitea.default-alert` | Default alert configuration. <br />See [Setting a default alert](#setting-a-default-alert). | N/A |
+
+The Gitea alerting provider creates an issue prefixed with `alert(gatus):` and suffixed with the endpoint's display
+name for each alert. If `send-on-resolved` is set to `true` on the endpoint alert, the issue will be automatically
+closed when the alert is resolved.
+
+```yaml
+alerting:
+  gitea:
+    repository-url: "https://gitea.com/TwiN/test"
+    token: "349d63f16......"
+
+endpoints:
+  - name: example
+    url: "https://twin.sh/health"
+    interval: 5m
+    conditions:
+      - "[STATUS] == 200"
+      - "[BODY].status == UP"
+      - "[RESPONSE_TIME] < 75"
+    alerts:
+      - type: gitea
+        failure-threshold: 2
+        success-threshold: 3
+        send-on-resolved: true
+        description: "Everything's burning AAAAAHHHHHHHHHHHHHHH"
+```
+
+![Gitea alert](.github/assets/gitea-alerts.png)
+
 #### Configuring GitHub alerts

 | Parameter | Description | Default |
 |:---------------------------------|:-----------------------------------------------------------------------------------------------------------|:--------------|
 | `alerting.github` | Configuration for alerts of type `github` | `{}` |
@@ -697,7 +737,6 @@ endpoints:

 ![GitHub alert](.github/assets/github-alerts.png)

-
 #### Configuring GitLab alerts
 | Parameter | Description | Default |
 |:------------------------------------|:--------------------------------------------------------------------------------------------------------------------|:--------------|
@@ -877,6 +916,7 @@ endpoints:
 |:----------------------------------------------|:--------------------------------------------------------------------------------------------|:--------------|
 | `alerting.mattermost` | Configuration for alerts of type `mattermost` | `{}` |
 | `alerting.mattermost.webhook-url` | Mattermost Webhook URL | Required `""` |
+| `alerting.mattermost.channel` | Mattermost channel name override (optional) | `""` |
 | `alerting.mattermost.client` | Client configuration. <br />See [Client configuration](#client-configuration). | `{}` |
 | `alerting.mattermost.default-alert` | Default alert configuration. <br />See [Setting a default alert](#setting-a-default-alert). | N/A |
 | `alerting.mattermost.overrides` | List of overrides that may be prioritized over the default configuration | `[]` |
@@ -943,14 +983,18 @@ endpoints:

 #### Configuring Ntfy alerts
 | Parameter | Description | Default |
-|:------------------------------|:-------------------------------------------------------------------------------------------|:------------------|
+|:---------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------|:------------------|
 | `alerting.ntfy` | Configuration for alerts of type `ntfy` | `{}` |
 | `alerting.ntfy.topic` | Topic at which the alert will be sent | Required `""` |
 | `alerting.ntfy.url` | The URL of the target server | `https://ntfy.sh` |
 | `alerting.ntfy.token` | [Access token](https://docs.ntfy.sh/publish/#access-tokens) for restricted topics | `""` |
-| `alerting.ntfy.priority` | The priority of the alert | `3` |
-| `alerting.ntfy.default-alert` | Default alert configuration. <br />See [Setting a default alert](#setting-a-default-alert) | N/A |
+| `alerting.ntfy.email` | E-mail address for additional e-mail notifications | `""` |
+| `alerting.ntfy.click` | Website opened when notification is clicked | `""` |
+| `alerting.ntfy.priority` | The priority of the alert | `3` |
+| `alerting.ntfy.disable-firebase` | Whether message push delivery via firebase should be disabled. [ntfy.sh defaults to enabled](https://docs.ntfy.sh/publish/#disable-firebase) | `false` |
+| `alerting.ntfy.disable-cache` | Whether server side message caching should be disabled. [ntfy.sh defaults to enabled](https://docs.ntfy.sh/publish/#message-caching) | `false` |
+| `alerting.ntfy.default-alert` | Default alert configuration. <br />See [Setting a default alert](#setting-a-default-alert) | N/A |

 [ntfy](https://github.com/binwiederhier/ntfy) is an amazing project that allows you to subscribe to desktop
 and mobile notifications, making it an awesome addition to Gatus.
@@ -1142,11 +1186,14 @@ Here's an example of what the notifications look like:
 | `alerting.teams.title` | Title of the notification | `"🚨 Gatus"` |
 | `alerting.teams.overrides[].group` | Endpoint group for which the configuration will be overridden by this configuration | `""` |
 | `alerting.teams.overrides[].webhook-url` | Teams Webhook URL | `""` |
+| `alerting.teams.client.insecure` | Whether to skip TLS verification | `false` |

 ```yaml
 alerting:
   teams:
     webhook-url: "https://********.webhook.office.com/webhookb2/************"
+    client:
+      insecure: false
     # You can also add group-specific to keys, which will
     # override the to key above for the specified groups
     overrides:
@@ -1185,14 +1232,18 @@ Here's an example of what the notifications look like:

 #### Configuring Telegram alerts
 | Parameter | Description | Default |
-|:----------------------------------|:-------------------------------------------------------------------------------------------|:---------------------------|
+|:--------------------------------------|:-------------------------------------------------------------------------------------------|:---------------------------|
 | `alerting.telegram` | Configuration for alerts of type `telegram` | `{}` |
 | `alerting.telegram.token` | Telegram Bot Token | Required `""` |
 | `alerting.telegram.id` | Telegram User ID | Required `""` |
 | `alerting.telegram.api-url` | Telegram API URL | `https://api.telegram.org` |
 | `alerting.telegram.client` | Client configuration. <br />See [Client configuration](#client-configuration). | `{}` |
 | `alerting.telegram.default-alert` | Default alert configuration. <br />See [Setting a default alert](#setting-a-default-alert) | N/A |
+| `alerting.telegram.overrides` | List of overrides that may be prioritized over the default configuration | `[]` |
+| `alerting.telegram.overrides[].group` | Endpoint group for which the configuration will be overridden by this configuration | `""` |
+| `alerting.telegram.overrides[].token` | Telegram Bot Token used to override the default token | `""` |
+| `alerting.telegram.overrides[].id` | Telegram User ID used to override the default ID | `""` |

 ```yaml
 alerting:
@@ -1314,6 +1365,7 @@ Furthermore, you may use the following placeholders in the body (`alerting.custo
 - `[ENDPOINT_NAME]` (resolved from `endpoints[].name`)
 - `[ENDPOINT_GROUP]` (resolved from `endpoints[].group`)
 - `[ENDPOINT_URL]` (resolved from `endpoints[].url`)
+- `[RESULT_ERRORS]` (resolved from the health evaluation of a given health check)

 If you have an alert using the `custom` provider with `send-on-resolved` set to `true`, you can use the
 `[ALERT_TRIGGERED_OR_RESOLVED]` placeholder to differentiate the notifications.
@@ -1328,7 +1380,7 @@ alerting:
     method: "POST"
     body: |
       {
-        "text": "[ALERT_TRIGGERED_OR_RESOLVED]: [ENDPOINT_GROUP] - [ENDPOINT_NAME] - [ALERT_DESCRIPTION]"
+        "text": "[ALERT_TRIGGERED_OR_RESOLVED]: [ENDPOINT_GROUP] - [ENDPOINT_NAME] - [ALERT_DESCRIPTION] - [RESULT_ERRORS]"
       }
 endpoints:
   - name: website
@@ -1449,6 +1501,42 @@ endpoints:
     - type: pagerduty
 ```

+#### Configuring Zulip alerts
+| Parameter | Description | Default |
+|:-----------------------------------------|:------------------------------------------------------------------------------------|:------------------------------------|
+| `alerting.zulip` | Configuration for alerts of type `zulip` | `{}` |
+| `alerting.zulip.bot-email` | Bot Email | Required `""` |
+| `alerting.zulip.bot-api-key` | Bot API key | Required `""` |
+| `alerting.zulip.domain` | Full organization domain (e.g.: yourZulipDomain.zulipchat.com) | Required `""` |
+| `alerting.zulip.channel-id` | The channel ID where Gatus will send the alerts | Required `""` |
+| `alerting.zulip.overrides[].group` | Endpoint group for which the configuration will be overridden by this configuration | `""` |
+| `alerting.zulip.overrides[].bot-email` | Bot Email for this override | `""` |
+| `alerting.zulip.overrides[].bot-api-key` | Bot API key for this override | `""` |
+| `alerting.zulip.overrides[].domain` | Organization domain for this override | `""` |
+| `alerting.zulip.overrides[].channel-id` | Channel ID for this override | `""` |
+
+```yaml
+alerting:
+  zulip:
+    bot-email: gatus-bot@some.zulip.org
+    bot-api-key: "********************************"
+    domain: some.zulip.org
+    channel-id: 123456
+
+endpoints:
+  - name: website
+    url: "https://twin.sh/health"
+    interval: 5m
+    conditions:
+      - "[STATUS] == 200"
+      - "[BODY].status == UP"
+      - "[RESPONSE_TIME] < 300"
+    alerts:
+      - type: zulip
+        description: "healthcheck failed"
+        send-on-resolved: true
+```
+

 ### Maintenance
 If you have maintenance windows, you may not want to be annoyed by alerts.
@@ -1459,15 +1547,15 @@ To do that, you'll have to use the maintenance configuration:
 | `maintenance.enabled` | Whether the maintenance period is enabled | `true` |
 | `maintenance.start` | Time at which the maintenance window starts in `hh:mm` format (e.g. `23:00`) | Required `""` |
 | `maintenance.duration` | Duration of the maintenance window (e.g. `1h`, `30m`) | Required `""` |
+| `maintenance.timezone` | Timezone of the maintenance window (e.g. `Europe/Amsterdam`).<br />See [List of tz database time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) for more info | `UTC` |
 | `maintenance.every` | Days on which the maintenance period applies (e.g. `[Monday, Thursday]`).<br />If left empty, the maintenance window applies every day | `[]` |

-> 📝 The maintenance configuration uses UTC

 Here's an example:
 ```yaml
 maintenance:
   start: 23:00
   duration: 1h
+  timezone: "Europe/Amsterdam"
   every: [Monday, Thursday]
 ```
 Note that you can also specify each day on separate lines:
@@ -1475,6 +1563,7 @@ Note that you can also specify each day on separate lines:
 maintenance:
   start: 23:00
   duration: 1h
+  timezone: "Europe/Amsterdam"
   every:
     - Monday
     - Thursday
@@ -2122,7 +2211,7 @@ The path to generate a badge is the following:
 /api/v1/endpoints/{key}/uptimes/{duration}/badge.svg
 ```
 Where:
-- `{duration}` is `7d`, `24h` or `1h`
+- `{duration}` is `30d` (alpha), `7d`, `24h` or `1h`
 - `{key}` has the pattern `<GROUP_NAME>_<ENDPOINT_NAME>` in which both variables have ` `, `/`, `_`, `,` and `.` replaced by `-`.

 For instance, if you want the uptime during the last 24 hours from the endpoint `frontend` in the group `core`,
@@ -2187,7 +2276,7 @@ The endpoint to generate a badge is the following:
 /api/v1/endpoints/{key}/response-times/{duration}/badge.svg
 ```
 Where:
-- `{duration}` is `7d`, `24h` or `1h`
+- `{duration}` is `30d` (alpha), `7d`, `24h` or `1h`
 - `{key}` has the pattern `<GROUP_NAME>_<ENDPOINT_NAME>` in which both variables have ` `, `/`, `_`, `,` and `.` replaced by `-`.
@@ -1,7 +1,10 @@
 package alert

 import (
+	"crypto/sha256"
+	"encoding/hex"
 	"errors"
+	"strconv"
 	"strings"
 )
@@ -26,6 +29,9 @@ type Alert struct {
 	// FailureThreshold is the number of failures in a row needed before triggering the alert
 	FailureThreshold int `yaml:"failure-threshold"`

+	// SuccessThreshold defines how many successful executions must happen in a row before an ongoing incident is marked as resolved
+	SuccessThreshold int `yaml:"success-threshold"`
+
 	// Description of the alert. Will be included in the alert sent.
 	//
 	// This is a pointer, because it is populated by YAML and we need to know whether it was explicitly set to a value
@@ -38,9 +44,6 @@ type Alert struct {
 	// or not for provider.ParseWithDefaultAlert to work. Use Alert.IsSendingOnResolved() for a non-pointer
 	SendOnResolved *bool `yaml:"send-on-resolved"`

-	// SuccessThreshold defines how many successful executions must happen in a row before an ongoing incident is marked as resolved
-	SuccessThreshold int `yaml:"success-threshold"`
-
 	// ResolveKey is an optional field that is used by some providers (i.e. PagerDuty's dedup_key) to resolve
 	// ongoing/triggered incidents
 	ResolveKey string `yaml:"-"`
@@ -94,3 +97,17 @@ func (alert *Alert) IsSendingOnResolved() bool {
 	}
 	return *alert.SendOnResolved
 }
+
+// Checksum returns a checksum of the alert
+// Used to determine which persisted triggered alert should be deleted on application start
+func (alert *Alert) Checksum() string {
+	hash := sha256.New()
+	hash.Write([]byte(string(alert.Type) + "_" +
+		strconv.FormatBool(alert.IsEnabled()) + "_" +
+		strconv.FormatBool(alert.IsSendingOnResolved()) + "_" +
+		strconv.Itoa(alert.SuccessThreshold) + "_" +
+		strconv.Itoa(alert.FailureThreshold) + "_" +
+		alert.GetDescription()),
+	)
+	return hex.EncodeToString(hash.Sum(nil))
+}
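The comment above describes the intent: the checksum lets Gatus decide, on startup, which previously persisted triggered alerts no longer match any configured alert. A minimal sketch of that comparison, assuming the persisted checksums have already been loaded from storage (the storage side is not part of this hunk):

```go
// staleAlertChecksums returns the persisted checksums that no longer correspond
// to any currently configured alert and can therefore be deleted on startup.
func staleAlertChecksums(persisted []string, configured []*Alert) []string {
	current := make(map[string]bool, len(configured))
	for _, a := range configured {
		current[a.Checksum()] = true
	}
	var stale []string
	for _, checksum := range persisted {
		if !current[checksum] {
			stale = append(stale, checksum)
		}
	}
	return stale
}
```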
@@ -84,3 +84,109 @@ func TestAlert_IsSendingOnResolved(t *testing.T) {
 		t.Error("alert.IsSendingOnResolved() should've returned true, because SendOnResolved was set to true")
 	}
 }
+
+func TestAlert_Checksum(t *testing.T) {
+	description1, description2 := "a", "b"
+	yes, no := true, false
+	scenarios := []struct {
+		name     string
+		alert    Alert
+		expected string
+	}{
+		{
+			name: "barebone",
+			alert: Alert{
+				Type: TypeDiscord,
+			},
+			expected: "fed0580e44ed5701dbba73afa1f14b2c53ca5a7b8067a860441c212916057fe3",
+		},
+		{
+			name: "with-description-1",
+			alert: Alert{
+				Type:        TypeDiscord,
+				Description: &description1,
+			},
+			expected: "005f407ebe506e74a4aeb46f74c28b376debead7011e1b085da3840f72ba9707",
+		},
+		{
+			name: "with-description-2",
+			alert: Alert{
+				Type:        TypeDiscord,
+				Description: &description2,
+			},
+			expected: "3c2c4a9570cdc614006993c21f79a860a7f5afea10cf70d1a79d3c49342ef2c8",
+		},
+		{
+			name: "with-description-2-and-enabled-false",
+			alert: Alert{
+				Type:        TypeDiscord,
+				Enabled:     &no,
+				Description: &description2,
+			},
+			expected: "837945c2b4cd5e961db3e63e10c348d4f1c3446ba68cf5a48e35a1ae22cf0c22",
+		},
+		{
+			name: "with-description-2-and-enabled-true",
+			alert: Alert{
+				Type:        TypeDiscord,
+				Enabled:     &yes, // it defaults to true if not set, but just to make sure
+				Description: &description2,
+			},
+			expected: "3c2c4a9570cdc614006993c21f79a860a7f5afea10cf70d1a79d3c49342ef2c8",
+		},
+		{
+			name: "with-description-2-and-enabled-true-and-send-on-resolved-true",
+			alert: Alert{
+				Type:           TypeDiscord,
+				Enabled:        &yes,
+				SendOnResolved: &yes,
+				Description:    &description2,
+			},
+			expected: "bf1436995a880eb4a352c74c5dfee1f1b5ff6b9fc55aef9bf411b3631adfd80c",
+		},
+		{
+			name: "with-description-2-and-failure-threshold-7",
+			alert: Alert{
+				Type:             TypeSlack,
+				FailureThreshold: 7,
+				Description:      &description2,
+			},
+			expected: "8bd479e18bda393d4c924f5a0d962e825002168dedaa88b445e435db7bacffd3",
+		},
+		{
+			name: "with-description-2-and-failure-threshold-9",
+			alert: Alert{
+				Type:             TypeSlack,
+				FailureThreshold: 9,
+				Description:      &description2,
+			},
+			expected: "5abdfce5236e344996d264d526e769c07cb0d3d329a999769a1ff84b157ca6f1",
+		},
+		{
+			name: "with-description-2-and-success-threshold-5",
+			alert: Alert{
+				Type:             TypeSlack,
+				SuccessThreshold: 7,
+				Description:      &description2,
+			},
+			expected: "c0000e73626b80e212cfc24830de7094568f648e37f3e16f9e68c7f8ef75c34c",
+		},
+		{
+			name: "with-description-2-and-success-threshold-1",
+			alert: Alert{
+				Type:             TypeSlack,
+				SuccessThreshold: 1,
+				Description:      &description2,
+			},
+			expected: "5c28963b3a76104cfa4a0d79c89dd29ec596c8cfa4b1af210ec83d6d41587b5f",
+		},
+	}
+	for _, scenario := range scenarios {
+		t.Run(scenario.name, func(t *testing.T) {
+			scenario.alert.ValidateAndSetDefaults()
+			if checksum := scenario.alert.Checksum(); checksum != scenario.expected {
+				t.Errorf("expected checksum %v, got %v", scenario.expected, checksum)
+			}
+		})
+	}
+}
@@ -23,6 +23,9 @@ const (
 	// TypeGitLab is the Type for the gitlab alerting provider
 	TypeGitLab Type = "gitlab"

+	// TypeGitea is the Type for the gitea alerting provider
+	TypeGitea Type = "gitea"
+
 	// TypeGoogleChat is the Type for the googlechat alerting provider
 	TypeGoogleChat Type = "googlechat"

@@ -64,4 +67,7 @@ const (

 	// TypeTwilio is the Type for the twilio alerting provider
 	TypeTwilio Type = "twilio"
+
+	// TypeZulip is the Type for the Zulip alerting provider
+	TypeZulip Type = "zulip"
 )
@@ -11,6 +11,7 @@ import (
 	"github.com/TwiN/gatus/v5/alerting/provider/custom"
 	"github.com/TwiN/gatus/v5/alerting/provider/discord"
 	"github.com/TwiN/gatus/v5/alerting/provider/email"
+	"github.com/TwiN/gatus/v5/alerting/provider/gitea"
 	"github.com/TwiN/gatus/v5/alerting/provider/github"
 	"github.com/TwiN/gatus/v5/alerting/provider/gitlab"
 	"github.com/TwiN/gatus/v5/alerting/provider/googlechat"
@@ -27,6 +28,7 @@ import (
 	"github.com/TwiN/gatus/v5/alerting/provider/teams"
 	"github.com/TwiN/gatus/v5/alerting/provider/telegram"
 	"github.com/TwiN/gatus/v5/alerting/provider/twilio"
+	"github.com/TwiN/gatus/v5/alerting/provider/zulip"
 )

 // Config is the configuration for alerting providers
@@ -49,6 +51,9 @@ type Config struct {
 	// GitLab is the configuration for the gitlab alerting provider
 	GitLab *gitlab.AlertProvider `yaml:"gitlab,omitempty"`

+	// Gitea is the configuration for the gitea alerting provider
+	Gitea *gitea.AlertProvider `yaml:"gitea,omitempty"`
+
 	// GoogleChat is the configuration for the googlechat alerting provider
 	GoogleChat *googlechat.AlertProvider `yaml:"googlechat,omitempty"`

@@ -90,6 +95,9 @@ type Config struct {

 	// Twilio is the configuration for the twilio alerting provider
 	Twilio *twilio.AlertProvider `yaml:"twilio,omitempty"`
+
+	// Zulip is the configuration for the zulip alerting provider
+	Zulip *zulip.AlertProvider `yaml:"zulip,omitempty"`
 }

 // GetAlertingProviderByAlertType returns an provider.AlertProvider by its corresponding alert.Type
@ -50,7 +50,7 @@ func (provider *AlertProvider) GetAlertStatePlaceholderValue(resolved bool) stri
 	return status
 }
 
-func (provider *AlertProvider) buildHTTPRequest(ep *endpoint.Endpoint, alert *alert.Alert, resolved bool) *http.Request {
+func (provider *AlertProvider) buildHTTPRequest(ep *endpoint.Endpoint, alert *alert.Alert, result *endpoint.Result, resolved bool) *http.Request {
 	body, url, method := provider.Body, provider.URL, provider.Method
 	body = strings.ReplaceAll(body, "[ALERT_DESCRIPTION]", alert.GetDescription())
 	url = strings.ReplaceAll(url, "[ALERT_DESCRIPTION]", alert.GetDescription())

@ -60,6 +60,8 @@ func (provider *AlertProvider) buildHTTPRequest(ep *endpoint.Endpoint, alert *al
 	url = strings.ReplaceAll(url, "[ENDPOINT_GROUP]", ep.Group)
 	body = strings.ReplaceAll(body, "[ENDPOINT_URL]", ep.URL)
 	url = strings.ReplaceAll(url, "[ENDPOINT_URL]", ep.URL)
+	body = strings.ReplaceAll(body, "[RESULT_ERRORS]", strings.Join(result.Errors, ","))
+	url = strings.ReplaceAll(url, "[RESULT_ERRORS]", strings.Join(result.Errors, ","))
 	if resolved {
 		body = strings.ReplaceAll(body, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(true))
 		url = strings.ReplaceAll(url, "[ALERT_TRIGGERED_OR_RESOLVED]", provider.GetAlertStatePlaceholderValue(true))

@ -79,7 +81,7 @@ func (provider *AlertProvider) buildHTTPRequest(ep *endpoint.Endpoint, alert *al
 }
 
 func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, result *endpoint.Result, resolved bool) error {
-	request := provider.buildHTTPRequest(ep, alert, resolved)
+	request := provider.buildHTTPRequest(ep, alert, result, resolved)
 	response, err := client.GetHTTPClient(provider.ClientConfig).Do(request)
 	if err != nil {
 		return err

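A hedged sketch of how the new [RESULT_ERRORS] placeholder could be used from a custom alerting provider; the URL and body below are illustrative assumptions, not taken from this commit:

alerting:
  custom:
    url: "https://alerts.example.org/webhook?errors=[RESULT_ERRORS]"
    method: "POST"
    body: |
      {
        "endpoint": "[ENDPOINT_NAME]",
        "group": "[ENDPOINT_GROUP]",
        "state": "[ALERT_TRIGGERED_OR_RESOLVED]",
        "errors": "[RESULT_ERRORS]"
      }

The placeholder is replaced with the result's errors joined by commas, in both the URL and the body, as shown in the hunk above.
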
@ -140,6 +140,53 @@ func TestAlertProvider_buildHTTPRequest(t *testing.T) {
 			request := customAlertProvider.buildHTTPRequest(
 				&endpoint.Endpoint{Name: "endpoint-name", Group: "endpoint-group", URL: "https://example.com"},
 				&alert.Alert{Description: &alertDescription},
+				&endpoint.Result{Errors: []string{}},
+				scenario.Resolved,
+			)
+			if request.URL.String() != scenario.ExpectedURL {
+				t.Error("expected URL to be", scenario.ExpectedURL, "got", request.URL.String())
+			}
+			body, _ := io.ReadAll(request.Body)
+			if string(body) != scenario.ExpectedBody {
+				t.Error("expected body to be", scenario.ExpectedBody, "got", string(body))
+			}
+		})
+	}
+}
+
+func TestAlertProviderWithResultErrors_buildHTTPRequest(t *testing.T) {
+	customAlertWithErrorsProvider := &AlertProvider{
+		URL:  "https://example.com/[ENDPOINT_GROUP]/[ENDPOINT_NAME]?event=[ALERT_TRIGGERED_OR_RESOLVED]&description=[ALERT_DESCRIPTION]&url=[ENDPOINT_URL]&error=[RESULT_ERRORS]",
+		Body: "[ENDPOINT_NAME],[ENDPOINT_GROUP],[ALERT_DESCRIPTION],[ENDPOINT_URL],[ALERT_TRIGGERED_OR_RESOLVED],[RESULT_ERRORS]",
+	}
+	alertDescription := "alert-description"
+	scenarios := []struct {
+		AlertProvider *AlertProvider
+		Resolved      bool
+		ExpectedURL   string
+		ExpectedBody  string
+		Errors        []string
+	}{
+		{
+			AlertProvider: customAlertWithErrorsProvider,
+			Resolved:      true,
+			ExpectedURL:   "https://example.com/endpoint-group/endpoint-name?event=RESOLVED&description=alert-description&url=https://example.com&error=",
+			ExpectedBody:  "endpoint-name,endpoint-group,alert-description,https://example.com,RESOLVED,",
+		},
+		{
+			AlertProvider: customAlertWithErrorsProvider,
+			Resolved:      false,
+			ExpectedURL:   "https://example.com/endpoint-group/endpoint-name?event=TRIGGERED&description=alert-description&url=https://example.com&error=error1,error2",
+			ExpectedBody:  "endpoint-name,endpoint-group,alert-description,https://example.com,TRIGGERED,error1,error2",
+			Errors:        []string{"error1", "error2"},
+		},
+	}
+	for _, scenario := range scenarios {
+		t.Run(fmt.Sprintf("resolved-%v-with-default-placeholders-and-result-errors", scenario.Resolved), func(t *testing.T) {
+			request := customAlertWithErrorsProvider.buildHTTPRequest(
+				&endpoint.Endpoint{Name: "endpoint-name", Group: "endpoint-group", URL: "https://example.com"},
+				&alert.Alert{Description: &alertDescription},
+				&endpoint.Result{Errors: scenario.Errors},
 				scenario.Resolved,
 			)
 			if request.URL.String() != scenario.ExpectedURL {

@ -190,6 +237,7 @@ func TestAlertProvider_buildHTTPRequestWithCustomPlaceholder(t *testing.T) {
 			request := customAlertProvider.buildHTTPRequest(
 				&endpoint.Endpoint{Name: "endpoint-name", Group: "endpoint-group"},
 				&alert.Alert{Description: &alertDescription},
+				&endpoint.Result{},
 				scenario.Resolved,
 			)
 			if request.URL.String() != scenario.ExpectedURL {

167
alerting/provider/gitea/gitea.go
Normal file
@ -0,0 +1,167 @@
package gitea

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	"code.gitea.io/sdk/gitea"
	"github.com/TwiN/gatus/v5/alerting/alert"
	"github.com/TwiN/gatus/v5/client"
	"github.com/TwiN/gatus/v5/config/endpoint"
)

// AlertProvider is the configuration necessary for sending an alert using Discord
type AlertProvider struct {
	RepositoryURL string `yaml:"repository-url"` // The URL of the Gitea repository to create issues in
	Token         string `yaml:"token"`          // Token requires at least RW on issues and RO on metadata

	// DefaultAlert is the default alert configuration to use for endpoints with an alert of the appropriate type
	DefaultAlert *alert.Alert `yaml:"default-alert,omitempty"`

	// ClientConfig is the configuration of the client used to communicate with the provider's target
	ClientConfig *client.Config `yaml:"client,omitempty"`

	// Assignees is a list of users to assign the issue to
	Assignees []string `yaml:"assignees,omitempty"`

	username        string
	repositoryOwner string
	repositoryName  string
	giteaClient     *gitea.Client
}

// IsValid returns whether the provider's configuration is valid
func (provider *AlertProvider) IsValid() bool {
	if provider.ClientConfig == nil {
		provider.ClientConfig = client.GetDefaultConfig()
	}

	if len(provider.Token) == 0 || len(provider.RepositoryURL) == 0 {
		return false
	}
	// Validate format of the repository URL
	repositoryURL, err := url.Parse(provider.RepositoryURL)
	if err != nil {
		return false
	}
	baseURL := repositoryURL.Scheme + "://" + repositoryURL.Host
	pathParts := strings.Split(repositoryURL.Path, "/")
	if len(pathParts) != 3 {
		return false
	}
	provider.repositoryOwner = pathParts[1]
	provider.repositoryName = pathParts[2]

	opts := []gitea.ClientOption{
		gitea.SetToken(provider.Token),
	}

	if provider.ClientConfig != nil && provider.ClientConfig.Insecure {
		// add new http client for skip verify
		httpClient := &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			},
		}
		opts = append(opts, gitea.SetHTTPClient(httpClient))
	}

	provider.giteaClient, err = gitea.NewClient(baseURL, opts...)
	if err != nil {
		return false
	}

	user, _, err := provider.giteaClient.GetMyUserInfo()
	if err != nil {
		return false
	}

	provider.username = user.UserName

	return true
}

// Send creates an issue in the designed RepositoryURL if the resolved parameter passed is false,
// or closes the relevant issue(s) if the resolved parameter passed is true.
func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, result *endpoint.Result, resolved bool) error {
	title := "alert(gatus): " + ep.DisplayName()
	if !resolved {
		_, _, err := provider.giteaClient.CreateIssue(
			provider.repositoryOwner,
			provider.repositoryName,
			gitea.CreateIssueOption{
				Title:     title,
				Body:      provider.buildIssueBody(ep, alert, result),
				Assignees: provider.Assignees,
			},
		)
		if err != nil {
			return fmt.Errorf("failed to create issue: %w", err)
		}
		return nil
	}

	issues, _, err := provider.giteaClient.ListRepoIssues(
		provider.repositoryOwner,
		provider.repositoryName,
		gitea.ListIssueOption{
			State:     gitea.StateOpen,
			CreatedBy: provider.username,
			ListOptions: gitea.ListOptions{
				Page: 100,
			},
		},
	)
	if err != nil {
		return fmt.Errorf("failed to list issues: %w", err)
	}

	for _, issue := range issues {
		if issue.Title == title {
			stateClosed := gitea.StateClosed
			_, _, err = provider.giteaClient.EditIssue(
				provider.repositoryOwner,
				provider.repositoryName,
				issue.ID,
				gitea.EditIssueOption{
					State: &stateClosed,
				},
			)
			if err != nil {
				return fmt.Errorf("failed to close issue: %w", err)
			}
		}
	}
	return nil
}

// buildIssueBody builds the body of the issue
func (provider *AlertProvider) buildIssueBody(ep *endpoint.Endpoint, alert *alert.Alert, result *endpoint.Result) string {
	var formattedConditionResults string
	if len(result.ConditionResults) > 0 {
		formattedConditionResults = "\n\n## Condition results\n"
		for _, conditionResult := range result.ConditionResults {
			var prefix string
			if conditionResult.Success {
				prefix = ":white_check_mark:"
			} else {
				prefix = ":x:"
			}
			formattedConditionResults += fmt.Sprintf("- %s - `%s`\n", prefix, conditionResult.Condition)
		}
	}
	var description string
	if alertDescription := alert.GetDescription(); len(alertDescription) > 0 {
		description = ":\n> " + alertDescription
	}
	message := fmt.Sprintf("An alert for **%s** has been triggered due to having failed %d time(s) in a row", ep.DisplayName(), alert.FailureThreshold)
	return message + description + formattedConditionResults
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider *AlertProvider) GetDefaultAlert() *alert.Alert {
	return provider.DefaultAlert
}

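Based on the yaml tags in the struct above, a gitea provider configuration might look roughly like the following; the repository URL, token, assignee, and default-alert values are placeholders, not taken from this commit:

alerting:
  gitea:
    repository-url: "https://gitea.example.org/TwiN/example"
    token: "access-token-with-rw-on-issues"
    assignees:
      - "TwiN"
    default-alert:
      enabled: true
      failure-threshold: 3
      send-on-resolved: true

On trigger the provider opens an issue titled "alert(gatus): " followed by the endpoint's display name; on resolve it closes the open issues it created with that title.
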
169
alerting/provider/gitea/gitea_test.go
Normal file
@ -0,0 +1,169 @@
package gitea

import (
	"net/http"
	"strings"
	"testing"

	"code.gitea.io/sdk/gitea"
	"github.com/TwiN/gatus/v5/alerting/alert"
	"github.com/TwiN/gatus/v5/client"
	"github.com/TwiN/gatus/v5/config/endpoint"
	"github.com/TwiN/gatus/v5/test"
)

func TestAlertDefaultProvider_IsValid(t *testing.T) {
	scenarios := []struct {
		Name     string
		Provider AlertProvider
		Expected bool
	}{
		{
			Name:     "invalid",
			Provider: AlertProvider{RepositoryURL: "", Token: ""},
			Expected: false,
		},
		{
			Name:     "invalid-token",
			Provider: AlertProvider{RepositoryURL: "https://gitea.com/TwiN/test", Token: "12345"},
			Expected: false,
		},
		{
			Name:     "missing-repository-name",
			Provider: AlertProvider{RepositoryURL: "https://gitea.com/TwiN", Token: "12345"},
			Expected: false,
		},
		{
			Name:     "enterprise-client",
			Provider: AlertProvider{RepositoryURL: "https://gitea.example.com/TwiN/test", Token: "12345"},
			Expected: false,
		},
		{
			Name:     "invalid-url",
			Provider: AlertProvider{RepositoryURL: "gitea.com/TwiN/test", Token: "12345"},
			Expected: false,
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			if scenario.Provider.IsValid() != scenario.Expected {
				t.Errorf("expected %t, got %t", scenario.Expected, scenario.Provider.IsValid())
			}
		})
	}
}

func TestAlertProvider_Send(t *testing.T) {
	defer client.InjectHTTPClient(nil)
	firstDescription := "description-1"
	secondDescription := "description-2"
	scenarios := []struct {
		Name             string
		Provider         AlertProvider
		Alert            alert.Alert
		Resolved         bool
		MockRoundTripper test.MockRoundTripper
		ExpectedError    bool
	}{
		{
			Name:          "triggered-error",
			Provider:      AlertProvider{RepositoryURL: "https://gitea.com/TwiN/test", Token: "12345"},
			Alert:         alert.Alert{Description: &firstDescription, SuccessThreshold: 5, FailureThreshold: 3},
			Resolved:      false,
			ExpectedError: true,
		},
		{
			Name:          "resolved-error",
			Provider:      AlertProvider{RepositoryURL: "https://gitea.com/TwiN/test", Token: "12345"},
			Alert:         alert.Alert{Description: &secondDescription, SuccessThreshold: 5, FailureThreshold: 3},
			Resolved:      true,
			ExpectedError: true,
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			scenario.Provider.giteaClient, _ = gitea.NewClient("https://gitea.com")
			client.InjectHTTPClient(&http.Client{Transport: scenario.MockRoundTripper})
			err := scenario.Provider.Send(
				&endpoint.Endpoint{Name: "endpoint-name", Group: "endpoint-group"},
				&scenario.Alert,
				&endpoint.Result{
					ConditionResults: []*endpoint.ConditionResult{
						{Condition: "[CONNECTED] == true", Success: scenario.Resolved},
						{Condition: "[STATUS] == 200", Success: scenario.Resolved},
					},
				},
				scenario.Resolved,
			)
			if scenario.ExpectedError && err == nil {
				t.Error("expected error, got none")
			}
			if !scenario.ExpectedError && err != nil {
				t.Error("expected no error, got", err.Error())
			}
		})
	}
}

func TestAlertProvider_buildRequestBody(t *testing.T) {
	firstDescription := "description-1"
	scenarios := []struct {
		Name         string
		Endpoint     endpoint.Endpoint
		Provider     AlertProvider
		Alert        alert.Alert
		NoConditions bool
		ExpectedBody string
	}{
		{
			Name:         "triggered",
			Endpoint:     endpoint.Endpoint{Name: "endpoint-name", URL: "https://example.org"},
			Provider:     AlertProvider{},
			Alert:        alert.Alert{Description: &firstDescription, FailureThreshold: 3},
			ExpectedBody: "An alert for **endpoint-name** has been triggered due to having failed 3 time(s) in a row:\n> description-1\n\n## Condition results\n- :white_check_mark: - `[CONNECTED] == true`\n- :x: - `[STATUS] == 200`",
		},
		{
			Name:         "triggered-with-no-description",
			Endpoint:     endpoint.Endpoint{Name: "endpoint-name", URL: "https://example.org"},
			Provider:     AlertProvider{},
			Alert:        alert.Alert{FailureThreshold: 10},
			ExpectedBody: "An alert for **endpoint-name** has been triggered due to having failed 10 time(s) in a row\n\n## Condition results\n- :white_check_mark: - `[CONNECTED] == true`\n- :x: - `[STATUS] == 200`",
		},
		{
			Name:         "triggered-with-no-conditions",
			NoConditions: true,
			Endpoint:     endpoint.Endpoint{Name: "endpoint-name", URL: "https://example.org"},
			Provider:     AlertProvider{},
			Alert:        alert.Alert{Description: &firstDescription, FailureThreshold: 10},
			ExpectedBody: "An alert for **endpoint-name** has been triggered due to having failed 10 time(s) in a row:\n> description-1",
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			var conditionResults []*endpoint.ConditionResult
			if !scenario.NoConditions {
				conditionResults = []*endpoint.ConditionResult{
					{Condition: "[CONNECTED] == true", Success: true},
					{Condition: "[STATUS] == 200", Success: false},
				}
			}
			body := scenario.Provider.buildIssueBody(
				&scenario.Endpoint,
				&scenario.Alert,
				&endpoint.Result{ConditionResults: conditionResults},
			)
			if strings.TrimSpace(body) != strings.TrimSpace(scenario.ExpectedBody) {
				t.Errorf("expected:\n%s\ngot:\n%s", scenario.ExpectedBody, body)
			}
		})
	}
}

func TestAlertProvider_GetDefaultAlert(t *testing.T) {
	if (&AlertProvider{DefaultAlert: &alert.Alert{}}).GetDefaultAlert() == nil {
		t.Error("expected default alert to be not nil")
	}
	if (&AlertProvider{DefaultAlert: nil}).GetDefaultAlert() != nil {
		t.Error("expected default alert to be nil")
	}
}

@ -16,6 +16,9 @@ import (
 type AlertProvider struct {
 	WebhookURL string `yaml:"webhook-url"`
 
+	// Channel is the optional setting to override the default webhook's channel
+	Channel string `yaml:"channel,omitempty"`
+
 	// ClientConfig is the configuration of the client used to communicate with the provider's target
 	ClientConfig *client.Config `yaml:"client,omitempty"`
 

@ -70,6 +73,7 @@ func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, r
 }
 
 type Body struct {
+	Channel  string `json:"channel,omitempty"` // Optional channel override
 	Text     string `json:"text"`
 	Username string `json:"username"`
 	IconURL  string `json:"icon_url"`

@ -118,6 +122,7 @@ func (provider *AlertProvider) buildRequestBody(ep *endpoint.Endpoint, alert *al
 		description = ":\n> " + alertDescription
 	}
 	body := Body{
+		Channel:  provider.Channel,
 		Text:     "",
 		Username: "gatus",
 		IconURL:  "https://raw.githubusercontent.com/TwiN/gatus/master/.github/assets/logo.png",

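A hedged sketch of the new channel override in YAML; the hunk above only shows a webhook-based provider, so the mattermost key, webhook URL, and channel value below are assumptions for illustration:

alerting:
  mattermost:
    webhook-url: "https://mattermost.example.org/hooks/xxxxxxxxxxxxxxxxxxxxxxxxxx"
    channel: "monitoring"

When channel is left empty, the omitempty JSON tag keeps it out of the payload and the webhook's default channel is used.
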
@ -21,10 +21,14 @@ const (
 
 // AlertProvider is the configuration necessary for sending an alert using Slack
 type AlertProvider struct {
 	Topic    string `yaml:"topic"`
 	URL      string `yaml:"url,omitempty"`      // Defaults to DefaultURL
 	Priority int    `yaml:"priority,omitempty"` // Defaults to DefaultPriority
 	Token    string `yaml:"token,omitempty"`    // Defaults to ""
+	Email    string `yaml:"email,omitempty"`    // Defaults to ""
+	Click    string `yaml:"click,omitempty"`    // Defaults to ""
+	DisableFirebase bool `yaml:"disable-firebase,omitempty"` // Defaults to false
+	DisableCache    bool `yaml:"disable-cache,omitempty"`    // Defaults to false
 
 	// DefaultAlert is the default alert configuration to use for endpoints with an alert of the appropriate type
 	DefaultAlert *alert.Alert `yaml:"default-alert,omitempty"`

@ -56,6 +60,12 @@ func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, r
 	if len(provider.Token) > 0 {
 		request.Header.Set("Authorization", "Bearer "+provider.Token)
 	}
+	if provider.DisableFirebase {
+		request.Header.Set("Firebase", "no")
+	}
+	if provider.DisableCache {
+		request.Header.Set("Cache", "no")
+	}
 	response, err := client.GetHTTPClient(nil).Do(request)
 	if err != nil {
 		return err

@ -74,6 +84,8 @@ type Body struct {
 	Message  string   `json:"message"`
 	Tags     []string `json:"tags"`
 	Priority int      `json:"priority"`
+	Email    string   `json:"email,omitempty"`
+	Click    string   `json:"click,omitempty"`
 }
 
 // buildRequestBody builds the request body for the provider

@ -105,6 +117,8 @@ func (provider *AlertProvider) buildRequestBody(ep *endpoint.Endpoint, alert *al
 		Message:  message,
 		Tags:     []string{tag},
 		Priority: provider.Priority,
+		Email:    provider.Email,
+		Click:    provider.Click,
 	})
 	return body
 }

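Based on the yaml tags added above, an ntfy configuration exercising the new options might look roughly like this; the topic, e-mail address, and click URL are illustrative assumptions:

alerting:
  ntfy:
    url: "https://ntfy.sh"
    topic: "gatus-alerts"
    priority: 3
    email: "oncall@example.org"
    click: "https://status.example.org"
    disable-firebase: true
    disable-cache: false

disable-firebase and disable-cache are sent as the "Firebase: no" and "Cache: no" request headers, while email and click are forwarded in the JSON body.
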
@ -2,6 +2,9 @@ package ntfy
 
 import (
 	"encoding/json"
+	"io"
+	"net/http"
+	"net/http/httptest"
 	"testing"
 
 	"github.com/TwiN/gatus/v5/alerting/alert"

@ -88,6 +91,20 @@ func TestAlertProvider_buildRequestBody(t *testing.T) {
 			Resolved:     true,
 			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been resolved after passing successfully 5 time(s) in a row with the following description: description-2\n🟢 [CONNECTED] == true\n🟢 [STATUS] == 200","tags":["white_check_mark"],"priority":2}`,
 		},
+		{
+			Name:         "triggered-email",
+			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 1, Email: "test@example.com", Click: "example.com"},
+			Alert:        alert.Alert{Description: &firstDescription, SuccessThreshold: 5, FailureThreshold: 3},
+			Resolved:     false,
+			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been triggered due to having failed 3 time(s) in a row with the following description: description-1\n🔴 [CONNECTED] == true\n🔴 [STATUS] == 200","tags":["rotating_light"],"priority":1,"email":"test@example.com","click":"example.com"}`,
+		},
+		{
+			Name:         "resolved-email",
+			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 2, Email: "test@example.com", Click: "example.com"},
+			Alert:        alert.Alert{Description: &secondDescription, SuccessThreshold: 5, FailureThreshold: 3},
+			Resolved:     true,
+			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been resolved after passing successfully 5 time(s) in a row with the following description: description-2\n🟢 [CONNECTED] == true\n🟢 [STATUS] == 200","tags":["white_check_mark"],"priority":2,"email":"test@example.com","click":"example.com"}`,
+		},
 	}
 	for _, scenario := range scenarios {
 		t.Run(scenario.Name, func(t *testing.T) {

@ -112,3 +129,99 @@ func TestAlertProvider_buildRequestBody(t *testing.T) {
 		})
 	}
 }
+
+func TestAlertProvider_Send(t *testing.T) {
+	description := "description-1"
+	scenarios := []struct {
+		Name            string
+		Provider        AlertProvider
+		Alert           alert.Alert
+		Resolved        bool
+		ExpectedBody    string
+		ExpectedHeaders map[string]string
+	}{
+		{
+			Name:         "triggered",
+			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 1, Email: "test@example.com", Click: "example.com"},
+			Alert:        alert.Alert{Description: &description, SuccessThreshold: 5, FailureThreshold: 3},
+			Resolved:     false,
+			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been triggered due to having failed 3 time(s) in a row with the following description: description-1\n🔴 [CONNECTED] == true\n🔴 [STATUS] == 200","tags":["rotating_light"],"priority":1,"email":"test@example.com","click":"example.com"}`,
+			ExpectedHeaders: map[string]string{
+				"Content-Type": "application/json",
+			},
+		},
+		{
+			Name:         "no firebase",
+			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 1, Email: "test@example.com", Click: "example.com", DisableFirebase: true},
+			Alert:        alert.Alert{Description: &description, SuccessThreshold: 5, FailureThreshold: 3},
+			Resolved:     false,
+			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been triggered due to having failed 3 time(s) in a row with the following description: description-1\n🔴 [CONNECTED] == true\n🔴 [STATUS] == 200","tags":["rotating_light"],"priority":1,"email":"test@example.com","click":"example.com"}`,
+			ExpectedHeaders: map[string]string{
+				"Content-Type": "application/json",
+				"Firebase":     "no",
+			},
+		},
+		{
+			Name:         "no cache",
+			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 1, Email: "test@example.com", Click: "example.com", DisableCache: true},
+			Alert:        alert.Alert{Description: &description, SuccessThreshold: 5, FailureThreshold: 3},
+			Resolved:     false,
+			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been triggered due to having failed 3 time(s) in a row with the following description: description-1\n🔴 [CONNECTED] == true\n🔴 [STATUS] == 200","tags":["rotating_light"],"priority":1,"email":"test@example.com","click":"example.com"}`,
+			ExpectedHeaders: map[string]string{
+				"Content-Type": "application/json",
+				"Cache":        "no",
+			},
+		},
+		{
+			Name:         "neither firebase & cache",
+			Provider:     AlertProvider{URL: "https://ntfy.sh", Topic: "example", Priority: 1, Email: "test@example.com", Click: "example.com", DisableFirebase: true, DisableCache: true},
+			Alert:        alert.Alert{Description: &description, SuccessThreshold: 5, FailureThreshold: 3},
+			Resolved:     false,
+			ExpectedBody: `{"topic":"example","title":"Gatus: endpoint-name","message":"An alert has been triggered due to having failed 3 time(s) in a row with the following description: description-1\n🔴 [CONNECTED] == true\n🔴 [STATUS] == 200","tags":["rotating_light"],"priority":1,"email":"test@example.com","click":"example.com"}`,
+			ExpectedHeaders: map[string]string{
+				"Content-Type": "application/json",
+				"Firebase":     "no",
+				"Cache":        "no",
+			},
+		},
+	}
+	for _, scenario := range scenarios {
+		t.Run(scenario.Name, func(t *testing.T) {
+			// Start a local HTTP server
+			server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+				// Test request parameters
+				for header, value := range scenario.ExpectedHeaders {
+					if value != req.Header.Get(header) {
+						t.Errorf("expected: %s, got: %s", value, req.Header.Get(header))
+					}
+				}
+				body, _ := io.ReadAll(req.Body)
+				if string(body) != scenario.ExpectedBody {
+					t.Errorf("expected:\n%s\ngot:\n%s", scenario.ExpectedBody, body)
+				}
+				// Send response to be tested
+				rw.Write([]byte(`OK`))
+			}))
+			// Close the server when test finishes
+			defer server.Close()
+
+			scenario.Provider.URL = server.URL
+			err := scenario.Provider.Send(
+				&endpoint.Endpoint{Name: "endpoint-name"},
+				&scenario.Alert,
+				&endpoint.Result{
+					ConditionResults: []*endpoint.ConditionResult{
+						{Condition: "[CONNECTED] == true", Success: scenario.Resolved},
+						{Condition: "[STATUS] == 200", Success: scenario.Resolved},
+					},
+				},
+				scenario.Resolved,
+			)
+			if err != nil {
+				t.Error("Encountered an error on Send: ", err)
+			}
+
+		})
+	}
+
+}

@ -6,6 +6,7 @@ import (
 	"github.com/TwiN/gatus/v5/alerting/provider/custom"
 	"github.com/TwiN/gatus/v5/alerting/provider/discord"
 	"github.com/TwiN/gatus/v5/alerting/provider/email"
+	"github.com/TwiN/gatus/v5/alerting/provider/gitea"
 	"github.com/TwiN/gatus/v5/alerting/provider/github"
 	"github.com/TwiN/gatus/v5/alerting/provider/gitlab"
 	"github.com/TwiN/gatus/v5/alerting/provider/googlechat"

@ -21,6 +22,7 @@ import (
 	"github.com/TwiN/gatus/v5/alerting/provider/teams"
 	"github.com/TwiN/gatus/v5/alerting/provider/telegram"
 	"github.com/TwiN/gatus/v5/alerting/provider/twilio"
+	"github.com/TwiN/gatus/v5/alerting/provider/zulip"
 	"github.com/TwiN/gatus/v5/config/endpoint"
 )
 

@ -66,6 +68,7 @@ var (
 	_ AlertProvider = (*email.AlertProvider)(nil)
 	_ AlertProvider = (*github.AlertProvider)(nil)
 	_ AlertProvider = (*gitlab.AlertProvider)(nil)
+	_ AlertProvider = (*gitea.AlertProvider)(nil)
 	_ AlertProvider = (*googlechat.AlertProvider)(nil)
 	_ AlertProvider = (*jetbrainsspace.AlertProvider)(nil)
 	_ AlertProvider = (*matrix.AlertProvider)(nil)

@ -79,4 +82,5 @@ var (
 	_ AlertProvider = (*teams.AlertProvider)(nil)
 	_ AlertProvider = (*telegram.AlertProvider)(nil)
 	_ AlertProvider = (*twilio.AlertProvider)(nil)
+	_ AlertProvider = (*zulip.AlertProvider)(nil)
 )

@ -19,6 +19,9 @@ type AlertProvider struct {
 	// DefaultAlert is the default alert configuration to use for endpoints with an alert of the appropriate type
 	DefaultAlert *alert.Alert `yaml:"default-alert,omitempty"`
 
+	// ClientConfig is the configuration of the client used to communicate with the provider's target
+	ClientConfig *client.Config `yaml:"client,omitempty"`
+
 	// Overrides is a list of Override that may be prioritized over the default configuration
 	Overrides []Override `yaml:"overrides,omitempty"`
 

@ -54,7 +57,7 @@ func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, r
 		return err
 	}
 	request.Header.Set("Content-Type", "application/json")
-	response, err := client.GetHTTPClient(nil).Do(request)
+	response, err := client.GetHTTPClient(provider.ClientConfig).Do(request)
 	if err != nil {
 		return err
 	}

@ -25,6 +25,16 @@ type AlertProvider struct {
 
 	// DefaultAlert is the default alert configuration to use for endpoints with an alert of the appropriate type
 	DefaultAlert *alert.Alert `yaml:"default-alert,omitempty"`
+
+	// Overrides is a list of Overrid that may be prioritized over the default configuration
+	Overrides []*Override `yaml:"overrides,omitempty"`
+}
+
+// Override is a configuration that may be prioritized over the default configuration
+type Override struct {
+	group string `yaml:"group"`
+	token string `yaml:"token"`
+	id    string `yaml:"id"`
 }
 
 // IsValid returns whether the provider's configuration is valid

@ -32,6 +42,18 @@ func (provider *AlertProvider) IsValid() bool {
 	if provider.ClientConfig == nil {
 		provider.ClientConfig = client.GetDefaultConfig()
 	}
+
+	registerGroups := make(map[string]bool)
+	for _, override := range provider.Overrides {
+		if len(override.group) == 0 {
+			return false
+		}
+		if _, ok := registerGroups[override.group]; ok {
+			return false
+		}
+		registerGroups[override.group] = true
+	}
+
 	return len(provider.Token) > 0 && len(provider.ID) > 0
 }
 

@ -42,7 +64,7 @@ func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, r
 	if apiURL == "" {
 		apiURL = defaultAPIURL
 	}
-	request, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/bot%s/sendMessage", apiURL, provider.Token), buffer)
+	request, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/bot%s/sendMessage", apiURL, provider.getTokenForGroup(ep.Group)), buffer)
 	if err != nil {
 		return err
 	}

@ -59,6 +81,15 @@ func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, r
 	return err
 }
 
+func (provider *AlertProvider) getTokenForGroup(group string) string {
+	for _, override := range provider.Overrides {
+		if override.group == group && len(override.token) > 0 {
+			return override.token
+		}
+	}
+	return provider.Token
+}
+
 type Body struct {
 	ChatID    string `json:"chat_id"`
 	Text      string `json:"text"`

@ -93,13 +124,22 @@ func (provider *AlertProvider) buildRequestBody(ep *endpoint.Endpoint, alert *al
 		text = fmt.Sprintf("⛑ *Gatus* \n%s%s", message, formattedConditionResults)
 	}
 	bodyAsJSON, _ := json.Marshal(Body{
-		ChatID:    provider.ID,
+		ChatID:    provider.getIDForGroup(ep.Group),
 		Text:      text,
 		ParseMode: "MARKDOWN",
 	})
 	return bodyAsJSON
 }
 
+func (provider *AlertProvider) getIDForGroup(group string) string {
+	for _, override := range provider.Overrides {
+		if override.group == group && len(override.id) > 0 {
+			return override.id
+		}
+	}
+	return provider.ID
+}
+
 // GetDefaultAlert returns the provider's default alert configuration
 func (provider *AlertProvider) GetDefaultAlert() *alert.Alert {
 	return provider.DefaultAlert

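A sketch of what the telegram group overrides might look like in YAML, assuming they are meant to be populated from configuration as the yaml tags suggest; note that the Override fields are unexported in this hunk, so this example is illustrative only, and the token, chat id, and group name are placeholders:

alerting:
  telegram:
    token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11"
    id: "12345678"
    overrides:
      - group: "core"
        token: "another-bot-token"
        id: "87654321"

getTokenForGroup and getIDForGroup fall back to the provider-level token and id when a group has no override or when the override leaves the field empty.
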
@ -11,7 +11,7 @@ import (
 	"github.com/TwiN/gatus/v5/test"
 )
 
-func TestAlertProvider_IsValid(t *testing.T) {
+func TestAlertDefaultProvider_IsValid(t *testing.T) {
 	t.Run("invalid-provider", func(t *testing.T) {
 		invalidProvider := AlertProvider{Token: "", ID: ""}
 		if invalidProvider.IsValid() {

@ -32,6 +32,69 @@ func TestAlertProvider_IsValid(t *testing.T) {
 	})
 }
 
+func TestAlertProvider_IsValidWithOverrides(t *testing.T) {
+	t.Run("invalid-provider-override-nonexist-group", func(t *testing.T) {
+		invalidProvider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678", Overrides: []*Override{{token: "token", id: "id"}}}
+		if invalidProvider.IsValid() {
+			t.Error("provider shouldn't have been valid")
+		}
+	})
+	t.Run("invalid-provider-override-duplicate-group", func(t *testing.T) {
+		invalidProvider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678", Overrides: []*Override{{group: "group1", token: "token", id: "id"}, {group: "group1", id: "id2"}}}
+		if invalidProvider.IsValid() {
+			t.Error("provider shouldn't have been valid")
+		}
+	})
+	t.Run("valid-provider", func(t *testing.T) {
+		validProvider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678", Overrides: []*Override{{group: "group", token: "token", id: "id"}}}
+		if validProvider.ClientConfig != nil {
+			t.Error("provider client config should have been nil prior to IsValid() being executed")
+		}
+		if !validProvider.IsValid() {
+			t.Error("provider should've been valid")
+		}
+		if validProvider.ClientConfig == nil {
+			t.Error("provider client config should have been set after IsValid() was executed")
+		}
+	})
+}
+
+func TestAlertProvider_getTokenAndIDForGroup(t *testing.T) {
+	t.Run("get-token-with-override", func(t *testing.T) {
+		provider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678", Overrides: []*Override{{group: "group", token: "overrideToken", id: "overrideID"}}}
+		token := provider.getTokenForGroup("group")
+		if token != "overrideToken" {
+			t.Error("token should have been 'overrideToken'")
+		}
+		id := provider.getIDForGroup("group")
+		if id != "overrideID" {
+			t.Error("id should have been 'overrideID'")
+		}
+	})
+	t.Run("get-default-token-with-overridden-id", func(t *testing.T) {
+		provider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678", Overrides: []*Override{{group: "group", id: "overrideID"}}}
+		token := provider.getTokenForGroup("group")
+		if token != provider.Token {
+			t.Error("token should have been the default token")
+		}
+		id := provider.getIDForGroup("group")
+		if id != "overrideID" {
+			t.Error("id should have been 'overrideID'")
+		}
+	})
+	t.Run("get-default-token-with-overridden-token", func(t *testing.T) {
+		provider := AlertProvider{Token: "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", ID: "12345678", Overrides: []*Override{{group: "group", token: "overrideToken"}}}
+		token := provider.getTokenForGroup("group")
+		if token != "overrideToken" {
+			t.Error("token should have been 'overrideToken'")
+		}
+		id := provider.getIDForGroup("group")
+		if id != provider.ID {
+			t.Error("id should have been the default id")
+		}
+	})
+}
+
 func TestAlertProvider_Send(t *testing.T) {
 	defer client.InjectHTTPClient(nil)
 	firstDescription := "description-1"

132
alerting/provider/zulip/zulip.go
Normal file
@ -0,0 +1,132 @@
package zulip

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/url"

	"github.com/TwiN/gatus/v5/alerting/alert"
	"github.com/TwiN/gatus/v5/client"
	"github.com/TwiN/gatus/v5/config/endpoint"
)

type Config struct {
	// BotEmail is the email of the bot user
	BotEmail string `yaml:"bot-email"`
	// BotAPIKey is the API key of the bot user
	BotAPIKey string `yaml:"bot-api-key"`
	// Domain is the domain of the Zulip server
	Domain string `yaml:"domain"`
	// ChannelID is the ID of the channel to send the message to
	ChannelID string `yaml:"channel-id"`
}

// AlertProvider is the configuration necessary for sending an alert using Zulip
type AlertProvider struct {
	Config `yaml:",inline"`
	// DefaultAlert is the default alert configuration to use for endpoints with an alert of the appropriate type
	DefaultAlert *alert.Alert `yaml:"default-alert,omitempty"`
	// Overrides is a list of Override that may be prioritized over the default configuration
	Overrides []Override `yaml:"overrides,omitempty"`
}

// Override is a case under which the default integration is overridden
type Override struct {
	Config
	Group string `yaml:"group"`
}

func (provider *AlertProvider) validateConfig(conf *Config) bool {
	return len(conf.BotEmail) > 0 && len(conf.BotAPIKey) > 0 && len(conf.Domain) > 0 && len(conf.ChannelID) > 0
}

// IsValid returns whether the provider's configuration is valid
func (provider *AlertProvider) IsValid() bool {
	registeredGroups := make(map[string]bool)
	if provider.Overrides != nil {
		for _, override := range provider.Overrides {
			isAlreadyRegistered := registeredGroups[override.Group]
			if isAlreadyRegistered || override.Group == "" || !provider.validateConfig(&override.Config) {
				return false
			}
			registeredGroups[override.Group] = true
		}
	}
	return provider.validateConfig(&provider.Config)
}

// getChannelIdForGroup returns the channel ID for the provided group
func (provider *AlertProvider) getChannelIdForGroup(group string) string {
	for _, override := range provider.Overrides {
		if override.Group == group {
			return override.ChannelID
		}
	}
	return provider.ChannelID
}

// buildRequestBody builds the request body for the provider
func (provider *AlertProvider) buildRequestBody(ep *endpoint.Endpoint, alert *alert.Alert, result *endpoint.Result, resolved bool) string {
	var message string
	if resolved {
		message = fmt.Sprintf("An alert for **%s** has been resolved after passing successfully %d time(s) in a row", ep.DisplayName(), alert.SuccessThreshold)
	} else {
		message = fmt.Sprintf("An alert for **%s** has been triggered due to having failed %d time(s) in a row", ep.DisplayName(), alert.FailureThreshold)
	}

	if alertDescription := alert.GetDescription(); len(alertDescription) > 0 {
		message += "\n> " + alertDescription + "\n"
	}

	for _, conditionResult := range result.ConditionResults {
		var prefix string
		if conditionResult.Success {
			prefix = ":check:"
		} else {
			prefix = ":cross_mark:"
		}
		message += fmt.Sprintf("\n%s - `%s`", prefix, conditionResult.Condition)
	}

	postData := map[string]string{
		"type":    "channel",
		"to":      provider.getChannelIdForGroup(ep.Group),
		"topic":   "Gatus",
		"content": message,
	}
	bodyParams := url.Values{}
	for field, value := range postData {
		bodyParams.Add(field, value)
	}
	return bodyParams.Encode()
}

// Send an alert using the provider
func (provider *AlertProvider) Send(ep *endpoint.Endpoint, alert *alert.Alert, result *endpoint.Result, resolved bool) error {
	buffer := bytes.NewBufferString(provider.buildRequestBody(ep, alert, result, resolved))
	zulipEndpoint := fmt.Sprintf("https://%s/api/v1/messages", provider.Domain)
	request, err := http.NewRequest(http.MethodPost, zulipEndpoint, buffer)
	if err != nil {
		return err
	}
	request.SetBasicAuth(provider.BotEmail, provider.BotAPIKey)
	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	request.Header.Set("User-Agent", "Gatus")
	response, err := client.GetHTTPClient(nil).Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()
	if response.StatusCode > 399 {
		body, _ := io.ReadAll(response.Body)
		return fmt.Errorf("call to provider alert returned status code %d: %s", response.StatusCode, string(body))
	}
	return nil
}

// GetDefaultAlert returns the provider's default alert configuration
func (provider *AlertProvider) GetDefaultAlert() *alert.Alert {
	return provider.DefaultAlert
}

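Based on the yaml tags in the Config struct above, a zulip provider configuration might look roughly like the following; the domain, credentials, channel IDs, and group name are illustrative assumptions:

alerting:
  zulip:
    bot-email: "gatus-bot@zulip.example.org"
    bot-api-key: "api-key"
    domain: "zulip.example.org"
    channel-id: "123456"
    overrides:
      - group: "core"
        channel-id: "654321"

Messages are posted to https://<domain>/api/v1/messages as a form-encoded channel message with the topic "Gatus", authenticated with the bot's email and API key via basic auth.
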
488
alerting/provider/zulip/zulip_test.go
Normal file
@ -0,0 +1,488 @@
package zulip

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/TwiN/gatus/v5/alerting/alert"
	"github.com/TwiN/gatus/v5/client"
	"github.com/TwiN/gatus/v5/config/endpoint"
	"github.com/TwiN/gatus/v5/test"
)

func TestAlertProvider_IsValid(t *testing.T) {
	testCase := []struct {
		name          string
		alertProvider AlertProvider
		expected      bool
	}{
		{
			name:          "Empty provider",
			alertProvider: AlertProvider{},
			expected:      false,
		},
		{
			name: "Empty channel id",
			alertProvider: AlertProvider{
				Config: Config{
					BotEmail:  "something",
					BotAPIKey: "something",
					Domain:    "something",
				},
			},
			expected: false,
		},
		{
			name: "Empty domain",
			alertProvider: AlertProvider{
				Config: Config{
					BotEmail:  "something",
					BotAPIKey: "something",
					ChannelID: "something",
				},
			},
			expected: false,
		},
		{
			name: "Empty bot api key",
			alertProvider: AlertProvider{
				Config: Config{
					BotEmail:  "something",
					Domain:    "something",
					ChannelID: "something",
				},
			},
			expected: false,
		},
		{
			name: "Empty bot email",
			alertProvider: AlertProvider{
				Config: Config{
					BotAPIKey: "something",
					Domain:    "something",
					ChannelID: "something",
				},
			},
			expected: false,
		},
		{
			name: "Valid provider",
			alertProvider: AlertProvider{
				Config: Config{
					BotEmail:  "something",
					BotAPIKey: "something",
					Domain:    "something",
					ChannelID: "something",
				},
			},
			expected: true,
		},
	}
	for _, tc := range testCase {
		t.Run(tc.name, func(t *testing.T) {
			if tc.alertProvider.IsValid() != tc.expected {
				t.Errorf("IsValid assertion failed (expected %v, got %v)", tc.expected, !tc.expected)
			}
		})
	}
}

func TestAlertProvider_IsValidWithOverride(t *testing.T) {
	validConfig := Config{
		BotEmail:  "something",
		BotAPIKey: "something",
		Domain:    "something",
		ChannelID: "something",
	}

	testCase := []struct {
		name          string
		alertProvider AlertProvider
		expected      bool
	}{
		{
			name: "Empty group",
			alertProvider: AlertProvider{
				Config: validConfig,
				Overrides: []Override{
					{
						Config: validConfig,
						Group:  "",
					},
				},
			},
			expected: false,
		},
		{
			name: "Empty override config",
			alertProvider: AlertProvider{
				Config: validConfig,
				Overrides: []Override{
					{
						Group: "something",
					},
				},
			},
			expected: false,
		},
		{
			name: "Empty channel id",
			alertProvider: AlertProvider{
				Config: validConfig,
				Overrides: []Override{
					{
						Group: "something",
						Config: Config{
							BotEmail:  "something",
							BotAPIKey: "something",
							Domain:    "something",
						},
					},
				},
			},
			expected: false,
		},
		{
			name: "Empty domain",
			alertProvider: AlertProvider{
				Config: validConfig,
				Overrides: []Override{
					{
						Group: "something",
						Config: Config{
							BotEmail:  "something",
							BotAPIKey: "something",
							ChannelID: "something",
						},
					},
				},
			},
			expected: false,
		},
		{
			name: "Empty bot api key",
			alertProvider: AlertProvider{
				Config: validConfig,
				Overrides: []Override{
					{
						Group: "something",
						Config: Config{
							BotEmail:  "something",
							Domain:    "something",
							ChannelID: "something",
						},
					},
				},
			},
			expected: false,
		},
		{
			name: "Empty bot email",
			alertProvider: AlertProvider{
				Config: validConfig,
				Overrides: []Override{
					{
						Group: "something",
						Config: Config{
							BotAPIKey: "something",
							Domain:    "something",
							ChannelID: "something",
						},
					},
				},
			},
			expected: false,
		},
		{
			name: "Valid provider",
			alertProvider: AlertProvider{
				Config: validConfig,
				Overrides: []Override{
					{
						Group:  "something",
						Config: validConfig,
					},
				},
			},
			expected: true,
		},
	}
	for _, tc := range testCase {
		t.Run(tc.name, func(t *testing.T) {
			if tc.alertProvider.IsValid() != tc.expected {
				t.Errorf("IsValid assertion failed (expected %v, got %v)", tc.expected, !tc.expected)
			}
		})
	}
}

func TestAlertProvider_GetChannelIdForGroup(t *testing.T) {
	provider := AlertProvider{
		Config: Config{
			ChannelID: "default",
		},
		Overrides: []Override{
			{
				Group:  "group1",
				Config: Config{ChannelID: "group1"},
			},
			{
				Group:  "group2",
				Config: Config{ChannelID: "group2"},
			},
		},
	}
	if provider.getChannelIdForGroup("") != "default" {
		t.Error("Expected default channel ID")
	}
	if provider.getChannelIdForGroup("group2") != "group2" {
		t.Error("Expected group2 channel ID")
	}
}

func TestAlertProvider_BuildRequestBody(t *testing.T) {
	basicConfig := Config{
		BotEmail:  "bot-email",
		BotAPIKey: "bot-api-key",
		Domain:    "domain",
		ChannelID: "channel-id",
	}
	alertDesc := "Description"
	basicAlert := alert.Alert{
		SuccessThreshold: 2,
		FailureThreshold: 3,
		Description:      &alertDesc,
	}
	testCases := []struct {
		name          string
		provider      AlertProvider
		alert         alert.Alert
		resolved      bool
		hasConditions bool
		expectedBody  url.Values
	}{
		{
			name: "Resolved alert with no conditions",
			provider: AlertProvider{
				Config: basicConfig,
			},
			alert:         basicAlert,
			resolved:      true,
			hasConditions: false,
			expectedBody: url.Values{
				"content": {`An alert for **endpoint-name** has been resolved after passing successfully 2 time(s) in a row
> Description
`},
				"to":    {"channel-id"},
				"topic": {"Gatus"},
				"type":  {"channel"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Resolved alert with conditions",
|
||||||
|
provider: AlertProvider{
|
||||||
|
Config: basicConfig,
|
||||||
|
},
|
||||||
|
alert: basicAlert,
|
||||||
|
resolved: true,
|
||||||
|
hasConditions: true,
|
||||||
|
expectedBody: url.Values{
|
||||||
|
"content": {`An alert for **endpoint-name** has been resolved after passing successfully 2 time(s) in a row
|
||||||
|
> Description
|
||||||
|
|
||||||
|
:check: - ` + "`[CONNECTED] == true`" + `
|
||||||
|
:check: - ` + "`[STATUS] == 200`" + `
|
||||||
|
:check: - ` + "`[BODY] != \"\"`"},
|
||||||
|
"to": {"channel-id"},
|
||||||
|
"topic": {"Gatus"},
|
||||||
|
"type": {"channel"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Failed alert with no conditions",
|
||||||
|
provider: AlertProvider{
|
||||||
|
Config: basicConfig,
|
||||||
|
},
|
||||||
|
alert: basicAlert,
|
||||||
|
resolved: false,
|
||||||
|
hasConditions: false,
|
||||||
|
expectedBody: url.Values{
|
||||||
|
"content": {`An alert for **endpoint-name** has been triggered due to having failed 3 time(s) in a row
|
||||||
|
> Description
|
||||||
|
`},
|
||||||
|
"to": {"channel-id"},
|
||||||
|
"topic": {"Gatus"},
|
||||||
|
"type": {"channel"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Failed alert with conditions",
|
||||||
|
provider: AlertProvider{
|
||||||
|
Config: basicConfig,
|
||||||
|
},
|
||||||
|
alert: basicAlert,
|
||||||
|
resolved: false,
|
||||||
|
hasConditions: true,
|
||||||
|
expectedBody: url.Values{
|
||||||
|
"content": {`An alert for **endpoint-name** has been triggered due to having failed 3 time(s) in a row
|
||||||
|
> Description
|
||||||
|
|
||||||
|
:cross_mark: - ` + "`[CONNECTED] == true`" + `
|
||||||
|
:cross_mark: - ` + "`[STATUS] == 200`" + `
|
||||||
|
:cross_mark: - ` + "`[BODY] != \"\"`"},
|
||||||
|
"to": {"channel-id"},
|
||||||
|
"topic": {"Gatus"},
|
||||||
|
"type": {"channel"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
var conditionResults []*endpoint.ConditionResult
|
||||||
|
if tc.hasConditions {
|
||||||
|
conditionResults = []*endpoint.ConditionResult{
|
||||||
|
{Condition: "[CONNECTED] == true", Success: tc.resolved},
|
||||||
|
{Condition: "[STATUS] == 200", Success: tc.resolved},
|
||||||
|
{Condition: "[BODY] != \"\"", Success: tc.resolved},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
body := tc.provider.buildRequestBody(
|
||||||
|
&endpoint.Endpoint{Name: "endpoint-name"},
|
||||||
|
&tc.alert,
|
||||||
|
&endpoint.Result{
|
||||||
|
ConditionResults: conditionResults,
|
||||||
|
},
|
||||||
|
tc.resolved,
|
||||||
|
)
|
||||||
|
valuesResult, err := url.ParseQuery(body)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if fmt.Sprintf("%v", valuesResult) != fmt.Sprintf("%v", tc.expectedBody) {
|
||||||
|
t.Errorf("Expected body:\n%v\ngot:\n%v", tc.expectedBody, valuesResult)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlertProvider_GetDefaultAlert(t *testing.T) {
|
||||||
|
if (&AlertProvider{DefaultAlert: &alert.Alert{}}).GetDefaultAlert() == nil {
|
||||||
|
t.Error("expected default alert to be not nil")
|
||||||
|
}
|
||||||
|
if (&AlertProvider{DefaultAlert: nil}).GetDefaultAlert() != nil {
|
||||||
|
t.Error("expected default alert to be nil")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlertProvider_Send(t *testing.T) {
|
||||||
|
defer client.InjectHTTPClient(nil)
|
||||||
|
validateRequest := func(req *http.Request) {
|
||||||
|
if req.URL.String() != "https://custom-domain/api/v1/messages" {
|
||||||
|
t.Errorf("expected url https://custom-domain.zulipchat.com/api/v1/messages, got %s", req.URL.String())
|
||||||
|
}
|
||||||
|
if req.Method != http.MethodPost {
|
||||||
|
t.Errorf("expected POST request, got %s", req.Method)
|
||||||
|
}
|
||||||
|
if req.Header.Get("Content-Type") != "application/x-www-form-urlencoded" {
|
||||||
|
t.Errorf("expected Content-Type header to be application/x-www-form-urlencoded, got %s", req.Header.Get("Content-Type"))
|
||||||
|
}
|
||||||
|
if req.Header.Get("User-Agent") != "Gatus" {
|
||||||
|
t.Errorf("expected User-Agent header to be Gatus, got %s", req.Header.Get("User-Agent"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
basicConfig := Config{
|
||||||
|
BotEmail: "bot-email",
|
||||||
|
BotAPIKey: "bot-api-key",
|
||||||
|
Domain: "custom-domain",
|
||||||
|
ChannelID: "channel-id",
|
||||||
|
}
|
||||||
|
basicAlert := alert.Alert{
|
||||||
|
SuccessThreshold: 2,
|
||||||
|
FailureThreshold: 3,
|
||||||
|
}
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
provider AlertProvider
|
||||||
|
alert alert.Alert
|
||||||
|
resolved bool
|
||||||
|
mockRoundTripper test.MockRoundTripper
|
||||||
|
expectedError bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "resolved",
|
||||||
|
provider: AlertProvider{
|
||||||
|
Config: basicConfig,
|
||||||
|
},
|
||||||
|
alert: basicAlert,
|
||||||
|
resolved: true,
|
||||||
|
mockRoundTripper: test.MockRoundTripper(func(req *http.Request) *http.Response {
|
||||||
|
validateRequest(req)
|
||||||
|
return &http.Response{StatusCode: http.StatusOK}
|
||||||
|
}),
|
||||||
|
expectedError: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "resolved error",
|
||||||
|
provider: AlertProvider{
|
||||||
|
Config: basicConfig,
|
||||||
|
},
|
||||||
|
alert: basicAlert,
|
||||||
|
resolved: true,
|
||||||
|
mockRoundTripper: test.MockRoundTripper(func(req *http.Request) *http.Response {
|
||||||
|
validateRequest(req)
|
||||||
|
return &http.Response{StatusCode: http.StatusInternalServerError}
|
||||||
|
}),
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "triggered",
|
||||||
|
provider: AlertProvider{
|
||||||
|
Config: basicConfig,
|
||||||
|
},
|
||||||
|
alert: basicAlert,
|
||||||
|
resolved: false,
|
||||||
|
mockRoundTripper: test.MockRoundTripper(func(req *http.Request) *http.Response {
|
||||||
|
validateRequest(req)
|
||||||
|
return &http.Response{StatusCode: http.StatusOK}
|
||||||
|
}),
|
||||||
|
expectedError: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "triggered error",
|
||||||
|
provider: AlertProvider{
|
||||||
|
Config: basicConfig,
|
||||||
|
},
|
||||||
|
alert: basicAlert,
|
||||||
|
resolved: false,
|
||||||
|
mockRoundTripper: test.MockRoundTripper(func(req *http.Request) *http.Response {
|
||||||
|
validateRequest(req)
|
||||||
|
return &http.Response{StatusCode: http.StatusInternalServerError}
|
||||||
|
}),
|
||||||
|
expectedError: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
client.InjectHTTPClient(&http.Client{Transport: tc.mockRoundTripper})
|
||||||
|
err := tc.provider.Send(
|
||||||
|
&endpoint.Endpoint{Name: "endpoint-name"},
|
||||||
|
&tc.alert,
|
||||||
|
&endpoint.Result{
|
||||||
|
ConditionResults: []*endpoint.ConditionResult{
|
||||||
|
{Condition: "[CONNECTED] == true", Success: tc.resolved},
|
||||||
|
{Condition: "[STATUS] == 200", Success: tc.resolved},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
tc.resolved,
|
||||||
|
)
|
||||||
|
if tc.expectedError && err == nil {
|
||||||
|
t.Error("expected error, got none")
|
||||||
|
}
|
||||||
|
if !tc.expectedError && err != nil {
|
||||||
|
t.Errorf("expected no error, got: %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
16 api/badge.go

@@ -37,11 +37,13 @@ var (
 
 // UptimeBadge handles the automatic generation of badge based on the group name and endpoint name passed.
 //
-// Valid values for :duration -> 7d, 24h, 1h
+// Valid values for :duration -> 30d, 7d, 24h, 1h
 func UptimeBadge(c *fiber.Ctx) error {
     duration := c.Params("duration")
     var from time.Time
     switch duration {
+    case "30d":
+        from = time.Now().Add(-30 * 24 * time.Hour)
     case "7d":
         from = time.Now().Add(-7 * 24 * time.Hour)
     case "24h":
@@ -49,7 +51,7 @@ func UptimeBadge(c *fiber.Ctx) error {
     case "1h":
         from = time.Now().Add(-2 * time.Hour) // Because uptime metrics are stored by hour, we have to cheat a little
     default:
-        return c.Status(400).SendString("Durations supported: 7d, 24h, 1h")
+        return c.Status(400).SendString("Durations supported: 30d, 7d, 24h, 1h")
     }
     key := c.Params("key")
     uptime, err := store.Get().GetUptimeByKey(key, from, time.Now())
@@ -69,12 +71,14 @@ func UptimeBadge(c *fiber.Ctx) error {
 
 // ResponseTimeBadge handles the automatic generation of badge based on the group name and endpoint name passed.
 //
-// Valid values for :duration -> 7d, 24h, 1h
+// Valid values for :duration -> 30d, 7d, 24h, 1h
 func ResponseTimeBadge(cfg *config.Config) fiber.Handler {
     return func(c *fiber.Ctx) error {
         duration := c.Params("duration")
         var from time.Time
         switch duration {
+        case "30d":
+            from = time.Now().Add(-30 * 24 * time.Hour)
         case "7d":
             from = time.Now().Add(-7 * 24 * time.Hour)
         case "24h":
@@ -82,7 +86,7 @@ func ResponseTimeBadge(cfg *config.Config) fiber.Handler {
         case "1h":
             from = time.Now().Add(-2 * time.Hour) // Because response time metrics are stored by hour, we have to cheat a little
         default:
-            return c.Status(400).SendString("Durations supported: 7d, 24h, 1h")
+            return c.Status(400).SendString("Durations supported: 30d, 7d, 24h, 1h")
         }
         key := c.Params("key")
         averageResponseTime, err := store.Get().GetAverageResponseTimeByKey(key, from, time.Now())
@@ -161,6 +165,8 @@ func HealthBadgeShields(c *fiber.Ctx) error {
 func generateUptimeBadgeSVG(duration string, uptime float64) []byte {
     var labelWidth, valueWidth, valueWidthAdjustment int
     switch duration {
+    case "30d":
+        labelWidth = 70
     case "7d":
         labelWidth = 65
     case "24h":
@@ -227,6 +233,8 @@ func getBadgeColorFromUptime(uptime float64) string {
 func generateResponseTimeBadgeSVG(duration string, averageResponseTime int, key string, cfg *config.Config) []byte {
     var labelWidth, valueWidth int
     switch duration {
+    case "30d":
+        labelWidth = 110
     case "7d":
         labelWidth = 105
     case "24h":

10 api/chart.go

@@ -32,14 +32,18 @@ var (
 
 func ResponseTimeChart(c *fiber.Ctx) error {
     duration := c.Params("duration")
+    chartTimestampFormatter := chart.TimeValueFormatterWithFormat(timeFormat)
     var from time.Time
     switch duration {
+    case "30d":
+        from = time.Now().Truncate(time.Hour).Add(-30 * 24 * time.Hour)
+        chartTimestampFormatter = chart.TimeDateValueFormatter
     case "7d":
-        from = time.Now().Truncate(time.Hour).Add(-24 * 7 * time.Hour)
+        from = time.Now().Truncate(time.Hour).Add(-7 * 24 * time.Hour)
     case "24h":
         from = time.Now().Truncate(time.Hour).Add(-24 * time.Hour)
     default:
-        return c.Status(400).SendString("Durations supported: 7d, 24h")
+        return c.Status(400).SendString("Durations supported: 30d, 7d, 24h")
     }
     hourlyAverageResponseTime, err := store.Get().GetHourlyAverageResponseTimeByKey(c.Params("key"), from, time.Now())
     if err != nil {
@@ -88,7 +92,7 @@ func ResponseTimeChart(c *fiber.Ctx) error {
         Width: 1280,
         Height: 300,
         XAxis: chart.XAxis{
-            ValueFormatter: chart.TimeValueFormatterWithFormat(timeFormat),
+            ValueFormatter: chartTimestampFormatter,
             GridMajorStyle: gridStyle,
             GridMinorStyle: gridStyle,
             Style: axisStyle,

@@ -49,6 +49,11 @@ func TestResponseTimeChart(t *testing.T) {
             Path: "/api/v1/endpoints/core_frontend/response-times/7d/chart.svg",
             ExpectedCode: http.StatusOK,
         },
+        {
+            Name: "chart-response-time-30d",
+            Path: "/api/v1/endpoints/core_frontend/response-times/30d/chart.svg",
+            ExpectedCode: http.StatusOK,
+        },
         {
             Name: "chart-response-time-with-invalid-duration",
             Path: "/api/v1/endpoints/core_backend/response-times/3d/chart.svg",

@@ -4,7 +4,6 @@ import (
     "encoding/json"
     "errors"
     "fmt"
-    "io"
     "log"
 
     "github.com/TwiN/gatus/v5/client"
@@ -62,14 +61,8 @@ func getEndpointStatusesFromRemoteInstances(remoteConfig *remote.Config) ([]*end
         if err != nil {
             return nil, err
         }
-        body, err := io.ReadAll(response.Body)
-        if err != nil {
-            _ = response.Body.Close()
-            log.Printf("[api.getEndpointStatusesFromRemoteInstances] Silently failed to retrieve endpoint statuses from %s: %s", instance.URL, err.Error())
-            continue
-        }
         var endpointStatuses []*endpoint.Status
-        if err = json.Unmarshal(body, &endpointStatuses); err != nil {
+        if err = json.NewDecoder(response.Body).Decode(&endpointStatuses); err != nil {
            _ = response.Body.Close()
            log.Printf("[api.getEndpointStatusesFromRemoteInstances] Silently failed to retrieve endpoint statuses from %s: %s", instance.URL, err.Error())
            continue
@ -46,6 +46,9 @@ func CreateExternalEndpointResult(cfg *config.Config) fiber.Handler {
|
|||||||
Success: c.QueryBool("success"),
|
Success: c.QueryBool("success"),
|
||||||
Errors: []string{},
|
Errors: []string{},
|
||||||
}
|
}
|
||||||
|
if !result.Success && c.Query("error") != "" {
|
||||||
|
result.Errors = append(result.Errors, c.Query("error"))
|
||||||
|
}
|
||||||
convertedEndpoint := externalEndpoint.ToEndpoint()
|
convertedEndpoint := externalEndpoint.ToEndpoint()
|
||||||
if err := store.Get().Insert(convertedEndpoint, result); err != nil {
|
if err := store.Get().Insert(convertedEndpoint, result); err != nil {
|
||||||
if errors.Is(err, common.ErrEndpointNotFound) {
|
if errors.Is(err, common.ErrEndpointNotFound) {
|
||||||
|
@ -64,12 +64,24 @@ func TestCreateExternalEndpointResult(t *testing.T) {
|
|||||||
AuthorizationHeaderBearerToken: "Bearer token",
|
AuthorizationHeaderBearerToken: "Bearer token",
|
||||||
ExpectedCode: 404,
|
ExpectedCode: 404,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "bad-success-value",
|
||||||
|
Path: "/api/v1/endpoints/g_n/external?success=invalid",
|
||||||
|
AuthorizationHeaderBearerToken: "Bearer token",
|
||||||
|
ExpectedCode: 400,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "good-token-success-true",
|
Name: "good-token-success-true",
|
||||||
Path: "/api/v1/endpoints/g_n/external?success=true",
|
Path: "/api/v1/endpoints/g_n/external?success=true",
|
||||||
AuthorizationHeaderBearerToken: "Bearer token",
|
AuthorizationHeaderBearerToken: "Bearer token",
|
||||||
ExpectedCode: 200,
|
ExpectedCode: 200,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "good-token-success-true-with-ignored-error-because-success-true",
|
||||||
|
Path: "/api/v1/endpoints/g_n/external?success=true&error=failed",
|
||||||
|
AuthorizationHeaderBearerToken: "Bearer token",
|
||||||
|
ExpectedCode: 200,
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "good-token-success-false",
|
Name: "good-token-success-false",
|
||||||
Path: "/api/v1/endpoints/g_n/external?success=false",
|
Path: "/api/v1/endpoints/g_n/external?success=false",
|
||||||
@ -82,6 +94,12 @@ func TestCreateExternalEndpointResult(t *testing.T) {
|
|||||||
AuthorizationHeaderBearerToken: "Bearer token",
|
AuthorizationHeaderBearerToken: "Bearer token",
|
||||||
ExpectedCode: 200,
|
ExpectedCode: 200,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "good-token-success-false-with-error",
|
||||||
|
Path: "/api/v1/endpoints/g_n/external?success=false&error=failed",
|
||||||
|
AuthorizationHeaderBearerToken: "Bearer token",
|
||||||
|
ExpectedCode: 200,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for _, scenario := range scenarios {
|
for _, scenario := range scenarios {
|
||||||
t.Run(scenario.Name, func(t *testing.T) {
|
t.Run(scenario.Name, func(t *testing.T) {
|
||||||
@ -108,21 +126,33 @@ func TestCreateExternalEndpointResult(t *testing.T) {
|
|||||||
if endpointStatus.Key != "g_n" {
|
if endpointStatus.Key != "g_n" {
|
||||||
t.Errorf("expected key to be g_n but got %s", endpointStatus.Key)
|
t.Errorf("expected key to be g_n but got %s", endpointStatus.Key)
|
||||||
}
|
}
|
||||||
if len(endpointStatus.Results) != 3 {
|
if len(endpointStatus.Results) != 5 {
|
||||||
t.Errorf("expected 3 results but got %d", len(endpointStatus.Results))
|
t.Errorf("expected 3 results but got %d", len(endpointStatus.Results))
|
||||||
}
|
}
|
||||||
if !endpointStatus.Results[0].Success {
|
if !endpointStatus.Results[0].Success {
|
||||||
t.Errorf("expected first result to be successful")
|
t.Errorf("expected first result to be successful")
|
||||||
}
|
}
|
||||||
if endpointStatus.Results[1].Success {
|
if !endpointStatus.Results[1].Success {
|
||||||
t.Errorf("expected second result to be unsuccessful")
|
t.Errorf("expected second result to be successful")
|
||||||
|
}
|
||||||
|
if len(endpointStatus.Results[1].Errors) > 0 {
|
||||||
|
t.Errorf("expected second result to have no errors")
|
||||||
}
|
}
|
||||||
if endpointStatus.Results[2].Success {
|
if endpointStatus.Results[2].Success {
|
||||||
t.Errorf("expected third result to be unsuccessful")
|
t.Errorf("expected third result to be unsuccessful")
|
||||||
}
|
}
|
||||||
|
if endpointStatus.Results[3].Success {
|
||||||
|
t.Errorf("expected fourth result to be unsuccessful")
|
||||||
|
}
|
||||||
|
if endpointStatus.Results[4].Success {
|
||||||
|
t.Errorf("expected fifth result to be unsuccessful")
|
||||||
|
}
|
||||||
|
if len(endpointStatus.Results[4].Errors) == 0 || endpointStatus.Results[4].Errors[0] != "failed" {
|
||||||
|
t.Errorf("expected fifth result to have errors: failed")
|
||||||
|
}
|
||||||
externalEndpointFromConfig := cfg.GetExternalEndpointByKey("g_n")
|
externalEndpointFromConfig := cfg.GetExternalEndpointByKey("g_n")
|
||||||
if externalEndpointFromConfig.NumberOfFailuresInARow != 2 {
|
if externalEndpointFromConfig.NumberOfFailuresInARow != 3 {
|
||||||
t.Errorf("expected 2 failures in a row but got %d", externalEndpointFromConfig.NumberOfFailuresInARow)
|
t.Errorf("expected 3 failures in a row but got %d", externalEndpointFromConfig.NumberOfFailuresInARow)
|
||||||
}
|
}
|
||||||
if externalEndpointFromConfig.NumberOfSuccessesInARow != 0 {
|
if externalEndpointFromConfig.NumberOfSuccessesInARow != 0 {
|
||||||
t.Errorf("expected 0 successes in a row but got %d", externalEndpointFromConfig.NumberOfSuccessesInARow)
|
t.Errorf("expected 0 successes in a row but got %d", externalEndpointFromConfig.NumberOfSuccessesInARow)
|
||||||
|
@ -188,7 +188,7 @@ func LoadConfiguration(configPath string) (*Config, error) {
|
|||||||
return nil, fmt.Errorf("error reading configuration from directory %s: %w", usedConfigPath, err)
|
return nil, fmt.Errorf("error reading configuration from directory %s: %w", usedConfigPath, err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("[config.LoadConfiguration] Reading configuration from configFile=%s", configPath)
|
log.Printf("[config.LoadConfiguration] Reading configuration from configFile=%s", usedConfigPath)
|
||||||
if data, err := os.ReadFile(usedConfigPath); err != nil {
|
if data, err := os.ReadFile(usedConfigPath); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
} else {
|
} else {
|
||||||
@ -245,16 +245,13 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
|
|||||||
if config == nil || config.Endpoints == nil || len(config.Endpoints) == 0 {
|
if config == nil || config.Endpoints == nil || len(config.Endpoints) == 0 {
|
||||||
err = ErrNoEndpointInConfig
|
err = ErrNoEndpointInConfig
|
||||||
} else {
|
} else {
|
||||||
validateAlertingConfig(config.Alerting, config.Endpoints, config.Debug)
|
validateAlertingConfig(config.Alerting, config.Endpoints, config.ExternalEndpoints, config.Debug)
|
||||||
if err := validateSecurityConfig(config); err != nil {
|
if err := validateSecurityConfig(config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := validateEndpointsConfig(config); err != nil {
|
if err := validateEndpointsConfig(config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := validateExternalEndpointsConfig(config); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := validateWebConfig(config); err != nil {
|
if err := validateWebConfig(config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -338,28 +335,37 @@ func validateWebConfig(config *Config) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func validateEndpointsConfig(config *Config) error {
|
func validateEndpointsConfig(config *Config) error {
|
||||||
|
duplicateValidationMap := make(map[string]bool)
|
||||||
|
// Validate endpoints
|
||||||
for _, ep := range config.Endpoints {
|
for _, ep := range config.Endpoints {
|
||||||
if config.Debug {
|
if config.Debug {
|
||||||
log.Printf("[config.validateEndpointsConfig] Validating endpoint '%s'", ep.Name)
|
log.Printf("[config.validateEndpointsConfig] Validating endpoint '%s'", ep.Name)
|
||||||
}
|
}
|
||||||
|
if endpointKey := ep.Key(); duplicateValidationMap[endpointKey] {
|
||||||
|
return fmt.Errorf("invalid endpoint %s: name and group combination must be unique", ep.Key())
|
||||||
|
} else {
|
||||||
|
duplicateValidationMap[endpointKey] = true
|
||||||
|
}
|
||||||
if err := ep.ValidateAndSetDefaults(); err != nil {
|
if err := ep.ValidateAndSetDefaults(); err != nil {
|
||||||
return fmt.Errorf("invalid endpoint %s: %w", ep.DisplayName(), err)
|
return fmt.Errorf("invalid endpoint %s: %w", ep.Key(), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Printf("[config.validateEndpointsConfig] Validated %d endpoints", len(config.Endpoints))
|
log.Printf("[config.validateEndpointsConfig] Validated %d endpoints", len(config.Endpoints))
|
||||||
return nil
|
// Validate external endpoints
|
||||||
}
|
|
||||||
|
|
||||||
func validateExternalEndpointsConfig(config *Config) error {
|
|
||||||
for _, ee := range config.ExternalEndpoints {
|
for _, ee := range config.ExternalEndpoints {
|
||||||
if config.Debug {
|
if config.Debug {
|
||||||
log.Printf("[config.validateExternalEndpointsConfig] Validating external endpoint '%s'", ee.Name)
|
log.Printf("[config.validateEndpointsConfig] Validating external endpoint '%s'", ee.Name)
|
||||||
|
}
|
||||||
|
if endpointKey := ee.Key(); duplicateValidationMap[endpointKey] {
|
||||||
|
return fmt.Errorf("invalid external endpoint %s: name and group combination must be unique", ee.Key())
|
||||||
|
} else {
|
||||||
|
duplicateValidationMap[endpointKey] = true
|
||||||
}
|
}
|
||||||
if err := ee.ValidateAndSetDefaults(); err != nil {
|
if err := ee.ValidateAndSetDefaults(); err != nil {
|
||||||
return fmt.Errorf("invalid external endpoint %s: %w", ee.DisplayName(), err)
|
return fmt.Errorf("invalid external endpoint %s: %w", ee.Key(), err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Printf("[config.validateExternalEndpointsConfig] Validated %d external endpoints", len(config.ExternalEndpoints))
|
log.Printf("[config.validateEndpointsConfig] Validated %d external endpoints", len(config.ExternalEndpoints))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -382,7 +388,7 @@ func validateSecurityConfig(config *Config) error {
|
|||||||
// Note that the alerting configuration has to be validated before the endpoint configuration, because the default alert
|
// Note that the alerting configuration has to be validated before the endpoint configuration, because the default alert
|
||||||
// returned by provider.AlertProvider.GetDefaultAlert() must be parsed before endpoint.Endpoint.ValidateAndSetDefaults()
|
// returned by provider.AlertProvider.GetDefaultAlert() must be parsed before endpoint.Endpoint.ValidateAndSetDefaults()
|
||||||
// sets the default alert values when none are set.
|
// sets the default alert values when none are set.
|
||||||
func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*endpoint.Endpoint, debug bool) {
|
func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*endpoint.Endpoint, externalEndpoints []*endpoint.ExternalEndpoint, debug bool) {
|
||||||
if alertingConfig == nil {
|
if alertingConfig == nil {
|
||||||
log.Printf("[config.validateAlertingConfig] Alerting is not configured")
|
log.Printf("[config.validateAlertingConfig] Alerting is not configured")
|
||||||
return
|
return
|
||||||
@ -391,12 +397,13 @@ func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*endpoi
|
|||||||
alert.TypeAWSSES,
|
alert.TypeAWSSES,
|
||||||
alert.TypeCustom,
|
alert.TypeCustom,
|
||||||
alert.TypeDiscord,
|
alert.TypeDiscord,
|
||||||
|
alert.TypeEmail,
|
||||||
alert.TypeGitHub,
|
alert.TypeGitHub,
|
||||||
alert.TypeGitLab,
|
alert.TypeGitLab,
|
||||||
|
alert.TypeGitea,
|
||||||
alert.TypeGoogleChat,
|
alert.TypeGoogleChat,
|
||||||
alert.TypeGotify,
|
alert.TypeGotify,
|
||||||
alert.TypeJetBrainsSpace,
|
alert.TypeJetBrainsSpace,
|
||||||
alert.TypeEmail,
|
|
||||||
alert.TypeMatrix,
|
alert.TypeMatrix,
|
||||||
alert.TypeMattermost,
|
alert.TypeMattermost,
|
||||||
alert.TypeMessagebird,
|
alert.TypeMessagebird,
|
||||||
@ -408,6 +415,7 @@ func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*endpoi
|
|||||||
alert.TypeTeams,
|
alert.TypeTeams,
|
||||||
alert.TypeTelegram,
|
alert.TypeTelegram,
|
||||||
alert.TypeTwilio,
|
alert.TypeTwilio,
|
||||||
|
alert.TypeZulip,
|
||||||
}
|
}
|
||||||
var validProviders, invalidProviders []alert.Type
|
var validProviders, invalidProviders []alert.Type
|
||||||
for _, alertType := range alertTypes {
|
for _, alertType := range alertTypes {
|
||||||
@ -420,7 +428,17 @@ func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*endpoi
|
|||||||
for alertIndex, endpointAlert := range ep.Alerts {
|
for alertIndex, endpointAlert := range ep.Alerts {
|
||||||
if alertType == endpointAlert.Type {
|
if alertType == endpointAlert.Type {
|
||||||
if debug {
|
if debug {
|
||||||
log.Printf("[config.validateAlertingConfig] Parsing alert %d with provider's default alert for provider=%s in endpoint=%s", alertIndex, alertType, ep.Name)
|
log.Printf("[config.validateAlertingConfig] Parsing alert %d with default alert for provider=%s in endpoint with key=%s", alertIndex, alertType, ep.Key())
|
||||||
|
}
|
||||||
|
provider.ParseWithDefaultAlert(alertProvider.GetDefaultAlert(), endpointAlert)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, ee := range externalEndpoints {
|
||||||
|
for alertIndex, endpointAlert := range ee.Alerts {
|
||||||
|
if alertType == endpointAlert.Type {
|
||||||
|
if debug {
|
||||||
|
log.Printf("[config.validateAlertingConfig] Parsing alert %d with default alert for provider=%s in endpoint with key=%s", alertIndex, alertType, ee.Key())
|
||||||
}
|
}
|
||||||
provider.ParseWithDefaultAlert(alertProvider.GetDefaultAlert(), endpointAlert)
|
provider.ParseWithDefaultAlert(alertProvider.GetDefaultAlert(), endpointAlert)
|
||||||
}
|
}
|
||||||
|
@ -16,6 +16,7 @@ import (
|
|||||||
"github.com/TwiN/gatus/v5/alerting/provider/email"
|
"github.com/TwiN/gatus/v5/alerting/provider/email"
|
||||||
"github.com/TwiN/gatus/v5/alerting/provider/github"
|
"github.com/TwiN/gatus/v5/alerting/provider/github"
|
||||||
"github.com/TwiN/gatus/v5/alerting/provider/googlechat"
|
"github.com/TwiN/gatus/v5/alerting/provider/googlechat"
|
||||||
|
"github.com/TwiN/gatus/v5/alerting/provider/gotify"
|
||||||
"github.com/TwiN/gatus/v5/alerting/provider/jetbrainsspace"
|
"github.com/TwiN/gatus/v5/alerting/provider/jetbrainsspace"
|
||||||
"github.com/TwiN/gatus/v5/alerting/provider/matrix"
|
"github.com/TwiN/gatus/v5/alerting/provider/matrix"
|
||||||
"github.com/TwiN/gatus/v5/alerting/provider/mattermost"
|
"github.com/TwiN/gatus/v5/alerting/provider/mattermost"
|
||||||
@ -36,6 +37,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestLoadConfiguration(t *testing.T) {
|
func TestLoadConfiguration(t *testing.T) {
|
||||||
|
yes := true
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
scenarios := []struct {
|
scenarios := []struct {
|
||||||
name string
|
name string
|
||||||
@ -165,6 +167,8 @@ metrics: true
|
|||||||
alerting:
|
alerting:
|
||||||
slack:
|
slack:
|
||||||
webhook-url: https://hooks.slack.com/services/xxx/yyy/zzz
|
webhook-url: https://hooks.slack.com/services/xxx/yyy/zzz
|
||||||
|
default-alert:
|
||||||
|
enabled: true
|
||||||
|
|
||||||
endpoints:
|
endpoints:
|
||||||
- name: example
|
- name: example
|
||||||
@ -179,6 +183,12 @@ alerting:
|
|||||||
discord:
|
discord:
|
||||||
webhook-url: https://discord.com/api/webhooks/xxx/yyy
|
webhook-url: https://discord.com/api/webhooks/xxx/yyy
|
||||||
|
|
||||||
|
external-endpoints:
|
||||||
|
- name: ext-ep-test
|
||||||
|
token: "potato"
|
||||||
|
alerts:
|
||||||
|
- type: slack
|
||||||
|
|
||||||
endpoints:
|
endpoints:
|
||||||
- name: frontend
|
- name: frontend
|
||||||
url: https://example.com
|
url: https://example.com
|
||||||
@ -190,7 +200,20 @@ endpoints:
|
|||||||
Metrics: true,
|
Metrics: true,
|
||||||
Alerting: &alerting.Config{
|
Alerting: &alerting.Config{
|
||||||
Discord: &discord.AlertProvider{WebhookURL: "https://discord.com/api/webhooks/xxx/yyy"},
|
Discord: &discord.AlertProvider{WebhookURL: "https://discord.com/api/webhooks/xxx/yyy"},
|
||||||
Slack: &slack.AlertProvider{WebhookURL: "https://hooks.slack.com/services/xxx/yyy/zzz"},
|
Slack: &slack.AlertProvider{WebhookURL: "https://hooks.slack.com/services/xxx/yyy/zzz", DefaultAlert: &alert.Alert{Enabled: &yes}},
|
||||||
|
},
|
||||||
|
ExternalEndpoints: []*endpoint.ExternalEndpoint{
|
||||||
|
{
|
||||||
|
Name: "ext-ep-test",
|
||||||
|
Token: "potato",
|
||||||
|
Alerts: []*alert.Alert{
|
||||||
|
{
|
||||||
|
Type: alert.TypeSlack,
|
||||||
|
FailureThreshold: 3,
|
||||||
|
SuccessThreshold: 2,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Endpoints: []*endpoint.Endpoint{
|
Endpoints: []*endpoint.Endpoint{
|
||||||
{
|
{
|
||||||
@ -325,10 +348,6 @@ external-endpoints:
|
|||||||
- name: ext-ep-test
|
- name: ext-ep-test
|
||||||
group: core
|
group: core
|
||||||
token: "potato"
|
token: "potato"
|
||||||
alerts:
|
|
||||||
- type: discord
|
|
||||||
description: "healthcheck failed"
|
|
||||||
send-on-resolved: true
|
|
||||||
|
|
||||||
endpoints:
|
endpoints:
|
||||||
- name: website
|
- name: website
|
||||||
@ -382,18 +401,7 @@ endpoints:
|
|||||||
if config.ExternalEndpoints[0].Token != "potato" {
|
if config.ExternalEndpoints[0].Token != "potato" {
|
||||||
t.Errorf("Token should have been %s", "potato")
|
t.Errorf("Token should have been %s", "potato")
|
||||||
}
|
}
|
||||||
if len(config.ExternalEndpoints[0].Alerts) != 1 {
|
|
||||||
t.Error("Should have returned one alert")
|
|
||||||
}
|
|
||||||
if config.ExternalEndpoints[0].Alerts[0].Type != alert.TypeDiscord {
|
|
||||||
t.Errorf("Type should have been %s", alert.TypeDiscord)
|
|
||||||
}
|
|
||||||
if config.ExternalEndpoints[0].Alerts[0].FailureThreshold != 3 {
|
|
||||||
t.Errorf("FailureThreshold should have been %d, got %d", 3, config.ExternalEndpoints[0].Alerts[0].FailureThreshold)
|
|
||||||
}
|
|
||||||
if config.ExternalEndpoints[0].Alerts[0].SuccessThreshold != 2 {
|
|
||||||
t.Errorf("SuccessThreshold should have been %d, got %d", 2, config.ExternalEndpoints[0].Alerts[0].SuccessThreshold)
|
|
||||||
}
|
|
||||||
if len(config.Endpoints) != 3 {
|
if len(config.Endpoints) != 3 {
|
||||||
t.Error("Should have returned two endpoints")
|
t.Error("Should have returned two endpoints")
|
||||||
}
|
}
|
||||||
@ -439,7 +447,6 @@ endpoints:
|
|||||||
if len(config.Endpoints[1].Conditions) != 2 {
|
if len(config.Endpoints[1].Conditions) != 2 {
|
||||||
t.Errorf("There should have been %d conditions", 2)
|
t.Errorf("There should have been %d conditions", 2)
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.Endpoints[2].URL != "https://example.com/" {
|
if config.Endpoints[2].URL != "https://example.com/" {
|
||||||
t.Errorf("URL should have been %s", "https://example.com/")
|
t.Errorf("URL should have been %s", "https://example.com/")
|
||||||
}
|
}
|
||||||
@ -924,7 +931,7 @@ alerting:
|
|||||||
default-alert:
|
default-alert:
|
||||||
enabled: true
|
enabled: true
|
||||||
failure-threshold: 10
|
failure-threshold: 10
|
||||||
success-threshold: 1
|
success-threshold: 15
|
||||||
pagerduty:
|
pagerduty:
|
||||||
integration-key: "00000000000000000000000000000000"
|
integration-key: "00000000000000000000000000000000"
|
||||||
default-alert:
|
default-alert:
|
||||||
@ -977,24 +984,49 @@ alerting:
|
|||||||
enabled: true
|
enabled: true
|
||||||
failure-threshold: 5
|
failure-threshold: 5
|
||||||
success-threshold: 3
|
success-threshold: 3
|
||||||
|
email:
|
||||||
|
from: "from@example.com"
|
||||||
|
username: "from@example.com"
|
||||||
|
password: "hunter2"
|
||||||
|
host: "mail.example.com"
|
||||||
|
port: 587
|
||||||
|
to: "recipient1@example.com,recipient2@example.com"
|
||||||
|
client:
|
||||||
|
insecure: false
|
||||||
|
default-alert:
|
||||||
|
enabled: true
|
||||||
|
gotify:
|
||||||
|
server-url: "https://gotify.example"
|
||||||
|
token: "**************"
|
||||||
|
default-alert:
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
external-endpoints:
|
||||||
|
- name: ext-ep-test
|
||||||
|
group: core
|
||||||
|
token: potato
|
||||||
|
alerts:
|
||||||
|
- type: discord
|
||||||
|
|
||||||
endpoints:
|
endpoints:
|
||||||
- name: website
|
- name: website
|
||||||
url: https://twin.sh/health
|
url: https://twin.sh/health
|
||||||
alerts:
|
alerts:
|
||||||
- type: slack
|
- type: slack
|
||||||
- type: pagerduty
|
- type: pagerduty
|
||||||
- type: mattermost
|
- type: mattermost
|
||||||
- type: messagebird
|
- type: messagebird
|
||||||
- type: discord
|
- type: discord
|
||||||
success-threshold: 2 # test endpoint alert override
|
success-threshold: 8 # test endpoint alert override
|
||||||
- type: telegram
|
- type: telegram
|
||||||
- type: twilio
|
- type: twilio
|
||||||
- type: teams
|
- type: teams
|
||||||
- type: pushover
|
- type: pushover
|
||||||
- type: jetbrainsspace
|
- type: jetbrainsspace
|
||||||
conditions:
|
- type: email
|
||||||
- "[STATUS] == 200"
|
- type: gotify
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"
|
||||||
`))
|
`))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error("expected no error, got", err.Error())
|
t.Error("expected no error, got", err.Error())
|
||||||
@ -1071,6 +1103,12 @@ endpoints:
|
|||||||
if config.Alerting.Discord.GetDefaultAlert() == nil {
|
if config.Alerting.Discord.GetDefaultAlert() == nil {
|
||||||
t.Fatal("Discord.GetDefaultAlert() shouldn't have returned nil")
|
t.Fatal("Discord.GetDefaultAlert() shouldn't have returned nil")
|
||||||
}
|
}
|
||||||
|
if config.Alerting.Discord.GetDefaultAlert().FailureThreshold != 10 {
|
||||||
|
t.Errorf("Discord default alert failure threshold should've been %d, but was %d", 10, config.Alerting.Discord.GetDefaultAlert().FailureThreshold)
|
||||||
|
}
|
||||||
|
if config.Alerting.Discord.GetDefaultAlert().SuccessThreshold != 15 {
|
||||||
|
t.Errorf("Discord default alert success threshold should've been %d, but was %d", 15, config.Alerting.Discord.GetDefaultAlert().SuccessThreshold)
|
||||||
|
}
|
||||||
if config.Alerting.Discord.WebhookURL != "http://example.org" {
|
if config.Alerting.Discord.WebhookURL != "http://example.org" {
|
||||||
t.Errorf("Discord webhook should've been %s, but was %s", "http://example.org", config.Alerting.Discord.WebhookURL)
|
t.Errorf("Discord webhook should've been %s, but was %s", "http://example.org", config.Alerting.Discord.WebhookURL)
|
||||||
}
|
}
|
||||||
@ -1107,6 +1145,7 @@ endpoints:
|
|||||||
if config.Alerting.JetBrainsSpace == nil || !config.Alerting.JetBrainsSpace.IsValid() {
|
if config.Alerting.JetBrainsSpace == nil || !config.Alerting.JetBrainsSpace.IsValid() {
|
||||||
t.Fatal("JetBrainsSpace alerting config should've been valid")
|
t.Fatal("JetBrainsSpace alerting config should've been valid")
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.Alerting.JetBrainsSpace.GetDefaultAlert() == nil {
|
if config.Alerting.JetBrainsSpace.GetDefaultAlert() == nil {
|
||||||
t.Fatal("JetBrainsSpace.GetDefaultAlert() shouldn't have returned nil")
|
t.Fatal("JetBrainsSpace.GetDefaultAlert() shouldn't have returned nil")
|
||||||
}
|
}
|
||||||
@ -1120,6 +1159,67 @@ endpoints:
|
|||||||
t.Errorf("JetBrainsSpace webhook should've been %s, but was %s", "baz", config.Alerting.JetBrainsSpace.Token)
|
t.Errorf("JetBrainsSpace webhook should've been %s, but was %s", "baz", config.Alerting.JetBrainsSpace.Token)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if config.Alerting.Email == nil || !config.Alerting.Email.IsValid() {
|
||||||
|
t.Fatal("Email alerting config should've been valid")
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.GetDefaultAlert() == nil {
|
||||||
|
t.Fatal("Email.GetDefaultAlert() shouldn't have returned nil")
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.From != "from@example.com" {
|
||||||
|
t.Errorf("Email from should've been %s, but was %s", "from@example.com", config.Alerting.Email.From)
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.Username != "from@example.com" {
|
||||||
|
t.Errorf("Email username should've been %s, but was %s", "from@example.com", config.Alerting.Email.Username)
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.Password != "hunter2" {
|
||||||
|
t.Errorf("Email password should've been %s, but was %s", "hunter2", config.Alerting.Email.Password)
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.Host != "mail.example.com" {
|
||||||
|
t.Errorf("Email host should've been %s, but was %s", "mail.example.com", config.Alerting.Email.Host)
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.Port != 587 {
|
||||||
|
t.Errorf("Email port should've been %d, but was %d", 587, config.Alerting.Email.Port)
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.To != "recipient1@example.com,recipient2@example.com" {
|
||||||
|
t.Errorf("Email to should've been %s, but was %s", "recipient1@example.com,recipient2@example.com", config.Alerting.Email.To)
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.ClientConfig == nil {
|
||||||
|
t.Fatal("Email client config should've been set")
|
||||||
|
}
|
||||||
|
if config.Alerting.Email.ClientConfig.Insecure {
|
||||||
|
t.Error("Email client config should've been secure")
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Alerting.Gotify == nil || !config.Alerting.Gotify.IsValid() {
|
||||||
|
t.Fatal("Gotify alerting config should've been valid")
|
||||||
|
}
|
||||||
|
if config.Alerting.Gotify.GetDefaultAlert() == nil {
|
||||||
|
t.Fatal("Gotify.GetDefaultAlert() shouldn't have returned nil")
|
||||||
|
}
|
||||||
|
if config.Alerting.Gotify.ServerURL != "https://gotify.example" {
|
||||||
|
t.Errorf("Gotify server URL should've been %s, but was %s", "https://gotify.example", config.Alerting.Gotify.ServerURL)
|
||||||
|
}
|
||||||
|
if config.Alerting.Gotify.Token != "**************" {
|
||||||
|
t.Errorf("Gotify token should've been %s, but was %s", "**************", config.Alerting.Gotify.Token)
|
||||||
|
}
|
||||||
|
|
||||||
|
// External endpoints
|
||||||
|
if len(config.ExternalEndpoints) != 1 {
|
||||||
|
t.Error("There should've been 1 external endpoint")
|
||||||
|
}
|
||||||
|
if config.ExternalEndpoints[0].Alerts[0].Type != alert.TypeDiscord {
|
||||||
|
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeDiscord, config.ExternalEndpoints[0].Alerts[0].Type)
|
||||||
|
}
|
||||||
|
if !config.ExternalEndpoints[0].Alerts[0].IsEnabled() {
|
||||||
|
t.Error("The alert should've been enabled")
|
||||||
|
}
|
||||||
|
if config.ExternalEndpoints[0].Alerts[0].FailureThreshold != 10 {
|
||||||
|
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 10, config.ExternalEndpoints[0].Alerts[0].FailureThreshold)
|
||||||
|
}
|
||||||
|
if config.ExternalEndpoints[0].Alerts[0].SuccessThreshold != 15 {
|
||||||
|
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 15, config.ExternalEndpoints[0].Alerts[0].SuccessThreshold)
|
||||||
|
}
|
||||||
|
|
||||||
// Endpoints
|
// Endpoints
|
||||||
if len(config.Endpoints) != 1 {
|
if len(config.Endpoints) != 1 {
|
||||||
t.Error("There should've been 1 endpoint")
|
t.Error("There should've been 1 endpoint")
|
||||||
@ -1130,8 +1230,8 @@ endpoints:
|
|||||||
if config.Endpoints[0].Interval != 60*time.Second {
|
if config.Endpoints[0].Interval != 60*time.Second {
|
||||||
t.Errorf("Interval should have been %s, because it is the default value", 60*time.Second)
|
t.Errorf("Interval should have been %s, because it is the default value", 60*time.Second)
|
||||||
}
|
}
|
||||||
if len(config.Endpoints[0].Alerts) != 10 {
|
if len(config.Endpoints[0].Alerts) != 12 {
|
||||||
t.Fatal("There should've been 10 alerts configured")
|
t.Fatalf("There should've been 12 alerts configured, got %d", len(config.Endpoints[0].Alerts))
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.Endpoints[0].Alerts[0].Type != alert.TypeSlack {
|
if config.Endpoints[0].Alerts[0].Type != alert.TypeSlack {
|
||||||
@ -1192,8 +1292,8 @@ endpoints:
|
|||||||
if config.Endpoints[0].Alerts[4].FailureThreshold != 10 {
|
if config.Endpoints[0].Alerts[4].FailureThreshold != 10 {
|
||||||
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 10, config.Endpoints[0].Alerts[4].FailureThreshold)
|
t.Errorf("The failure threshold of the alert should've been %d, but it was %d", 10, config.Endpoints[0].Alerts[4].FailureThreshold)
|
||||||
}
|
}
|
||||||
if config.Endpoints[0].Alerts[4].SuccessThreshold != 2 {
|
if config.Endpoints[0].Alerts[4].SuccessThreshold != 8 {
|
||||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Endpoints[0].Alerts[4].SuccessThreshold)
|
t.Errorf("The default success threshold of the alert should've been %d because it was explicitly overriden, but it was %d", 8, config.Endpoints[0].Alerts[4].SuccessThreshold)
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.Endpoints[0].Alerts[5].Type != alert.TypeTelegram {
|
if config.Endpoints[0].Alerts[5].Type != alert.TypeTelegram {
|
||||||
@ -1255,10 +1355,36 @@ endpoints:
|
|||||||
t.Error("The alert should've been enabled")
|
t.Error("The alert should've been enabled")
|
||||||
}
|
}
|
||||||
if config.Endpoints[0].Alerts[9].FailureThreshold != 5 {
|
if config.Endpoints[0].Alerts[9].FailureThreshold != 5 {
|
||||||
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Endpoints[0].Alerts[9].FailureThreshold)
|
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 5, config.Endpoints[0].Alerts[9].FailureThreshold)
|
||||||
}
|
}
|
||||||
if config.Endpoints[0].Alerts[9].SuccessThreshold != 3 {
|
if config.Endpoints[0].Alerts[9].SuccessThreshold != 3 {
|
||||||
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Endpoints[0].Alerts[9].SuccessThreshold)
|
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 3, config.Endpoints[0].Alerts[9].SuccessThreshold)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Endpoints[0].Alerts[10].Type != alert.TypeEmail {
|
||||||
|
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeEmail, config.Endpoints[0].Alerts[10].Type)
|
||||||
|
}
|
||||||
|
if !config.Endpoints[0].Alerts[10].IsEnabled() {
|
||||||
|
t.Error("The alert should've been enabled")
|
||||||
|
}
|
||||||
|
if config.Endpoints[0].Alerts[10].FailureThreshold != 3 {
|
||||||
|
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Endpoints[0].Alerts[10].FailureThreshold)
|
||||||
|
}
|
||||||
|
if config.Endpoints[0].Alerts[10].SuccessThreshold != 2 {
|
||||||
|
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Endpoints[0].Alerts[10].SuccessThreshold)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Endpoints[0].Alerts[11].Type != alert.TypeGotify {
|
||||||
|
t.Errorf("The type of the alert should've been %s, but it was %s", alert.TypeGotify, config.Endpoints[0].Alerts[11].Type)
|
||||||
|
}
|
||||||
|
if !config.Endpoints[0].Alerts[11].IsEnabled() {
|
||||||
|
t.Error("The alert should've been enabled")
|
||||||
|
}
|
||||||
|
if config.Endpoints[0].Alerts[11].FailureThreshold != 3 {
|
||||||
|
t.Errorf("The default failure threshold of the alert should've been %d, but it was %d", 3, config.Endpoints[0].Alerts[11].FailureThreshold)
|
||||||
|
}
|
||||||
|
if config.Endpoints[0].Alerts[11].SuccessThreshold != 2 {
|
||||||
|
t.Errorf("The default success threshold of the alert should've been %d, but it was %d", 2, config.Endpoints[0].Alerts[11].SuccessThreshold)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1532,6 +1658,99 @@ endpoints:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseAndValidateConfigBytesWithDuplicateEndpointName(t *testing.T) {
|
||||||
|
scenarios := []struct {
|
||||||
|
name string
|
||||||
|
shouldError bool
|
||||||
|
config string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "same-name-no-group",
|
||||||
|
shouldError: true,
|
||||||
|
config: `
|
||||||
|
endpoints:
|
||||||
|
- name: ep1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"
|
||||||
|
- name: ep1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same-name-different-group",
|
||||||
|
shouldError: false,
|
||||||
|
config: `
|
||||||
|
endpoints:
|
||||||
|
- name: ep1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"
|
||||||
|
- name: ep1
|
||||||
|
group: g1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same-name-same-group",
|
||||||
|
shouldError: true,
|
||||||
|
config: `
|
||||||
|
endpoints:
|
||||||
|
- name: ep1
|
||||||
|
group: g1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"
|
||||||
|
- name: ep1
|
||||||
|
group: g1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same-name-different-endpoint-type",
|
||||||
|
shouldError: true,
|
||||||
|
config: `
|
||||||
|
external-endpoints:
|
||||||
|
- name: ep1
|
||||||
|
token: "12345678"
|
||||||
|
|
||||||
|
endpoints:
|
||||||
|
- name: ep1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "same-name-different-group-different-endpoint-type",
|
||||||
|
shouldError: false,
|
||||||
|
config: `
|
||||||
|
external-endpoints:
|
||||||
|
- name: ep1
|
||||||
|
group: gr1
|
||||||
|
token: "12345678"
|
||||||
|
|
||||||
|
endpoints:
|
||||||
|
- name: ep1
|
||||||
|
url: https://twin.sh/health
|
||||||
|
conditions:
|
||||||
|
- "[STATUS] == 200"`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
t.Run(scenario.name, func(t *testing.T) {
|
||||||
|
_, err := parseAndValidateConfigBytes([]byte(scenario.config))
|
||||||
|
if scenario.shouldError && err == nil {
|
||||||
|
t.Error("should've returned an error")
|
||||||
|
} else if !scenario.shouldError && err != nil {
|
||||||
|
t.Error("shouldn't have returned an error")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestParseAndValidateConfigBytesWithInvalidStorageConfig(t *testing.T) {
|
func TestParseAndValidateConfigBytesWithInvalidStorageConfig(t *testing.T) {
|
||||||
_, err := parseAndValidateConfigBytes([]byte(`
|
_, err := parseAndValidateConfigBytes([]byte(`
|
||||||
storage:
|
storage:
|
||||||
@ -1645,7 +1864,7 @@ endpoints:
|
|||||||
|
|
||||||
func TestParseAndValidateConfigBytesWithNoEndpoints(t *testing.T) {
|
func TestParseAndValidateConfigBytesWithNoEndpoints(t *testing.T) {
|
||||||
_, err := parseAndValidateConfigBytes([]byte(``))
|
_, err := parseAndValidateConfigBytes([]byte(``))
|
||||||
if err != ErrNoEndpointInConfig {
|
if !errors.Is(err, ErrNoEndpointInConfig) {
|
||||||
t.Error("The error returned should have been of type ErrNoEndpointInConfig")
|
t.Error("The error returned should have been of type ErrNoEndpointInConfig")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1657,6 +1876,7 @@ func TestGetAlertingProviderByAlertType(t *testing.T) {
|
|||||||
Email: &email.AlertProvider{},
|
Email: &email.AlertProvider{},
|
||||||
GitHub: &github.AlertProvider{},
|
GitHub: &github.AlertProvider{},
|
||||||
GoogleChat: &googlechat.AlertProvider{},
|
GoogleChat: &googlechat.AlertProvider{},
|
||||||
|
Gotify: &gotify.AlertProvider{},
|
||||||
JetBrainsSpace: &jetbrainsspace.AlertProvider{},
|
JetBrainsSpace: &jetbrainsspace.AlertProvider{},
|
||||||
Matrix: &matrix.AlertProvider{},
|
Matrix: &matrix.AlertProvider{},
|
||||||
Mattermost: &mattermost.AlertProvider{},
|
Mattermost: &mattermost.AlertProvider{},
|
||||||
@ -1679,6 +1899,7 @@ func TestGetAlertingProviderByAlertType(t *testing.T) {
|
|||||||
{alertType: alert.TypeEmail, expected: alertingConfig.Email},
|
{alertType: alert.TypeEmail, expected: alertingConfig.Email},
|
||||||
{alertType: alert.TypeGitHub, expected: alertingConfig.GitHub},
|
{alertType: alert.TypeGitHub, expected: alertingConfig.GitHub},
|
||||||
{alertType: alert.TypeGoogleChat, expected: alertingConfig.GoogleChat},
|
{alertType: alert.TypeGoogleChat, expected: alertingConfig.GoogleChat},
|
||||||
|
{alertType: alert.TypeGotify, expected: alertingConfig.Gotify},
|
||||||
{alertType: alert.TypeJetBrainsSpace, expected: alertingConfig.JetBrainsSpace},
|
{alertType: alert.TypeJetBrainsSpace, expected: alertingConfig.JetBrainsSpace},
|
||||||
{alertType: alert.TypeMatrix, expected: alertingConfig.Matrix},
|
{alertType: alert.TypeMatrix, expected: alertingConfig.Matrix},
|
||||||
{alertType: alert.TypeMattermost, expected: alertingConfig.Mattermost},
|
{alertType: alert.TypeMattermost, expected: alertingConfig.Mattermost},
|
||||||
|
@@ -45,11 +45,6 @@ func (externalEndpoint *ExternalEndpoint) ValidateAndSetDefaults() error {
     if len(externalEndpoint.Token) == 0 {
         return ErrExternalEndpointWithNoToken
     }
-    for _, externalEndpointAlert := range externalEndpoint.Alerts {
-        if err := externalEndpointAlert.ValidateAndSetDefaults(); err != nil {
-            return err
-        }
-    }
     return nil
 }
 
@@ -6,12 +6,14 @@ import (
     "strconv"
     "strings"
     "time"
+    _ "time/tzdata" // Required for IANA timezone support
 )
 
 var (
     errInvalidMaintenanceStartFormat = errors.New("invalid maintenance start format: must be hh:mm, between 00:00 and 23:59 inclusively (e.g. 23:00)")
     errInvalidMaintenanceDuration    = errors.New("invalid maintenance duration: must be bigger than 0 (e.g. 30m)")
     errInvalidDayName                = fmt.Errorf("invalid value specified for 'on'. supported values are %s", longDayNames)
+    errInvalidTimezone               = errors.New("invalid timezone specified or format not supported. Use IANA timezone format (e.g. America/Sao_Paulo)")
 
     longDayNames = []string{
         "Sunday",
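The blank time/tzdata import added above embeds the IANA timezone database into the binary, so the time.LoadLocation calls introduced for the new timezone option keep working even in minimal container images that ship no /usr/share/zoneinfo. A small standalone illustration of what the import guarantees:

package main

import (
    "fmt"
    "time"
    _ "time/tzdata" // embeds the IANA database; LoadLocation no longer depends on system zoneinfo files
)

func main() {
    location, err := time.LoadLocation("America/Sao_Paulo")
    if err != nil {
        panic(err)
    }
    fmt.Println(time.Now().In(location))
}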
@@ -27,17 +29,19 @@ var (
 // Config allows for the configuration of a maintenance period.
 // During this maintenance period, no alerts will be sent.
 //
-// Uses UTC.
+// Uses UTC by default.
 type Config struct {
     Enabled  *bool         `yaml:"enabled"`  // Whether the maintenance period is enabled. Enabled by default if nil.
     Start    string        `yaml:"start"`    // Time at which the maintenance period starts (e.g. 23:00)
     Duration time.Duration `yaml:"duration"` // Duration of the maintenance period (e.g. 4h)
+    Timezone string        `yaml:"timezone"` // Timezone in string format which the maintenance period is configured (e.g. America/Sao_Paulo)
 
     // Every is a list of days of the week during which maintenance period applies.
     // See longDayNames for list of valid values.
     // Every day if empty.
     Every []string `yaml:"every"`
 
+    TimezoneLocation *time.Location // Timezone in location format which the maintenance period is configured
     durationToStartFromMidnight time.Duration
 }
 
@@ -49,7 +53,7 @@ func GetDefaultConfig() *Config {
 }
 
 // IsEnabled returns whether maintenance is enabled or not
-func (c Config) IsEnabled() bool {
+func (c *Config) IsEnabled() bool {
     if c.Enabled == nil {
         return true
     }
@@ -85,15 +89,27 @@ func (c *Config) ValidateAndSetDefaults() error {
     if c.Duration <= 0 || c.Duration > 24*time.Hour {
         return errInvalidMaintenanceDuration
     }
+    if c.Timezone != "" {
+        c.TimezoneLocation, err = time.LoadLocation(c.Timezone)
+        if err != nil {
+            return fmt.Errorf("%w: %w", errInvalidTimezone, err)
+        }
+    } else {
+        c.Timezone = "UTC"
+        c.TimezoneLocation = time.UTC
+    }
     return nil
 }
 
 // IsUnderMaintenance checks whether the endpoints that Gatus monitors are within the configured maintenance window
-func (c Config) IsUnderMaintenance() bool {
+func (c *Config) IsUnderMaintenance() bool {
     if !c.IsEnabled() {
         return false
     }
-    now := time.Now().UTC()
+    now := time.Now()
+    if c.TimezoneLocation != nil {
+        now = now.In(c.TimezoneLocation)
+    }
     var dayWhereMaintenancePeriodWouldStart time.Time
     if now.Hour() >= int(c.durationToStartFromMidnight.Hours()) {
         dayWhereMaintenancePeriodWouldStart = now.Truncate(24 * time.Hour)
@@ -112,7 +128,7 @@ func (c Config) IsUnderMaintenance() bool {
     return now.After(startOfMaintenancePeriod) && now.Before(endOfMaintenancePeriod)
 }
 
-func (c Config) hasDay(day string) bool {
+func (c *Config) hasDay(day string) bool {
     for _, d := range c.Every {
         if d == day {
             return true
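Taken together, the changes above let a maintenance window be pinned to an IANA timezone instead of UTC: ValidateAndSetDefaults resolves the configured name through time.LoadLocation (falling back to UTC when the field is empty), and IsUnderMaintenance evaluates the window against the wall clock in that location. A short test-style sketch of how the struct is used, with example values only:

func TestConfig_TimezoneUsageSketch(t *testing.T) {
    cfg := &Config{
        Start:    "23:00",
        Duration: 4 * time.Hour,
        Timezone: "America/Sao_Paulo",           // leave empty to keep the previous UTC behaviour
        Every:    []string{"Saturday", "Sunday"}, // empty means every day of the week
    }
    if err := cfg.ValidateAndSetDefaults(); err != nil {
        t.Fatalf("expected valid maintenance config, got %v", err)
    }
    // The window is evaluated against the current time in the configured location.
    _ = cfg.IsUnderMaintenance()
}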
@@ -90,6 +90,15 @@ func TestConfig_ValidateAndSetDefaults(t *testing.T) {
             },
             expectedError: errInvalidMaintenanceDuration,
         },
+        {
+            name: "invalid-timezone",
+            cfg: &Config{
+                Start:    "23:00",
+                Duration: time.Hour,
+                Timezone: "invalid-timezone",
+            },
+            expectedError: errInvalidTimezone,
+        },
         {
             name: "every-day-at-2300",
             cfg: &Config{
@@ -126,6 +135,33 @@ func TestConfig_ValidateAndSetDefaults(t *testing.T) {
             },
             expectedError: nil,
         },
+        {
+            name: "timezone-amsterdam",
+            cfg: &Config{
+                Start:    "23:00",
+                Duration: time.Hour,
+                Timezone: "Europe/Amsterdam",
+            },
+            expectedError: nil,
+        },
+        {
+            name: "timezone-cet",
+            cfg: &Config{
+                Start:    "23:00",
+                Duration: time.Hour,
+                Timezone: "CET",
+            },
+            expectedError: nil,
+        },
+        {
+            name: "timezone-etc-plus-5",
+            cfg: &Config{
+                Start:    "23:00",
+                Duration: time.Hour,
+                Timezone: "Etc/GMT+5",
+            },
+            expectedError: nil,
+        },
     }
     for _, scenario := range scenarios {
         t.Run(scenario.name, func(t *testing.T) {
@@ -220,7 +256,25 @@ func TestConfig_IsUnderMaintenance(t *testing.T) {
             expected: true,
         },
         {
-            name: "under-maintenance-starting-4h-ago-for-3h",
+            name: "under-maintenance-amsterdam-timezone-starting-now-for-2h",
+            cfg: &Config{
+                Start:    fmt.Sprintf("%02d:00", now.Hour()),
+                Duration: 2 * time.Hour,
+                Timezone: "Europe/Amsterdam",
+            },
+            expected: true,
+        },
+        {
+            name: "under-maintenance-utc-timezone-starting-now-for-2h",
+            cfg: &Config{
+                Start:    fmt.Sprintf("%02d:00", now.Hour()),
+                Duration: 2 * time.Hour,
+                Timezone: "UTC",
+            },
+            expected: true,
+        },
+        {
+            name: "not-under-maintenance-starting-4h-ago-for-3h",
             cfg: &Config{
                 Start:    fmt.Sprintf("%02d:00", normalizeHour(now.Hour()-4)),
                 Duration: 3 * time.Hour,
@@ -228,7 +282,7 @@ func TestConfig_IsUnderMaintenance(t *testing.T) {
             expected: false,
         },
         {
-            name: "under-maintenance-starting-5h-ago-for-1h",
+            name: "not-under-maintenance-starting-5h-ago-for-1h",
             cfg: &Config{
                 Start:    fmt.Sprintf("%02d:00", normalizeHour(now.Hour()-5)),
                 Duration: time.Hour,
@@ -253,6 +307,16 @@ func TestConfig_IsUnderMaintenance(t *testing.T) {
             },
             expected: false,
         },
+        {
+            name: "not-under-maintenance-los-angeles-timezone-starting-now-for-2h-today",
+            cfg: &Config{
+                Start:    fmt.Sprintf("%02d:00", now.Hour()),
+                Duration: 2 * time.Hour,
+                Timezone: "America/Los_Angeles",
+                Every:    []string{now.Weekday().String()},
+            },
+            expected: false,
+        },
     }
     for _, scenario := range scenarios {
         t.Run(scenario.name, func(t *testing.T) {
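The Los Angeles scenario added above relies on the offset between the test's reference time and the configured location: Start is built from the current hour of now (which the surrounding scenarios treat as UTC), but the window is evaluated on the America/Los_Angeles wall clock, which sits 7-8 hours behind, so the two-hour window either has not started yet or is long over, and Every pinned to today's weekday can even point at a different local day. A tiny standalone sketch of that arithmetic (assuming the reference time is UTC):

package main

import (
    "fmt"
    "time"
)

func main() {
    nowUTC := time.Now().UTC()
    losAngeles, err := time.LoadLocation("America/Los_Angeles")
    if err != nil {
        panic(err)
    }
    nowLA := nowUTC.In(losAngeles)
    // Start would be built from the UTC hour (e.g. "14:00"), but IsUnderMaintenance
    // compares it against the Los Angeles wall clock (e.g. 06:00 or 07:00).
    fmt.Printf("UTC hour: %02d, Los Angeles hour: %02d\n", nowUTC.Hour(), nowLA.Hour())
}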
110 go.mod
@@ -1,85 +1,93 @@
 module github.com/TwiN/gatus/v5
 
-go 1.21
+go 1.22.2
 
 require (
+    code.gitea.io/sdk/gitea v0.19.0
     github.com/TwiN/deepmerge v0.2.1
     github.com/TwiN/g8/v2 v2.0.0
-    github.com/TwiN/gocache/v2 v2.2.0
+    github.com/TwiN/gocache/v2 v2.2.2
     github.com/TwiN/health v1.6.0
-    github.com/TwiN/whois v1.1.7
+    github.com/TwiN/whois v1.1.9
-    github.com/aws/aws-sdk-go v1.47.9
+    github.com/aws/aws-sdk-go v1.54.10
-    github.com/coreos/go-oidc/v3 v3.7.0
+    github.com/coreos/go-oidc/v3 v3.10.0
-    github.com/gofiber/fiber/v2 v2.52.1
+    github.com/gofiber/fiber/v2 v2.52.4
     github.com/google/go-github/v48 v48.2.0
     github.com/google/uuid v1.6.0
-    github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
+    github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2
     github.com/lib/pq v1.10.9
-    github.com/miekg/dns v1.1.56
+    github.com/miekg/dns v1.1.62
-    github.com/prometheus-community/pro-bing v0.3.0
+    github.com/prometheus-community/pro-bing v0.4.0
-    github.com/prometheus/client_golang v1.18.0
+    github.com/prometheus/client_golang v1.20.4
-    github.com/valyala/fasthttp v1.51.0
+    github.com/valyala/fasthttp v1.56.0
-    github.com/wcharczuk/go-chart/v2 v2.1.1
+    github.com/wcharczuk/go-chart/v2 v2.1.2
-    golang.org/x/crypto v0.21.0
+    golang.org/x/crypto v0.27.0
-    golang.org/x/net v0.22.0
+    golang.org/x/net v0.29.0
-    golang.org/x/oauth2 v0.18.0
+    golang.org/x/oauth2 v0.21.0
-    google.golang.org/api v0.148.0
+    google.golang.org/api v0.183.0
     gopkg.in/mail.v2 v2.3.1
     gopkg.in/yaml.v3 v3.0.1
-    modernc.org/sqlite v1.28.0
+    modernc.org/sqlite v1.33.1
 )
 
 require (
-    cloud.google.com/go/compute v1.23.0 // indirect
+    cloud.google.com/go/auth v0.5.1 // indirect
-    cloud.google.com/go/compute/metadata v0.2.3 // indirect
+    cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
-    github.com/andybalholm/brotli v1.0.5 // indirect
+    cloud.google.com/go/compute/metadata v0.3.0 // indirect
+    github.com/andybalholm/brotli v1.1.0 // indirect
     github.com/beorn7/perks v1.0.1 // indirect
-    github.com/blend/go-sdk v1.20220411.3 // indirect
+    github.com/cespare/xxhash/v2 v2.3.0 // indirect
-    github.com/cespare/xxhash/v2 v2.2.0 // indirect
+    github.com/davidmz/go-pageant v1.0.2 // indirect
-    github.com/davecgh/go-spew v1.1.1 // indirect
     github.com/dustin/go-humanize v1.0.1 // indirect
-    github.com/go-jose/go-jose/v3 v3.0.0 // indirect
+    github.com/felixge/httpsnoop v1.0.4 // indirect
+    github.com/go-fed/httpsig v1.1.0 // indirect
+    github.com/go-jose/go-jose/v4 v4.0.2 // indirect
+    github.com/go-logr/logr v1.4.2 // indirect
+    github.com/go-logr/stdr v1.2.2 // indirect
     github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
     github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-    github.com/golang/protobuf v1.5.3 // indirect
+    github.com/golang/protobuf v1.5.4 // indirect
     github.com/google/go-querystring v1.1.0 // indirect
     github.com/google/s2a-go v0.1.7 // indirect
-    github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect
+    github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+    github.com/hashicorp/go-version v1.6.0 // indirect
+    github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
     github.com/jmespath/go-jmespath v0.4.0 // indirect
-    github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
+    github.com/klauspost/compress v1.17.9 // indirect
-    github.com/klauspost/compress v1.17.0 // indirect
     github.com/kr/text v0.2.0 // indirect
+    github.com/kylelemons/godebug v1.1.0 // indirect
     github.com/mattn/go-colorable v0.1.13 // indirect
     github.com/mattn/go-isatty v0.0.20 // indirect
     github.com/mattn/go-runewidth v0.0.15 // indirect
-    github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
+    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-    github.com/prometheus/client_model v0.5.0 // indirect
+    github.com/ncruces/go-strftime v0.1.9 // indirect
-    github.com/prometheus/common v0.45.0 // indirect
+    github.com/prometheus/client_model v0.6.1 // indirect
-    github.com/prometheus/procfs v0.12.0 // indirect
+    github.com/prometheus/common v0.55.0 // indirect
+    github.com/prometheus/procfs v0.15.1 // indirect
     github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
-    github.com/rivo/uniseg v0.4.4 // indirect
+    github.com/rivo/uniseg v0.4.7 // indirect
     github.com/rogpeppe/go-internal v1.11.0 // indirect
     github.com/valyala/bytebufferpool v1.0.0 // indirect
     github.com/valyala/tcplisten v1.0.0 // indirect
     go.opencensus.io v0.24.0 // indirect
-    golang.org/x/image v0.11.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
-    golang.org/x/mod v0.12.0 // indirect
+    go.opentelemetry.io/otel v1.27.0 // indirect
-    golang.org/x/sync v0.4.0 // indirect
+    go.opentelemetry.io/otel/metric v1.27.0 // indirect
-    golang.org/x/sys v0.18.0 // indirect
+    go.opentelemetry.io/otel/trace v1.27.0 // indirect
-    golang.org/x/text v0.14.0 // indirect
+    golang.org/x/image v0.18.0 // indirect
-    golang.org/x/tools v0.13.0 // indirect
+    golang.org/x/mod v0.18.0 // indirect
-    google.golang.org/appengine v1.6.8 // indirect
+    golang.org/x/sync v0.8.0 // indirect
-    google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a // indirect
+    golang.org/x/sys v0.25.0 // indirect
-    google.golang.org/grpc v1.58.3 // indirect
+    golang.org/x/text v0.18.0 // indirect
-    google.golang.org/protobuf v1.31.0 // indirect
+    golang.org/x/tools v0.22.0 // indirect
+    google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
+    google.golang.org/grpc v1.64.0 // indirect
+    google.golang.org/protobuf v1.34.2 // indirect
     gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
-    lukechampine.com/uint128 v1.2.0 // indirect
+    modernc.org/gc/v3 v3.0.0-20240304020402-f0dba7c97c2b // indirect
-    modernc.org/cc/v3 v3.40.0 // indirect
+    modernc.org/libc v1.55.3 // indirect
-    modernc.org/ccgo/v3 v3.16.13 // indirect
-    modernc.org/libc v1.29.0 // indirect
     modernc.org/mathutil v1.6.0 // indirect
-    modernc.org/memory v1.7.2 // indirect
+    modernc.org/memory v1.8.0 // indirect
-    modernc.org/opt v0.1.3 // indirect
+    modernc.org/strutil v1.2.0 // indirect
-    modernc.org/strutil v1.1.3 // indirect
+    modernc.org/token v1.1.0 // indirect
-    modernc.org/token v1.0.1 // indirect
 )
285 go.sum
@ -1,48 +1,61 @@
|
|||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
|
cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
|
||||||
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
|
||||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
|
||||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
|
||||||
|
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||||
|
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||||
|
code.gitea.io/sdk/gitea v0.19.0 h1:8I6s1s4RHgzxiPHhOQdgim1RWIRcr0LVMbHBjBFXq4Y=
|
||||||
|
code.gitea.io/sdk/gitea v0.19.0/go.mod h1:IG9xZJoltDNeDSW0qiF2Vqx5orMWa7OhVWrjvrd5NpI=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/TwiN/deepmerge v0.2.1 h1:GowJr9O4THTVW4awX63x1BVg1hgr4q+35XKKCYbwsSs=
|
github.com/TwiN/deepmerge v0.2.1 h1:GowJr9O4THTVW4awX63x1BVg1hgr4q+35XKKCYbwsSs=
|
||||||
github.com/TwiN/deepmerge v0.2.1/go.mod h1:LVBmCEBQvibYSF8Gyl/NqhHXH7yIiT7Ozqf9dHxGPW0=
|
github.com/TwiN/deepmerge v0.2.1/go.mod h1:LVBmCEBQvibYSF8Gyl/NqhHXH7yIiT7Ozqf9dHxGPW0=
|
||||||
github.com/TwiN/g8/v2 v2.0.0 h1:+hwIbRLMhDd2iwHzkZUPp2FkX7yTx8ddYOnS91HkDqQ=
|
github.com/TwiN/g8/v2 v2.0.0 h1:+hwIbRLMhDd2iwHzkZUPp2FkX7yTx8ddYOnS91HkDqQ=
|
||||||
github.com/TwiN/g8/v2 v2.0.0/go.mod h1:4sVAF27q8T8ISggRa/Fb0drw7wpB22B6eWd+/+SGMqE=
|
github.com/TwiN/g8/v2 v2.0.0/go.mod h1:4sVAF27q8T8ISggRa/Fb0drw7wpB22B6eWd+/+SGMqE=
|
||||||
github.com/TwiN/gocache/v2 v2.2.0 h1:M3B36KyH24BntxLrLaUb2kgTdq8DzCnfod0IekLG57w=
|
github.com/TwiN/gocache/v2 v2.2.2 h1:4HToPfDV8FSbaYO5kkbhLpEllUYse5rAf+hVU/mSsuI=
|
||||||
github.com/TwiN/gocache/v2 v2.2.0/go.mod h1:SnUuBsrwGQeNcDG6vhkOMJnqErZM0JGjgIkuKryokYA=
|
github.com/TwiN/gocache/v2 v2.2.2/go.mod h1:WfIuwd7GR82/7EfQqEtmLFC3a2vqaKbs4Pe6neB7Gyc=
|
||||||
github.com/TwiN/health v1.6.0 h1:L2ks575JhRgQqWWOfKjw9B0ec172hx7GdToqkYUycQM=
|
github.com/TwiN/health v1.6.0 h1:L2ks575JhRgQqWWOfKjw9B0ec172hx7GdToqkYUycQM=
|
||||||
github.com/TwiN/health v1.6.0/go.mod h1:Z6TszwQPMvtSiVx1QMidVRgvVr4KZGfiwqcD7/Z+3iw=
|
github.com/TwiN/health v1.6.0/go.mod h1:Z6TszwQPMvtSiVx1QMidVRgvVr4KZGfiwqcD7/Z+3iw=
|
||||||
github.com/TwiN/whois v1.1.7 h1:eGzLOrWhpYLAGXD8boXh0bBKllN/EmuBsLqTJT4tC/U=
|
github.com/TwiN/whois v1.1.9 h1:m20+m1CXnrstie+tW2ZmAJkfcT9zgwpVRUFsKeMw+ng=
|
||||||
github.com/TwiN/whois v1.1.7/go.mod h1:VOJAH4+3chAik5gva5zxJNXv2voEHjMNCf1y07sqj9w=
|
github.com/TwiN/whois v1.1.9/go.mod h1:TjipCMpJRAJYKmtz/rXQBU6UGxMh6bk8SHazu7OMnQE=
|
||||||
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
|
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
||||||
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||||
github.com/aws/aws-sdk-go v1.47.9 h1:rarTsos0mA16q+huicGx0e560aYRtOucV5z2Mw23JRY=
|
github.com/aws/aws-sdk-go v1.54.10 h1:dvkMlAttUsyacKj2L4poIQBLzOSWL2JG2ty+yWrqets=
|
||||||
github.com/aws/aws-sdk-go v1.47.9/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
github.com/aws/aws-sdk-go v1.54.10/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/blend/go-sdk v1.20220411.3 h1:GFV4/FQX5UzXLPwWV03gP811pj7B8J2sbuq+GJQofXc=
|
|
||||||
github.com/blend/go-sdk v1.20220411.3/go.mod h1:7lnH8fTi6U4i1fArEXRyOIY2E1X4MALg09qsQqY1+ak=
|
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||||
github.com/coreos/go-oidc/v3 v3.7.0 h1:FTdj0uexT4diYIPlF4yoFVI5MRO1r5+SEcIpEw9vC0o=
|
github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
|
||||||
github.com/coreos/go-oidc/v3 v3.7.0/go.mod h1:yQzSCqBnK3e6Fs5l+f5i0F8Kwf0zpH9bPEsbY00KanM=
|
github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
|
||||||
|
github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
|
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||||
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
|
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||||
github.com/gofiber/fiber/v2 v2.52.1 h1:1RoU2NS+b98o1L77sdl5mboGPiW+0Ypsi5oLmcYlgHI=
|
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
|
||||||
github.com/gofiber/fiber/v2 v2.52.1/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
|
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
|
||||||
|
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
|
||||||
|
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
|
||||||
|
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
|
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||||
|
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
|
github.com/gofiber/fiber/v2 v2.52.4 h1:P+T+4iK7VaqUsq2PALYEfBBo6bJZ4q3FP8cZ84EggTM=
|
||||||
|
github.com/gofiber/fiber/v2 v2.52.4/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
|
||||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
|
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
|
||||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
@ -59,10 +72,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
|||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
|
||||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
@ -70,38 +81,41 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/go-github/v48 v48.2.0 h1:68puzySE6WqUY9KWmpOsDEQfDZsso98rT6pZcz9HqcE=
|
github.com/google/go-github/v48 v48.2.0 h1:68puzySE6WqUY9KWmpOsDEQfDZsso98rT6pZcz9HqcE=
|
||||||
github.com/google/go-github/v48 v48.2.0/go.mod h1:dDlehKBDo850ZPvCTK0sEqTCVWcrGl2LcDiajkYi89Y=
|
github.com/google/go-github/v48 v48.2.0/go.mod h1:dDlehKBDo850ZPvCTK0sEqTCVWcrGl2LcDiajkYi89Y=
|
||||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||||
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
|
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
|
||||||
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||||
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
|
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
|
||||||
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
|
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ=
|
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
|
||||||
github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
|
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
|
||||||
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
|
github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
|
||||||
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
|
github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
|
||||||
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062 h1:G1+wBT0dwjIrBdLy0MIG0i+E4CQxEnedHXdauJEIH6g=
|
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
|
||||||
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
|
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
|
github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 h1:i2fYnDurfLlJH8AyyMOnkLHnHeP8Ff/DDpuZA/D3bPo=
|
||||||
|
github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
|
||||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||||
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
|
|
||||||
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
|
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||||
|
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
@ -111,67 +125,83 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
|||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
|
||||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
|
||||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
|
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||||
github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY=
|
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus-community/pro-bing v0.3.0 h1:SFT6gHqXwbItEDJhTkzPWVqU6CLEtqEfNAPp47RUON4=
|
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
|
||||||
github.com/prometheus-community/pro-bing v0.3.0/go.mod h1:p9dLb9zdmv+eLxWfCT6jESWuDrS+YzpPkQBgysQF8a0=
|
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
|
||||||
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
|
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
|
||||||
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
|
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||||
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
|
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||||
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
|
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||||
github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
|
github.com/valyala/fasthttp v1.56.0 h1:bEZdJev/6LCBlpdORfrLu/WOZXXxvrUQSiyniuaoW8U=
|
||||||
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
|
github.com/valyala/fasthttp v1.56.0/go.mod h1:sReBt3XZVnudxuLOx4J/fMrJVorWRiWY2koQKgABiVI=
|
||||||
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
|
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
|
||||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
||||||
github.com/wcharczuk/go-chart/v2 v2.1.1 h1:2u7na789qiD5WzccZsFz4MJWOJP72G+2kUuJoSNqWnE=
|
github.com/wcharczuk/go-chart/v2 v2.1.2 h1:Y17/oYNuXwZg6TFag06qe8sBajwwsuvPiJJXcUcLL6E=
|
||||||
github.com/wcharczuk/go-chart/v2 v2.1.1/go.mod h1:CyCAUt2oqvfhCl6Q5ZvAZwItgpQKZOkCJGb+VGv6l14=
|
github.com/wcharczuk/go-chart/v2 v2.1.2/go.mod h1:Zi4hbaqlWpYajnXB2K22IUYVXRXaLfSGNNR7P4ukyyQ=
|
||||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
|
||||||
|
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
|
||||||
|
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
|
||||||
|
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
|
||||||
|
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
|
||||||
|
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
|
||||||
|
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||||
|
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||||
|
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
|
||||||
|
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
|
golang.org/x/image v0.18.0 h1:jGzIakQa/ZXI1I0Fxvaa9W7yP25TqT6cHIHn+6CqvSQ=
|
||||||
golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8=
|
golang.org/x/image v0.18.0/go.mod h1:4yyo5vMFQjVjUcVk4jEQcU9MGy/rulF5WvUILseCM2E=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
|
||||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
|
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||||
|
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||||
|
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -182,18 +212,25 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
-golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -205,21 +242,35 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -228,28 +279,28 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.148.0 h1:HBq4TZlN4/1pNcu0geJZ/Q50vIwIXT532UIMYoo0vOs=
-google.golang.org/api v0.148.0/go.mod h1:8/TBgwaKjfqTdacOJrOv2+2Q6fBDU1uHKK06oGSkxzU=
+google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
+google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a h1:a2MQQVoTo96JC9PMGtGBymLp7+/RzpFc2yX/9WfFg1c=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
-google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -259,10 +310,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -278,31 +327,29 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
-lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
-modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
-modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
-modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
-modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
-modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
-modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
-modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
-modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
-modernc.org/libc v1.29.0 h1:tTFRFq69YKCF2QyGNuRUQxKBm1uZZLubf6Cjh/pVHXs=
-modernc.org/libc v1.29.0/go.mod h1:DaG/4Q3LRRdqpiLyP0C2m1B8ZMGkQ+cCgOIjEtQlYhQ=
+modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
+modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
+modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
+modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
+modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
+modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
+modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
+modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
+modernc.org/gc/v3 v3.0.0-20240304020402-f0dba7c97c2b h1:BnN1t+pb1cy61zbvSUV7SeI0PwosMhlAEi/vBY4qxp8=
+modernc.org/gc/v3 v3.0.0-20240304020402-f0dba7c97c2b/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
+modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
+modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
-modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
-modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
+modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
+modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
-modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
-modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
-modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
-modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
-modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY=
-modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c=
-modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg=
-modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
-modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY=
-modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE=
+modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
+modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
+modernc.org/sqlite v1.33.1 h1:trb6Z3YYoeM9eDL1O8do81kP+0ejv+YzgyFo+Gwy0nM=
+modernc.org/sqlite v1.33.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k=
+modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
+modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
main.go
@@ -83,13 +83,67 @@ func initializeStorage(cfg *config.Config) {
    for _, ep := range cfg.Endpoints {
        keys = append(keys, ep.Key())
    }
-   for _, externalEndpoint := range cfg.ExternalEndpoints {
-       keys = append(keys, externalEndpoint.Key())
+   for _, ee := range cfg.ExternalEndpoints {
+       keys = append(keys, ee.Key())
    }
    numberOfEndpointStatusesDeleted := store.Get().DeleteAllEndpointStatusesNotInKeys(keys)
    if numberOfEndpointStatusesDeleted > 0 {
        log.Printf("[main.initializeStorage] Deleted %d endpoint statuses because their matching endpoints no longer existed", numberOfEndpointStatusesDeleted)
    }
+   // Clean up the triggered alerts from the storage provider and load valid triggered endpoint alerts
+   numberOfPersistedTriggeredAlertsLoaded := 0
+   for _, ep := range cfg.Endpoints {
+       var checksums []string
+       for _, alert := range ep.Alerts {
+           if alert.IsEnabled() {
+               checksums = append(checksums, alert.Checksum())
+           }
+       }
+       numberOfTriggeredAlertsDeleted := store.Get().DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep, checksums)
+       if cfg.Debug && numberOfTriggeredAlertsDeleted > 0 {
+           log.Printf("[main.initializeStorage] Deleted %d triggered alerts for endpoint with key=%s because their configurations have been changed or deleted", numberOfTriggeredAlertsDeleted, ep.Key())
+       }
+       for _, alert := range ep.Alerts {
+           exists, resolveKey, numberOfSuccessesInARow, err := store.Get().GetTriggeredEndpointAlert(ep, alert)
+           if err != nil {
+               log.Printf("[main.initializeStorage] Failed to get triggered alert for endpoint with key=%s: %s", ep.Key(), err.Error())
+               continue
+           }
+           if exists {
+               alert.Triggered, alert.ResolveKey = true, resolveKey
+               ep.NumberOfSuccessesInARow, ep.NumberOfFailuresInARow = numberOfSuccessesInARow, alert.FailureThreshold
+               numberOfPersistedTriggeredAlertsLoaded++
+           }
+       }
+   }
+   for _, ee := range cfg.ExternalEndpoints {
+       var checksums []string
+       for _, alert := range ee.Alerts {
+           if alert.IsEnabled() {
+               checksums = append(checksums, alert.Checksum())
+           }
+       }
+       convertedEndpoint := ee.ToEndpoint()
+       numberOfTriggeredAlertsDeleted := store.Get().DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(convertedEndpoint, checksums)
+       if cfg.Debug && numberOfTriggeredAlertsDeleted > 0 {
+           log.Printf("[main.initializeStorage] Deleted %d triggered alerts for endpoint with key=%s because their configurations have been changed or deleted", numberOfTriggeredAlertsDeleted, ee.Key())
+       }
+       for _, alert := range ee.Alerts {
+           exists, resolveKey, numberOfSuccessesInARow, err := store.Get().GetTriggeredEndpointAlert(convertedEndpoint, alert)
+           if err != nil {
+               log.Printf("[main.initializeStorage] Failed to get triggered alert for endpoint with key=%s: %s", ee.Key(), err.Error())
+               continue
+           }
+           if exists {
+               alert.Triggered, alert.ResolveKey = true, resolveKey
+               ee.NumberOfSuccessesInARow, ee.NumberOfFailuresInARow = numberOfSuccessesInARow, alert.FailureThreshold
+               numberOfPersistedTriggeredAlertsLoaded++
+           }
+       }
+   }
+   if numberOfPersistedTriggeredAlertsLoaded > 0 {
+       log.Printf("[main.initializeStorage] Loaded %d persisted triggered alerts", numberOfPersistedTriggeredAlertsLoaded)
+   }
}

func listenToConfigurationFileChanges(cfg *config.Config) {
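For orientation, here is a minimal sketch of the storage surface that the startup logic above relies on. The interface below is illustrative only and is not the repository's actual interface declaration; it simply restates, with descriptive comments, the four store calls used by initializeStorage.

package storage

import (
    "github.com/TwiN/gatus/v5/alerting/alert"
    "github.com/TwiN/gatus/v5/config/endpoint"
)

// TriggeredAlertStore is an illustrative subset of the store methods used during startup.
type TriggeredAlertStore interface {
    // Reports whether the alert is currently persisted as triggered, plus what is needed to resolve it.
    GetTriggeredEndpointAlert(ep *endpoint.Endpoint, a *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error)
    // Persists a triggered alert so it survives a restart.
    UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error
    // Removes a persisted triggered alert once it has been resolved.
    DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error
    // Drops persisted triggered alerts whose configuration checksum no longer matches any enabled alert.
    DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.Endpoint, checksums []string) int
}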
@@ -5,6 +5,7 @@ import (
    "sync"
    "time"

+   "github.com/TwiN/gatus/v5/alerting/alert"
    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
@@ -174,6 +175,37 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
    return s.cache.DeleteAll(keysToDelete)
}

+// GetTriggeredEndpointAlert returns whether the triggered alert for the specified endpoint exists, as well as the necessary information to resolve it
+//
+// Always returns that the alert does not exist for the in-memory store since it does not support persistence across restarts
+func (s *Store) GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error) {
+   return false, "", 0, nil
+}
+
+// UpsertTriggeredEndpointAlert inserts/updates a triggered alert for an endpoint
+// Used for persistence of triggered alerts across application restarts
+//
+// Does nothing for the in-memory store since it does not support persistence across restarts
+func (s *Store) UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
+   return nil
+}
+
+// DeleteTriggeredEndpointAlert deletes a triggered alert for an endpoint
+//
+// Does nothing for the in-memory store since it does not support persistence across restarts
+func (s *Store) DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
+   return nil
+}
+
+// DeleteAllTriggeredAlertsNotInChecksumsByEndpoint removes all triggered alerts owned by an endpoint whose alert
+// configurations are not provided in the checksums list.
+// This prevents triggered alerts that have been removed or modified from lingering in the database.
+//
+// Does nothing for the in-memory store since it does not support persistence across restarts
+func (s *Store) DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.Endpoint, checksums []string) int {
+   return 0
+}
+
// Clear deletes everything from the store
func (s *Store) Clear() {
    s.cache.Clear()
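To make the in-memory contract above concrete, a test helper along these lines would demonstrate it; this is a sketch rather than code from the diff, and it assumes only the Store type and the exported methods shown above.

package memory

import (
    "testing"

    "github.com/TwiN/gatus/v5/alerting/alert"
    "github.com/TwiN/gatus/v5/config/endpoint"
)

// verifyNoPersistence illustrates that the in-memory implementation never reports a persisted triggered alert.
func verifyNoPersistence(t *testing.T, store *Store) {
    ep := &endpoint.Endpoint{Name: "example", Group: "core"}
    a := &alert.Alert{}
    if err := store.UpsertTriggeredEndpointAlert(ep, a); err != nil {
        t.Fatalf("upsert should be a no-op, got error: %v", err)
    }
    exists, _, _, err := store.GetTriggeredEndpointAlert(ep, a)
    if err != nil {
        t.Fatalf("get should be a no-op, got error: %v", err)
    }
    if exists {
        t.Error("in-memory store should never report a persisted triggered alert")
    }
}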
@@ -7,8 +7,8 @@ import (
)

const (
-   numberOfHoursInTenDays = 10 * 24
-   sevenDays              = 7 * 24 * time.Hour
+   uptimeCleanUpThreshold = 32 * 24
+   uptimeRetention        = 30 * 24 * time.Hour
)

// processUptimeAfterResult processes the result by extracting the relevant data from the result and recalculating the uptime
@@ -30,10 +30,10 @@ func processUptimeAfterResult(uptime *endpoint.Uptime, result *endpoint.Result)
    hourlyStats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
    // Clean up only when we're starting to have too many useless keys
    // Note that this is only triggered when there are more entries than there should be after
-   // 10 days, despite the fact that we are deleting everything that's older than 7 days.
-   // This is to prevent re-iterating on every `processUptimeAfterResult` as soon as the uptime has been logged for 7 days.
-   if len(uptime.HourlyStatistics) > numberOfHoursInTenDays {
-       sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour)).Unix()
+   // 32 days, despite the fact that we are deleting everything that's older than 30 days.
+   // This is to prevent re-iterating on every `processUptimeAfterResult` as soon as the uptime has been logged for 30 days.
+   if len(uptime.HourlyStatistics) > uptimeCleanUpThreshold {
+       sevenDaysAgo := time.Now().Add(-(uptimeRetention + time.Hour)).Unix()
        for hourlyUnixTimestamp := range uptime.HourlyStatistics {
            if sevenDaysAgo > hourlyUnixTimestamp {
                delete(uptime.HourlyStatistics, hourlyUnixTimestamp)
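As a sanity check on the new numbers (illustrative arithmetic, not part of the diff): the map is only scanned once it holds more than 32 x 24 = 768 hourly keys, the scan then drops every key older than 30 days plus one hour, and roughly 47 further hourly insertions pass before the next scan.

package main

import "fmt"

func main() {
    // Illustrative arithmetic mirroring the constants above; not imported from the diff.
    cleanUpThreshold := 32 * 24 // 768 hourly entries before a scan is triggered
    retained := 30*24 + 1       // entries younger than 30 days + 1 hour survive the scan
    fmt.Println(cleanUpThreshold, "entry threshold,", cleanUpThreshold-retained, "insertions between scans") // 768, 47
}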
@@ -51,8 +51,8 @@ func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
    timestamp := now.Add(-12 * 24 * time.Hour)
    for timestamp.Unix() <= now.Unix() {
        AddResult(status, &endpoint.Result{Timestamp: timestamp, Success: true})
-       if len(status.Uptime.HourlyStatistics) > numberOfHoursInTenDays {
-           t.Errorf("At no point in time should there be more than %d entries in status.SuccessfulExecutionsPerHour, but there are %d", numberOfHoursInTenDays, len(status.Uptime.HourlyStatistics))
+       if len(status.Uptime.HourlyStatistics) > uptimeCleanUpThreshold {
+           t.Errorf("At no point in time should there be more than %d entries in status.SuccessfulExecutionsPerHour, but there are %d", uptimeCleanUpThreshold, len(status.Uptime.HourlyStatistics))
        }
        // Simulate endpoint with an interval of 3 minutes
        timestamp = timestamp.Add(3 * time.Minute)
@@ -16,7 +16,7 @@ func (s *Store) createPostgresSchema() error {
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_events (
            endpoint_event_id BIGSERIAL PRIMARY KEY,
-           endpoint_id INTEGER NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
+           endpoint_id BIGINT NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            event_type TEXT NOT NULL,
            event_timestamp TIMESTAMP NOT NULL
        )
@@ -66,7 +66,20 @@ func (s *Store) createPostgresSchema() error {
            UNIQUE(endpoint_id, hour_unix_timestamp)
        )
    `)
-   // Silent table modifications
+   if err != nil {
+       return err
+   }
+   _, err = s.db.Exec(`
+       CREATE TABLE IF NOT EXISTS endpoint_alerts_triggered (
+           endpoint_alert_trigger_id BIGSERIAL PRIMARY KEY,
+           endpoint_id BIGINT NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
+           configuration_checksum TEXT NOT NULL,
+           resolve_key TEXT NOT NULL,
+           number_of_successes_in_a_row INTEGER NOT NULL,
+           UNIQUE(endpoint_id, configuration_checksum)
+       )
+   `)
+   // Silent table modifications TODO: Remove this in v6.0.0
    _, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD IF NOT EXISTS domain_expiration BIGINT NOT NULL DEFAULT 0`)
    return err
}
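For reference, the rows that end up in the new endpoint_alerts_triggered table can be read back with a query like the sketch below. This is illustrative only and not part of the diff; the column names come from the CREATE TABLE statements above, and it assumes a *sql.DB already connected to the Gatus database.

package inspect

import "database/sql"

// listTriggeredAlerts returns the persisted alert configuration checksums grouped by endpoint key.
func listTriggeredAlerts(db *sql.DB) (map[string][]string, error) {
    rows, err := db.Query(`
        SELECT e.endpoint_key, eat.configuration_checksum
        FROM endpoint_alerts_triggered eat
        JOIN endpoints e ON e.endpoint_id = eat.endpoint_id
    `)
    if err != nil {
        return nil, err
    }
    defer rows.Close()
    checksumsByEndpoint := make(map[string][]string)
    for rows.Next() {
        var key, checksum string
        if err := rows.Scan(&key, &checksum); err != nil {
            return nil, err
        }
        checksumsByEndpoint[key] = append(checksumsByEndpoint[key], checksum)
    }
    return checksumsByEndpoint, rows.Err()
}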
@@ -66,7 +66,20 @@ func (s *Store) createSQLiteSchema() error {
            UNIQUE(endpoint_id, hour_unix_timestamp)
        )
    `)
-   // Silent table modifications TODO: Remove this
+   if err != nil {
+       return err
+   }
+   _, err = s.db.Exec(`
+       CREATE TABLE IF NOT EXISTS endpoint_alerts_triggered (
+           endpoint_alert_trigger_id INTEGER PRIMARY KEY,
+           endpoint_id INTEGER NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
+           configuration_checksum TEXT NOT NULL,
+           resolve_key TEXT NOT NULL,
+           number_of_successes_in_a_row INTEGER NOT NULL,
+           UNIQUE(endpoint_id, configuration_checksum)
+       )
+   `)
+   // Silent table modifications TODO: Remove this in v6.0.0
    _, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD domain_expiration INTEGER NOT NULL DEFAULT 0`)
    return err
}
@@ -9,6 +9,7 @@ import (
    "strings"
    "time"

+   "github.com/TwiN/gatus/v5/alerting/alert"
    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
@@ -27,11 +28,13 @@ const (
    // for aesthetic purposes, I deemed it wasn't worth the performance impact of yet another one-to-many table.
    arraySeparator = "|~|"

-   uptimeCleanUpThreshold  = 10 * 24 * time.Hour // Maximum uptime age before triggering a clean up
-   eventsCleanUpThreshold  = common.MaximumNumberOfEvents + 10 // Maximum number of events before triggering a clean up
-   resultsCleanUpThreshold = common.MaximumNumberOfResults + 10 // Maximum number of results before triggering a clean up
-
-   uptimeRetention = 7 * 24 * time.Hour
+   eventsCleanUpThreshold  = common.MaximumNumberOfEvents + 10 // Maximum number of events before triggering a cleanup
+   resultsCleanUpThreshold = common.MaximumNumberOfResults + 10 // Maximum number of results before triggering a cleanup
+
+   uptimeTotalEntriesMergeThreshold = 100 // Maximum number of uptime entries before triggering a merge
+   uptimeAgeCleanUpThreshold        = 32 * 24 * time.Hour // Maximum uptime age before triggering a cleanup
+   uptimeRetention                  = 30 * 24 * time.Hour // Minimum duration that must be kept to operate as intended
+   uptimeHourlyBuffer               = 48 * time.Hour // Number of hours to buffer from now when determining which hourly uptime entries can be merged into daily uptime entries

    cacheTTL = 10 * time.Minute
)
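To see why 100 is a comfortable merge threshold under these constants, a rough back-of-the-envelope check (purely illustrative, not taken from the diff): with a 48-hour hourly buffer and 30 days of retention, a fully merged endpoint holds about 48 hourly entries plus roughly 28 daily entries, which stays well under the merge trigger.

package main

import "fmt"

// Illustrative arithmetic only: these values mirror the constants above but are not imported from the diff.
const (
    hourlyBufferHours = 48  // uptimeHourlyBuffer
    retentionDays     = 30  // uptimeRetention
    mergeThreshold    = 100 // uptimeTotalEntriesMergeThreshold
)

func main() {
    hourlyEntries := hourlyBufferHours                    // at most one entry per hour within the buffer
    dailyEntries := retentionDays - hourlyBufferHours/24  // remaining days collapse to one entry each
    total := hourlyEntries + dailyEntries
    fmt.Printf("steady-state entries ~= %d (threshold %d)\n", total, mergeThreshold) // ~= 76 < 100
}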
@@ -234,12 +237,12 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
            // Endpoint doesn't exist in the database, insert it
            if endpointID, err = s.insertEndpoint(tx, ep); err != nil {
                _ = tx.Rollback()
-               log.Printf("[sql.Insert] Failed to create endpoint with group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+               log.Printf("[sql.Insert] Failed to create endpoint with key=%s: %s", ep.Key(), err.Error())
                return err
            }
        } else {
            _ = tx.Rollback()
-           log.Printf("[sql.Insert] Failed to retrieve id of endpoint with group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+           log.Printf("[sql.Insert] Failed to retrieve id of endpoint with key=%s: %s", ep.Key(), err.Error())
            return err
        }
    }
@@ -255,7 +258,7 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
    numberOfEvents, err := s.getNumberOfEventsByEndpointID(tx, endpointID)
    if err != nil {
        // Silently fail
-       log.Printf("[sql.Insert] Failed to retrieve total number of events for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+       log.Printf("[sql.Insert] Failed to retrieve total number of events for endpoint with key=%s: %s", ep.Key(), err.Error())
    }
    if numberOfEvents == 0 {
        // There's no events yet, which means we need to add the EventStart and the first healthy/unhealthy event
@@ -265,18 +268,18 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
        })
        if err != nil {
            // Silently fail
-           log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", endpoint.EventStart, ep.Group, ep.Name, err.Error())
+           log.Printf("[sql.Insert] Failed to insert event=%s for endpoint with key=%s: %s", endpoint.EventStart, ep.Key(), err.Error())
        }
        event := endpoint.NewEventFromResult(result)
        if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
            // Silently fail
-           log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, ep.Group, ep.Name, err.Error())
+           log.Printf("[sql.Insert] Failed to insert event=%s for endpoint with key=%s: %s", event.Type, ep.Key(), err.Error())
        }
    } else {
        // Get the success value of the previous result
        var lastResultSuccess bool
        if lastResultSuccess, err = s.getLastEndpointResultSuccessValue(tx, endpointID); err != nil {
-           log.Printf("[sql.Insert] Failed to retrieve outcome of previous result for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+           log.Printf("[sql.Insert] Failed to retrieve outcome of previous result for endpoint with key=%s: %s", ep.Key(), err.Error())
        } else {
            // If we managed to retrieve the outcome of the previous result, we'll compare it with the new result.
            // If the final outcome (success or failure) of the previous and the new result aren't the same, it means
@@ -286,7 +289,7 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
            event := endpoint.NewEventFromResult(result)
            if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
                // Silently fail
-               log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, ep.Group, ep.Name, err.Error())
+               log.Printf("[sql.Insert] Failed to insert event=%s for endpoint with key=%s: %s", event.Type, ep.Key(), err.Error())
            }
        }
    }
@@ -295,40 +298,55 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
        // (since we're only deleting MaximumNumberOfEvents at a time instead of 1)
        if numberOfEvents > eventsCleanUpThreshold {
            if err = s.deleteOldEndpointEvents(tx, endpointID); err != nil {
-               log.Printf("[sql.Insert] Failed to delete old events for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+               log.Printf("[sql.Insert] Failed to delete old events for endpoint with key=%s: %s", ep.Key(), err.Error())
            }
        }
    }
    // Second, we need to insert the result.
    if err = s.insertEndpointResult(tx, endpointID, result); err != nil {
-       log.Printf("[sql.Insert] Failed to insert result for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+       log.Printf("[sql.Insert] Failed to insert result for endpoint with key=%s: %s", ep.Key(), err.Error())
        _ = tx.Rollback() // If we can't insert the result, we'll rollback now since there's no point continuing
        return err
    }
    // Clean up old results
    numberOfResults, err := s.getNumberOfResultsByEndpointID(tx, endpointID)
    if err != nil {
-       log.Printf("[sql.Insert] Failed to retrieve total number of results for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+       log.Printf("[sql.Insert] Failed to retrieve total number of results for endpoint with key=%s: %s", ep.Key(), err.Error())
    } else {
        if numberOfResults > resultsCleanUpThreshold {
            if err = s.deleteOldEndpointResults(tx, endpointID); err != nil {
-               log.Printf("[sql.Insert] Failed to delete old results for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+               log.Printf("[sql.Insert] Failed to delete old results for endpoint with key=%s: %s", ep.Key(), err.Error())
            }
        }
    }
    // Finally, we need to insert the uptime data.
    // Because the uptime data significantly outlives the results, we can't rely on the results for determining the uptime
    if err = s.updateEndpointUptime(tx, endpointID, result); err != nil {
-       log.Printf("[sql.Insert] Failed to update uptime for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+       log.Printf("[sql.Insert] Failed to update uptime for endpoint with key=%s: %s", ep.Key(), err.Error())
    }
-   // Clean up old uptime entries
+   // Merge hourly uptime entries that can be merged into daily entries and clean up old uptime entries
+   numberOfUptimeEntries, err := s.getNumberOfUptimeEntriesByEndpointID(tx, endpointID)
+   if err != nil {
+       log.Printf("[sql.Insert] Failed to retrieve total number of uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
+   } else {
+       // Merge older hourly uptime entries into daily uptime entries if we have more than uptimeTotalEntriesMergeThreshold
+       if numberOfUptimeEntries >= uptimeTotalEntriesMergeThreshold {
+           log.Printf("[sql.Insert] Merging hourly uptime entries for endpoint with key=%s; This is a lot of work, it shouldn't happen too often", ep.Key())
+           if err = s.mergeHourlyUptimeEntriesOlderThanMergeThresholdIntoDailyUptimeEntries(tx, endpointID); err != nil {
+               log.Printf("[sql.Insert] Failed to merge hourly uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
+           }
+       }
+   }
+   // Clean up outdated uptime entries
+   // In most cases, this would be handled by mergeHourlyUptimeEntriesOlderThanMergeThresholdIntoDailyUptimeEntries,
+   // but if Gatus was temporarily shut down, we might have some old entries that need to be cleaned up
    ageOfOldestUptimeEntry, err := s.getAgeOfOldestEndpointUptimeEntry(tx, endpointID)
    if err != nil {
-       log.Printf("[sql.Insert] Failed to retrieve oldest endpoint uptime entry for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+       log.Printf("[sql.Insert] Failed to retrieve oldest endpoint uptime entry for endpoint with key=%s: %s", ep.Key(), err.Error())
    } else {
-       if ageOfOldestUptimeEntry > uptimeCleanUpThreshold {
+       if ageOfOldestUptimeEntry > uptimeAgeCleanUpThreshold {
            if err = s.deleteOldUptimeEntries(tx, endpointID, time.Now().Add(-(uptimeRetention + time.Hour))); err != nil {
-               log.Printf("[sql.Insert] Failed to delete old uptime entries for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
+               log.Printf("[sql.Insert] Failed to delete old uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
            }
        }
    }
@@ -374,6 +392,8 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
    }
    if s.writeThroughCache != nil {
        // It's easier to just wipe out the entire cache than to try to find all keys that are not in the keys list
+       // This only happens on start and during tests, so it's fine for us to just clear the cache without worrying
+       // about performance
        _ = s.writeThroughCache.DeleteKeysByPattern("*")
    }
    // Return number of rows deleted
@@ -381,6 +401,111 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
    return int(rowsAffects)
}

+// GetTriggeredEndpointAlert returns whether the triggered alert for the specified endpoint exists, as well as the necessary information to resolve it
+func (s *Store) GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error) {
+   //log.Printf("[sql.GetTriggeredEndpointAlert] Getting triggered alert with checksum=%s for endpoint with key=%s", alert.Checksum(), ep.Key())
+   err = s.db.QueryRow(
+       "SELECT resolve_key, number_of_successes_in_a_row FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1) AND configuration_checksum = $2",
+       ep.Key(),
+       alert.Checksum(),
+   ).Scan(&resolveKey, &numberOfSuccessesInARow)
+   if err != nil {
+       if errors.Is(err, sql.ErrNoRows) {
+           return false, "", 0, nil
+       }
+       return false, "", 0, err
+   }
+   return true, resolveKey, numberOfSuccessesInARow, nil
+}
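To connect the dots, the calling side would use these store methods roughly as follows when an alert fires or resolves. This is a hedged sketch, not the actual watchdog/alerting code from the repository; it assumes the surrounding package's existing imports (log, endpoint, alert) and the *Store methods defined in this file.

// persistAlertTransition illustrates keeping the persisted state in sync with an alert's lifecycle.
func persistAlertTransition(store *Store, ep *endpoint.Endpoint, a *alert.Alert, justTriggered, justResolved bool) {
    if justTriggered {
        // The alert just fired: remember it (and its resolve key) across restarts.
        if err := store.UpsertTriggeredEndpointAlert(ep, a); err != nil {
            log.Printf("failed to persist triggered alert: %s", err.Error())
        }
    } else if justResolved {
        // The alert went back to healthy: forget the persisted trigger.
        if err := store.DeleteTriggeredEndpointAlert(ep, a); err != nil {
            log.Printf("failed to delete persisted triggered alert: %s", err.Error())
        }
    }
}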
+
+// UpsertTriggeredEndpointAlert inserts/updates a triggered alert for an endpoint
+// Used for persistence of triggered alerts across application restarts
+func (s *Store) UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
+   //log.Printf("[sql.UpsertTriggeredEndpointAlert] Upserting triggered alert with checksum=%s for endpoint with key=%s", triggeredAlert.Checksum(), ep.Key())
+   tx, err := s.db.Begin()
+   if err != nil {
+       return err
+   }
+   endpointID, err := s.getEndpointID(tx, ep)
+   if err != nil {
+       if errors.Is(err, common.ErrEndpointNotFound) {
+           // Endpoint doesn't exist in the database, insert it
+           // This shouldn't happen, but we'll handle it anyway
+           if endpointID, err = s.insertEndpoint(tx, ep); err != nil {
+               _ = tx.Rollback()
+               log.Printf("[sql.UpsertTriggeredEndpointAlert] Failed to create endpoint with key=%s: %s", ep.Key(), err.Error())
+               return err
+           }
+       } else {
+           _ = tx.Rollback()
+           log.Printf("[sql.UpsertTriggeredEndpointAlert] Failed to retrieve id of endpoint with key=%s: %s", ep.Key(), err.Error())
+           return err
+       }
+   }
+   _, err = tx.Exec(
+       `
+           INSERT INTO endpoint_alerts_triggered (endpoint_id, configuration_checksum, resolve_key, number_of_successes_in_a_row)
+           VALUES ($1, $2, $3, $4)
+           ON CONFLICT(endpoint_id, configuration_checksum) DO UPDATE SET
+               resolve_key = $3,
+               number_of_successes_in_a_row = $4
+       `,
+       endpointID,
+       triggeredAlert.Checksum(),
+       triggeredAlert.ResolveKey,
+       ep.NumberOfSuccessesInARow, // We only persist NumberOfSuccessesInARow, because all alerts in this table are already triggered
+   )
+   if err != nil {
+       _ = tx.Rollback()
+       log.Printf("[sql.UpsertTriggeredEndpointAlert] Failed to persist triggered alert for endpoint with key=%s: %s", ep.Key(), err.Error())
+       return err
+   }
+   if err = tx.Commit(); err != nil {
+       _ = tx.Rollback()
+   }
+   return nil
+}
+
+// DeleteTriggeredEndpointAlert deletes a triggered alert for an endpoint
+func (s *Store) DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
+   //log.Printf("[sql.DeleteTriggeredEndpointAlert] Deleting triggered alert with checksum=%s for endpoint with key=%s", triggeredAlert.Checksum(), ep.Key())
+   _, err := s.db.Exec("DELETE FROM endpoint_alerts_triggered WHERE configuration_checksum = $1 AND endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $2 LIMIT 1)", triggeredAlert.Checksum(), ep.Key())
+   return err
+}
+
+// DeleteAllTriggeredAlertsNotInChecksumsByEndpoint removes all triggered alerts owned by an endpoint whose alert
+// configurations are not provided in the checksums list.
+// This prevents triggered alerts that have been removed or modified from lingering in the database.
+func (s *Store) DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.Endpoint, checksums []string) int {
+   //log.Printf("[sql.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint] Deleting triggered alerts for endpoint with key=%s that do not belong to any of checksums=%v", ep.Key(), checksums)
+   var err error
+   var result sql.Result
+   if len(checksums) == 0 {
+       // No checksums? Then it means there are no (enabled) alerts configured for that endpoint, so we can get rid of all
+       // persisted triggered alerts for that endpoint
+       result, err = s.db.Exec("DELETE FROM endpoint_alerts_triggered WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1)", ep.Key())
+   } else {
+       args := make([]interface{}, 0, len(checksums)+1)
+       args = append(args, ep.Key())
+       query := `DELETE FROM endpoint_alerts_triggered
+                   WHERE endpoint_id = (SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1 LIMIT 1)
+                   AND configuration_checksum NOT IN (`
+       for i := range checksums {
+           query += fmt.Sprintf("$%d,", i+2)
+           args = append(args, checksums[i])
+       }
+       query = query[:len(query)-1] + ")" // Remove the last comma and add the closing parenthesis
+       result, err = s.db.Exec(query, args...)
+   }
+   if err != nil {
+       log.Printf("[sql.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint] Failed to delete rows for endpoint with key=%s that do not belong to any of checksums=%v: %s", ep.Key(), checksums, err.Error())
+       return 0
+   }
+   // Return number of rows deleted
+   rowsAffects, _ := result.RowsAffected()
+   return int(rowsAffects)
+}
+
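As a concrete illustration of the NOT IN clause construction above: with three checksums, the loop produces placeholders $2, $3 and $4, and the args slice lines up as the endpoint key followed by the checksums. The snippet below reproduces only the placeholder-building logic and is illustrative, not part of the diff.

package main

import "fmt"

func main() {
    checksums := []string{"c1", "c2", "c3"}
    query := "... AND configuration_checksum NOT IN ("
    for i := range checksums {
        query += fmt.Sprintf("$%d,", i+2) // $1 is reserved for the endpoint key
    }
    query = query[:len(query)-1] + ")" // trim the trailing comma and close the list
    fmt.Println(query)                 // ... AND configuration_checksum NOT IN ($2,$3,$4)
}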
// Clear deletes everything from the store
func (s *Store) Clear() {
    _, _ = s.db.Exec("DELETE FROM endpoints")
@@ -757,6 +882,12 @@ func (s *Store) getNumberOfResultsByEndpointID(tx *sql.Tx, endpointID int64) (in
    return numberOfResults, err
}

+func (s *Store) getNumberOfUptimeEntriesByEndpointID(tx *sql.Tx, endpointID int64) (int64, error) {
+   var numberOfUptimeEntries int64
+   err := tx.QueryRow("SELECT COUNT(1) FROM endpoint_uptimes WHERE endpoint_id = $1", endpointID).Scan(&numberOfUptimeEntries)
+   return numberOfUptimeEntries, err
+}
+
func (s *Store) getAgeOfOldestEndpointUptimeEntry(tx *sql.Tx, endpointID int64) (time.Duration, error) {
    rows, err := tx.Query(
        `
@@ -840,6 +971,92 @@ func (s *Store) deleteOldUptimeEntries(tx *sql.Tx, endpointID int64, maxAge time
    return err
}

+// mergeHourlyUptimeEntriesOlderThanMergeThresholdIntoDailyUptimeEntries merges all hourly uptime entries older than
+// uptimeHourlyMergeThreshold from now into daily uptime entries by summing all hourly entries of the same day into a
+// single entry.
+//
+// This effectively limits the number of uptime entries to (48+(n-2)) where 48 is for the first 48 entries with hourly
+// entries (defined by uptimeHourlyBuffer) and n is the number of days for all entries older than 48 hours.
+// Supporting 30d of entries would then result in far less than 24*30=720 entries.
+func (s *Store) mergeHourlyUptimeEntriesOlderThanMergeThresholdIntoDailyUptimeEntries(tx *sql.Tx, endpointID int64) error {
+   // Calculate timestamp of the first full day of uptime entries that would not impact the uptime calculation for 24h badges
+   // The logic is that once at least 48 hours passed, we:
+   // - No longer need to worry about keeping hourly entries
+   // - Don't have to worry about new hourly entries being inserted, as the day has already passed
+   // which implies that no matter at what hour of the day we are, any timestamp + 48h floored to the current day
+   // will never impact the 24h uptime badge calculation
+   now := time.Now()
+   minThreshold := now.Add(-uptimeHourlyBuffer)
+   minThreshold = time.Date(minThreshold.Year(), minThreshold.Month(), minThreshold.Day(), 0, 0, 0, 0, minThreshold.Location())
+   maxThreshold := now.Add(-uptimeRetention)
+   // Get all uptime entries older than uptimeHourlyMergeThreshold
+   rows, err := tx.Query(
+       `
+           SELECT hour_unix_timestamp, total_executions, successful_executions, total_response_time
+           FROM endpoint_uptimes
+           WHERE endpoint_id = $1
+               AND hour_unix_timestamp < $2
+               AND hour_unix_timestamp >= $3
+       `,
+       endpointID,
+       minThreshold.Unix(),
+       maxThreshold.Unix(),
+   )
+   if err != nil {
+       return err
+   }
+   type Entry struct {
+       totalExecutions      int
+       successfulExecutions int
+       totalResponseTime    int
+   }
+   dailyEntries := make(map[int64]*Entry)
+   for rows.Next() {
+       var unixTimestamp int64
+       entry := Entry{}
+       if err = rows.Scan(&unixTimestamp, &entry.totalExecutions, &entry.successfulExecutions, &entry.totalResponseTime); err != nil {
+           return err
+       }
+       timestamp := time.Unix(unixTimestamp, 0)
+       unixTimestampFlooredAtDay := time.Date(timestamp.Year(), timestamp.Month(), timestamp.Day(), 0, 0, 0, 0, timestamp.Location()).Unix()
+       if dailyEntry := dailyEntries[unixTimestampFlooredAtDay]; dailyEntry == nil {
+           dailyEntries[unixTimestampFlooredAtDay] = &entry
+       } else {
+           dailyEntries[unixTimestampFlooredAtDay].totalExecutions += entry.totalExecutions
+           dailyEntries[unixTimestampFlooredAtDay].successfulExecutions += entry.successfulExecutions
+           dailyEntries[unixTimestampFlooredAtDay].totalResponseTime += entry.totalResponseTime
+       }
+   }
+   // Delete older hourly uptime entries
+   _, err = tx.Exec("DELETE FROM endpoint_uptimes WHERE endpoint_id = $1 AND hour_unix_timestamp < $2", endpointID, minThreshold.Unix())
+   if err != nil {
+       return err
+   }
+   // Insert new daily uptime entries
+   for unixTimestamp, entry := range dailyEntries {
+       _, err = tx.Exec(
+           `
+               INSERT INTO endpoint_uptimes (endpoint_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
+               VALUES ($1, $2, $3, $4, $5)
+               ON CONFLICT(endpoint_id, hour_unix_timestamp) DO UPDATE SET
+                   total_executions = $3,
+                   successful_executions = $4,
+                   total_response_time = $5
+           `,
+           endpointID,
+           unixTimestamp,
+           entry.totalExecutions,
+           entry.successfulExecutions,
+           entry.totalResponseTime,
+       )
+       if err != nil {
+           return err
+       }
+   }
+   // TODO: Find a way to ignore entries that were already merged?
+   return nil
+}
+
func generateCacheKey(endpointKey string, p *paging.EndpointStatusParams) string {
|
func generateCacheKey(endpointKey string, p *paging.EndpointStatusParams) string {
|
||||||
return fmt.Sprintf("%s-%d-%d-%d-%d", endpointKey, p.EventsPage, p.EventsPageSize, p.ResultsPage, p.ResultsPageSize)
|
return fmt.Sprintf("%s-%d-%d-%d-%d", endpointKey, p.EventsPage, p.EventsPageSize, p.ResultsPage, p.ResultsPageSize)
|
||||||
}
|
}
|
||||||
|
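To make the (48+(n-2)) bound described in the comments above concrete, here is a minimal, self-contained sketch of the arithmetic and of the day-flooring used as the merge key. It is a standalone illustration rather than part of the change; the 48h buffer and 30-day retention values are assumptions matching the uptimeHourlyBuffer and uptimeRetention constants referenced in the comments but not defined in this excerpt.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed values: uptimeHourlyBuffer = 48h, uptimeRetention = 30 days (not defined in this excerpt).
	const hoursKeptHourly = 48
	const retentionDays = 30
	// The most recent ~2 days stay as hourly entries; every older day collapses into one daily entry.
	maxEntries := hoursKeptHourly + (retentionDays - 2)
	fmt.Println("approximate max uptime entries:", maxEntries) // 76, versus 24*30=720 if everything stayed hourly

	// Merge key: an hourly timestamp floored to the start of its day, mirroring the flooring above.
	hourly := time.Now().Add(-72 * time.Hour)
	dayKey := time.Date(hourly.Year(), hourly.Month(), hourly.Day(), 0, 0, 0, 0, hourly.Location()).Unix()
	fmt.Println("daily merge key:", dayKey)
}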
@@ -1,9 +1,12 @@
 package sql
 
 import (
+	"errors"
+	"fmt"
 	"testing"
 	"time"
 
+	"github.com/TwiN/gatus/v5/alerting/alert"
 	"github.com/TwiN/gatus/v5/config/endpoint"
 	"github.com/TwiN/gatus/v5/storage/store/common"
 	"github.com/TwiN/gatus/v5/storage/store/common/paging"
@@ -81,13 +84,13 @@ var (
 )
 
 func TestNewStore(t *testing.T) {
-	if _, err := NewStore("", "TestNewStore.db", false); err != ErrDatabaseDriverNotSpecified {
+	if _, err := NewStore("", t.TempDir()+"/TestNewStore.db", false); !errors.Is(err, ErrDatabaseDriverNotSpecified) {
 		t.Error("expected error due to blank driver parameter")
 	}
-	if _, err := NewStore("sqlite", "", false); err != ErrPathNotSpecified {
+	if _, err := NewStore("sqlite", "", false); !errors.Is(err, ErrPathNotSpecified) {
 		t.Error("expected error due to blank path parameter")
 	}
-	if store, err := NewStore("sqlite", t.TempDir()+"/TestNewStore.db", false); err != nil {
+	if store, err := NewStore("sqlite", t.TempDir()+"/TestNewStore.db", true); err != nil {
 		t.Error("shouldn't have returned any error, got", err.Error())
 	} else {
 		_ = store.db.Close()
@@ -130,18 +133,18 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
 	}
 
 	// Since this is one hour before reaching the clean up threshold, the oldest entry should now be this one
-	store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold - time.Hour)), Success: true})
+	store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold - time.Hour)), Success: true})
 
 	tx, _ = store.db.Begin()
 	oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
 	_ = tx.Commit()
-	if oldest.Truncate(time.Hour) != uptimeCleanUpThreshold-time.Hour {
-		t.Errorf("oldest endpoint uptime entry should've been ~%s hours old, was %s", uptimeCleanUpThreshold-time.Hour, oldest)
+	if oldest.Truncate(time.Hour) != uptimeAgeCleanUpThreshold-time.Hour {
+		t.Errorf("oldest endpoint uptime entry should've been ~%s hours old, was %s", uptimeAgeCleanUpThreshold-time.Hour, oldest)
 	}
 
-	// Since this entry is after the uptimeCleanUpThreshold, both this entry as well as the previous
+	// Since this entry is after the uptimeAgeCleanUpThreshold, both this entry as well as the previous
 	// one should be deleted since they both surpass uptimeRetention
-	store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold + time.Hour)), Success: true})
+	store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold + time.Hour)), Success: true})
 
 	tx, _ = store.db.Begin()
 	oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -151,8 +154,128 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
 	}
 }
 
+func TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly(t *testing.T) {
+	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly.db", false)
+	defer store.Close()
+	now := time.Now().Truncate(time.Hour)
+	now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
+
+	scenarios := []struct {
+		numberOfHours            int
+		expectedMaxUptimeEntries int64
+	}{
+		{numberOfHours: 1, expectedMaxUptimeEntries: 1},
+		{numberOfHours: 10, expectedMaxUptimeEntries: 10},
+		{numberOfHours: 50, expectedMaxUptimeEntries: 50},
+		{numberOfHours: 75, expectedMaxUptimeEntries: 75},
+		{numberOfHours: 99, expectedMaxUptimeEntries: 99},
+		{numberOfHours: 150, expectedMaxUptimeEntries: 100},
+		{numberOfHours: 300, expectedMaxUptimeEntries: 100},
+		{numberOfHours: 768, expectedMaxUptimeEntries: 100}, // 32 days (in hours), which means anything beyond that won't be persisted anyway
+		{numberOfHours: 1000, expectedMaxUptimeEntries: 100},
+	}
+	// Note that this is not technically an accurate real world representation, because uptime entries are always added in
+	// the present, while this test is inserting results from the past to simulate long term uptime entries.
+	// Since we want to test the behavior and not the test itself, this is a "best effort" approach.
+	for _, scenario := range scenarios {
+		t.Run(fmt.Sprintf("num-hours-%d-expected-max-entries-%d", scenario.numberOfHours, scenario.expectedMaxUptimeEntries), func(t *testing.T) {
+			for i := scenario.numberOfHours; i > 0; i-- {
+				//fmt.Printf("i: %d (%s)\n", i, now.Add(-time.Duration(i)*time.Hour))
+				// Create an uptime entry
+				err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-time.Duration(i) * time.Hour), Success: true})
+				if err != nil {
+					t.Log(err)
+				}
+				//// DEBUGGING: check number of uptime entries for endpoint
+				//tx, _ := store.db.Begin()
+				//numberOfUptimeEntriesForEndpoint, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
+				//if err != nil {
+				//	t.Log(err)
+				//}
+				//_ = tx.Commit()
+				//t.Logf("i=%d; numberOfHours=%d; There are currently %d uptime entries for endpointID=%d", i, scenario.numberOfHours, numberOfUptimeEntriesForEndpoint, 1)
+			}
+			// check number of uptime entries for endpoint
+			tx, _ := store.db.Begin()
+			numberOfUptimeEntriesForEndpoint, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
+			if err != nil {
+				t.Log(err)
+			}
+			_ = tx.Commit()
+			//t.Logf("numberOfHours=%d; There are currently %d uptime entries for endpointID=%d", scenario.numberOfHours, numberOfUptimeEntriesForEndpoint, 1)
+			if scenario.expectedMaxUptimeEntries < numberOfUptimeEntriesForEndpoint {
+				t.Errorf("expected %d (uptime entries) to be smaller than %d", numberOfUptimeEntriesForEndpoint, scenario.expectedMaxUptimeEntries)
+			}
+			store.Clear()
+		})
+	}
+}
+
+func TestStore_getEndpointUptime(t *testing.T) {
+	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false)
+	defer store.Clear()
+	defer store.Close()
+	// Add 768 hourly entries (32 days)
+	// Daily entries should be merged from hourly entries automatically
+	for i := 768; i > 0; i-- {
+		err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now().Add(-time.Duration(i) * time.Hour), Duration: time.Second, Success: true})
+		if err != nil {
+			t.Log(err)
+		}
+	}
+	// Check the number of uptime entries
+	tx, _ := store.db.Begin()
+	numberOfUptimeEntriesForEndpoint, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
+	if err != nil {
+		t.Log(err)
+	}
+	if numberOfUptimeEntriesForEndpoint < 20 || numberOfUptimeEntriesForEndpoint > 200 {
+		t.Errorf("expected number of uptime entries to be between 20 and 200, got %d", numberOfUptimeEntriesForEndpoint)
+	}
+	// Retrieve uptime for the past 30d
+	uptime, avgResponseTime, err := store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now())
+	if err != nil {
+		t.Log(err)
+	}
+	_ = tx.Commit()
+	if avgResponseTime != time.Second {
+		t.Errorf("expected average response time to be %s, got %s", time.Second, avgResponseTime)
+	}
+	if uptime != 1 {
+		t.Errorf("expected uptime to be 1, got %f", uptime)
+	}
+	// Add a new unsuccessful result, which should impact the uptime
+	err = store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now(), Duration: time.Second, Success: false})
+	if err != nil {
+		t.Log(err)
+	}
+	// Retrieve uptime for the past 30d
+	tx, _ = store.db.Begin()
+	uptime, _, err = store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now())
+	if err != nil {
+		t.Log(err)
+	}
+	_ = tx.Commit()
+	if uptime == 1 {
+		t.Errorf("expected uptime to be less than 1, got %f", uptime)
+	}
+	// Retrieve uptime for the past 30d, but excluding the last 24h
+	// This is not a real use case as there is no way for users to exclude the last 24h, but this is a great way
+	// to ensure that hourly merging works as intended
+	tx, _ = store.db.Begin()
+	uptimeExcludingLast24h, _, err := store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now().Add(-24*time.Hour))
+	if err != nil {
+		t.Log(err)
+	}
+	_ = tx.Commit()
+	if uptimeExcludingLast24h == uptime {
+		t.Error("expected uptimeExcludingLast24h to be different from uptime")
+	}
+}
 
 func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
 	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false)
+	defer store.Clear()
 	defer store.Close()
 	for i := 0; i < resultsCleanUpThreshold+eventsCleanUpThreshold; i++ {
 		store.Insert(&testEndpoint, &testSuccessfulResult)
@@ -165,7 +288,40 @@ func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
 			t.Errorf("number of events shouldn't have exceeded %d, reached %d", eventsCleanUpThreshold, len(ss.Events))
 		}
 	}
+}
+
+func TestStore_InsertWithCaching(t *testing.T) {
+	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertWithCaching.db", true)
+	defer store.Close()
+	// Add 2 results
+	store.Insert(&testEndpoint, &testSuccessfulResult)
+	store.Insert(&testEndpoint, &testSuccessfulResult)
+	// Verify that they exist
+	endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
+	if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
+		t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
+	}
+	if len(endpointStatuses[0].Results) != 2 {
+		t.Fatalf("expected 2 results, got %d", len(endpointStatuses[0].Results))
+	}
+	// Add 2 more results
+	store.Insert(&testEndpoint, &testUnsuccessfulResult)
+	store.Insert(&testEndpoint, &testUnsuccessfulResult)
+	// Verify that they exist
+	endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
+	if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
+		t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
+	}
+	if len(endpointStatuses[0].Results) != 4 {
+		t.Fatalf("expected 4 results, got %d", len(endpointStatuses[0].Results))
+	}
+	// Clear the store, which should also clear the cache
 	store.Clear()
+	// Verify that they no longer exist
+	endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
+	if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 0 {
+		t.Fatalf("expected 0 EndpointStatus, got %d", numberOfEndpointStatuses)
+	}
 }
 
 func TestStore_Persistence(t *testing.T) {
@@ -182,6 +338,9 @@ func TestStore_Persistence(t *testing.T) {
 	if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour*24*7), time.Now()); uptime != 0.5 {
 		t.Errorf("the uptime over the past 7d should've been 0.5, got %f", uptime)
 	}
+	if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour*24*30), time.Now()); uptime != 0.5 {
+		t.Errorf("the uptime over the past 30d should've been 0.5, got %f", uptime)
+	}
 	ssFromOldStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults).WithEvents(1, common.MaximumNumberOfEvents))
 	if ssFromOldStore == nil || ssFromOldStore.Group != "group" || ssFromOldStore.Name != "name" || len(ssFromOldStore.Events) != 3 || len(ssFromOldStore.Results) != 2 {
 		store.Close()
@@ -368,10 +527,10 @@ func TestStore_NoRows(t *testing.T) {
 	defer store.Close()
 	tx, _ := store.db.Begin()
 	defer tx.Rollback()
-	if _, err := store.getLastEndpointResultSuccessValue(tx, 1); err != errNoRowsReturned {
+	if _, err := store.getLastEndpointResultSuccessValue(tx, 1); !errors.Is(err, errNoRowsReturned) {
 		t.Errorf("should've %v, got %v", errNoRowsReturned, err)
 	}
-	if _, err := store.getAgeOfOldestEndpointUptimeEntry(tx, 1); err != errNoRowsReturned {
+	if _, err := store.getAgeOfOldestEndpointUptimeEntry(tx, 1); !errors.Is(err, errNoRowsReturned) {
 		t.Errorf("should've %v, got %v", errNoRowsReturned, err)
 	}
 }
@@ -564,3 +723,131 @@ func TestCacheKey(t *testing.T) {
 		})
 	}
 }
+
+func TestTriggeredEndpointAlertsPersistence(t *testing.T) {
+	store, _ := NewStore("sqlite", t.TempDir()+"/TestTriggeredEndpointAlertsPersistence.db", false)
+	defer store.Close()
+	yes, desc := false, "description"
+	ep := testEndpoint
+	ep.NumberOfSuccessesInARow = 0
+	alrt := &alert.Alert{
+		Type:             alert.TypePagerDuty,
+		Enabled:          &yes,
+		FailureThreshold: 4,
+		SuccessThreshold: 2,
+		Description:      &desc,
+		SendOnResolved:   &yes,
+		Triggered:        true,
+		ResolveKey:       "1234567",
+	}
+	// Alert just triggered, so NumberOfSuccessesInARow is 0
+	if err := store.UpsertTriggeredEndpointAlert(&ep, alrt); err != nil {
+		t.Fatal("expected no error, got", err.Error())
+	}
+	exists, resolveKey, numberOfSuccessesInARow, err := store.GetTriggeredEndpointAlert(&ep, alrt)
+	if err != nil {
+		t.Fatal("expected no error, got", err.Error())
+	}
+	if !exists {
+		t.Error("expected triggered alert to exist")
+	}
+	if resolveKey != alrt.ResolveKey {
+		t.Errorf("expected resolveKey %s, got %s", alrt.ResolveKey, resolveKey)
+	}
+	if numberOfSuccessesInARow != ep.NumberOfSuccessesInARow {
+		t.Errorf("expected persisted NumberOfSuccessesInARow to be %d, got %d", ep.NumberOfSuccessesInARow, numberOfSuccessesInARow)
+	}
+	// Endpoint just had a successful evaluation, so NumberOfSuccessesInARow is now 1
+	ep.NumberOfSuccessesInARow++
+	if err := store.UpsertTriggeredEndpointAlert(&ep, alrt); err != nil {
+		t.Fatal("expected no error, got", err.Error())
+	}
+	exists, resolveKey, numberOfSuccessesInARow, err = store.GetTriggeredEndpointAlert(&ep, alrt)
+	if err != nil {
+		t.Error("expected no error, got", err.Error())
+	}
+	if !exists {
+		t.Error("expected triggered alert to exist")
+	}
+	if resolveKey != alrt.ResolveKey {
+		t.Errorf("expected resolveKey %s, got %s", alrt.ResolveKey, resolveKey)
+	}
+	if numberOfSuccessesInARow != ep.NumberOfSuccessesInARow {
+		t.Errorf("expected persisted NumberOfSuccessesInARow to be %d, got %d", ep.NumberOfSuccessesInARow, numberOfSuccessesInARow)
+	}
+	// Simulate the endpoint having another successful evaluation, which means the alert is now resolved,
+	// and we should delete the triggered alert from the store
+	ep.NumberOfSuccessesInARow++
+	if err := store.DeleteTriggeredEndpointAlert(&ep, alrt); err != nil {
+		t.Fatal("expected no error, got", err.Error())
+	}
+	exists, _, _, err = store.GetTriggeredEndpointAlert(&ep, alrt)
+	if err != nil {
+		t.Error("expected no error, got", err.Error())
+	}
+	if exists {
+		t.Error("expected triggered alert to no longer exist as it has been deleted")
+	}
+}
+
+func TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(t *testing.T) {
+	store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint.db", false)
+	defer store.Close()
+	yes, desc := false, "description"
+	ep1 := testEndpoint
+	ep1.Name = "ep1"
+	ep2 := testEndpoint
+	ep2.Name = "ep2"
+	alert1 := alert.Alert{
+		Type:             alert.TypePagerDuty,
+		Enabled:          &yes,
+		FailureThreshold: 4,
+		SuccessThreshold: 2,
+		Description:      &desc,
+		SendOnResolved:   &yes,
+		Triggered:        true,
+		ResolveKey:       "1234567",
+	}
+	alert2 := alert1
+	alert2.Type, alert2.ResolveKey = alert.TypeSlack, ""
+	alert3 := alert2
+	if err := store.UpsertTriggeredEndpointAlert(&ep1, &alert1); err != nil {
+		t.Fatal("expected no error, got", err.Error())
+	}
+	if err := store.UpsertTriggeredEndpointAlert(&ep1, &alert2); err != nil {
+		t.Fatal("expected no error, got", err.Error())
+	}
+	if err := store.UpsertTriggeredEndpointAlert(&ep2, &alert3); err != nil {
+		t.Fatal("expected no error, got", err.Error())
+	}
+	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert1); !exists {
+		t.Error("expected alert1 to exist for ep1")
+	}
+	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert2); !exists {
+		t.Error("expected alert2 to exist for ep1")
+	}
+	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
+		t.Error("expected alert3 to exist for ep2")
+	}
+	// Now we simulate the alert configuration being updated, and the alert being resolved
+	if deleted := store.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(&ep1, []string{alert2.Checksum()}); deleted != 1 {
+		t.Errorf("expected 1 triggered alert to be deleted, got %d", deleted)
+	}
+	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert1); exists {
+		t.Error("expected alert1 to have been deleted")
+	}
+	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert2); !exists {
+		t.Error("expected alert2 to exist for ep1")
+	}
+	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
+		t.Error("expected alert3 to exist for ep2")
+	}
+	// Now let's just assume all alerts for ep1 were removed
+	if deleted := store.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(&ep1, []string{}); deleted != 1 {
+		t.Errorf("expected 1 triggered alert to be deleted, got %d", deleted)
+	}
+	// Make sure the alert for ep2 still exists
+	if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
+		t.Error("expected alert3 to exist for ep2")
+	}
+}
@@ -5,6 +5,7 @@ import (
 	"log"
 	"time"
 
+	"github.com/TwiN/gatus/v5/alerting/alert"
 	"github.com/TwiN/gatus/v5/config/endpoint"
 	"github.com/TwiN/gatus/v5/storage"
 	"github.com/TwiN/gatus/v5/storage/store/common/paging"
@@ -41,6 +42,21 @@ type Store interface {
 	// Used to delete endpoints that have been persisted but are no longer part of the configured endpoints
 	DeleteAllEndpointStatusesNotInKeys(keys []string) int
 
+	// GetTriggeredEndpointAlert returns whether the triggered alert for the specified endpoint exists, as well as the necessary information to resolve it
+	GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error)
+
+	// UpsertTriggeredEndpointAlert inserts/updates a triggered alert for an endpoint
+	// Used for persistence of triggered alerts across application restarts
+	UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error
+
+	// DeleteTriggeredEndpointAlert deletes a triggered alert for an endpoint
+	DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error
+
+	// DeleteAllTriggeredAlertsNotInChecksumsByEndpoint removes all triggered alerts owned by an endpoint whose alert
+	// configurations are not provided in the checksums list.
+	// This prevents triggered alerts that have been removed or modified from lingering in the database.
+	DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.Endpoint, checksums []string) int
+
 	// Clear deletes everything from the store
 	Clear()
 
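The four methods added to the Store interface above describe the full persistence lifecycle of a triggered alert. As a rough illustration of how a consumer of the interface might restore that state on startup, here is a sketch; restoreTriggeredAlerts is a hypothetical helper, not part of this change, and whether Gatus wires things up exactly this way is not shown in this excerpt.

package example

import (
	"github.com/TwiN/gatus/v5/config/endpoint"
	"github.com/TwiN/gatus/v5/storage/store"
)

// restoreTriggeredAlerts (hypothetical) re-applies persisted triggered alerts to an endpoint's in-memory state.
func restoreTriggeredAlerts(s store.Store, ep *endpoint.Endpoint) error {
	checksums := make([]string, 0, len(ep.Alerts))
	for _, endpointAlert := range ep.Alerts {
		checksums = append(checksums, endpointAlert.Checksum())
		exists, resolveKey, numberOfSuccessesInARow, err := s.GetTriggeredEndpointAlert(ep, endpointAlert)
		if err != nil {
			return err
		}
		if exists {
			// Resume where the previous process left off instead of re-triggering from scratch.
			endpointAlert.Triggered = true
			endpointAlert.ResolveKey = resolveKey
			ep.NumberOfSuccessesInARow = numberOfSuccessesInARow
		}
	}
	// Drop persisted alerts whose configuration no longer matches any currently configured alert.
	_ = s.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep, checksums)
	return nil
}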
@@ -7,6 +7,7 @@ import (
 
 	"github.com/TwiN/gatus/v5/alerting"
 	"github.com/TwiN/gatus/v5/config/endpoint"
+	"github.com/TwiN/gatus/v5/storage/store"
 )
 
 // HandleAlerting takes care of alerts to resolve and alerts to trigger based on result success or failure
@@ -50,9 +51,12 @@ func handleAlertsToTrigger(ep *endpoint.Endpoint, result *endpoint.Result, alert
 				log.Printf("[watchdog.handleAlertsToTrigger] Failed to send an alert for endpoint=%s: %s", ep.Name, err.Error())
 			} else {
 				endpointAlert.Triggered = true
+				if err := store.Get().UpsertTriggeredEndpointAlert(ep, endpointAlert); err != nil {
+					log.Printf("[watchdog.handleAlertsToTrigger] Failed to persist triggered endpoint alert for endpoint with key=%s: %s", ep.Key(), err.Error())
+				}
 			}
 		} else {
-			log.Printf("[watchdog.handleAlertsToResolve] Not sending alert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", endpointAlert.Type)
+			log.Printf("[watchdog.handleAlertsToTrigger] Not sending alert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", endpointAlert.Type)
 		}
 	}
 }
@@ -60,21 +64,31 @@ func handleAlertsToTrigger(ep *endpoint.Endpoint, result *endpoint.Result, alert
 func handleAlertsToResolve(ep *endpoint.Endpoint, result *endpoint.Result, alertingConfig *alerting.Config, debug bool) {
 	ep.NumberOfSuccessesInARow++
 	for _, endpointAlert := range ep.Alerts {
-		if !endpointAlert.IsEnabled() || !endpointAlert.Triggered || endpointAlert.SuccessThreshold > ep.NumberOfSuccessesInARow {
+		isStillBelowSuccessThreshold := endpointAlert.SuccessThreshold > ep.NumberOfSuccessesInARow
+		if isStillBelowSuccessThreshold && endpointAlert.IsEnabled() && endpointAlert.Triggered {
+			// Persist NumberOfSuccessesInARow
+			if err := store.Get().UpsertTriggeredEndpointAlert(ep, endpointAlert); err != nil {
+				log.Printf("[watchdog.handleAlertsToResolve] Failed to update triggered endpoint alert for endpoint with key=%s: %s", ep.Key(), err.Error())
+			}
+		}
+		if !endpointAlert.IsEnabled() || !endpointAlert.Triggered || isStillBelowSuccessThreshold {
 			continue
 		}
 		// Even if the alert provider returns an error, we still set the alert's Triggered variable to false.
 		// Further explanation can be found on Alert's Triggered field.
 		endpointAlert.Triggered = false
+		if err := store.Get().DeleteTriggeredEndpointAlert(ep, endpointAlert); err != nil {
+			log.Printf("[watchdog.handleAlertsToResolve] Failed to delete persisted triggered endpoint alert for endpoint with key=%s: %s", ep.Key(), err.Error())
+		}
 		if !endpointAlert.IsSendingOnResolved() {
 			continue
 		}
 		alertProvider := alertingConfig.GetAlertingProviderByAlertType(endpointAlert.Type)
 		if alertProvider != nil {
-			log.Printf("[watchdog.handleAlertsToResolve] Sending %s alert because alert for endpoint=%s with description='%s' has been RESOLVED", endpointAlert.Type, ep.Name, endpointAlert.GetDescription())
+			log.Printf("[watchdog.handleAlertsToResolve] Sending %s alert because alert for endpoint with key=%s with description='%s' has been RESOLVED", endpointAlert.Type, ep.Key(), endpointAlert.GetDescription())
 			err := alertProvider.Send(ep, endpointAlert, result, true)
 			if err != nil {
-				log.Printf("[watchdog.handleAlertsToResolve] Failed to send an alert for endpoint=%s: %s", ep.Name, err.Error())
+				log.Printf("[watchdog.handleAlertsToResolve] Failed to send an alert for endpoint with key=%s: %s", ep.Key(), err.Error())
 			}
 		} else {
 			log.Printf("[watchdog.handleAlertsToResolve] Not sending alert of type=%s despite being RESOLVED, because the provider wasn't configured properly", endpointAlert.Type)
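One subtlety in the handleAlertsToResolve changes above: the success counter is persisted before the early continue, so a restart in the middle of a recovery streak does not lose progress toward SuccessThreshold. A minimal sketch of that branch condition, using illustrative names that are not from the codebase:

package example

// shouldPersistSuccessStreak (illustrative) mirrors the condition added above: the counter is
// persisted only while an enabled, still-triggered alert has not yet reached its success
// threshold; once the threshold is reached, the alert is resolved and its persisted record is
// deleted instead.
func shouldPersistSuccessStreak(enabled, triggered bool, successThreshold, successesInARow int) bool {
	isStillBelowSuccessThreshold := successThreshold > successesInARow
	return isStillBelowSuccessThreshold && enabled && triggered
}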