mirror of
https://github.com/openziti/zrok.git
synced 2025-06-20 17:58:50 +02:00
Merge pull request #268 from openziti/v0.4_bandwidth_limits
New Metrics and Limits Infrastructure (#235)
This commit is contained in:
commit
8b767acc62
@ -1,6 +1,10 @@
|
||||
# v0.4.0
|
||||
|
||||
FEATURE: New metrics infrastructure based on OpenZiti usage events (https://github.com/openziti/zrok/issues/128). See the [v0.4 Metrics Guide](docs/guides/v0.4_metrics.md) for more information.
|
||||
FEATURE: New metrics infrastructure based on OpenZiti usage events (https://github.com/openziti/zrok/issues/128). See the [v0.4 Metrics Guide](docs/guides/metrics-and-limits/configuring-metrics.md) for more information.
|
||||
|
||||
FEATURE: New limits implementation based on the new metrics infrastructure (https://github.com/openziti/zrok/issues/235). See the [v0.4 Limits Guide](docs/guides/metrics-and-limits/configuring-limits.md) for more information.
|
||||
|
||||
CHANGE: The controller configuration version bumps from `v: 2` to `v: 3` to support all of the new `v0.4` functionality. See the [example ctrl.yml](etc/ctrl.yml) for details on the new configuration.
|
||||
|
||||
CHANGE: The underlying database store now utilizes a `deleted` flag on all tables to implement "soft deletes". This was necessary for the new metrics infrastructure, where we need to account for metrics data that arrived after the lifetime of a share or environment; and also we're going to need this for limits, where we need to see historical information about activity in the past (https://github.com/openziti/zrok/issues/262)
|
||||
|
||||
|
@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@ -32,7 +33,7 @@ func newAdminBootstrap() *adminBootstrap {
|
||||
|
||||
func (cmd *adminBootstrap) run(_ *cobra.Command, args []string) {
|
||||
configPath := args[0]
|
||||
inCfg, err := controller.LoadConfig(configPath)
|
||||
inCfg, err := config.LoadConfig(configPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@ -27,7 +28,7 @@ func newAdminGcCommand() *adminGcCommand {
|
||||
}
|
||||
|
||||
func (gc *adminGcCommand) run(_ *cobra.Command, args []string) {
|
||||
cfg, err := controller.LoadConfig(args[0])
|
||||
cfg, err := config.LoadConfig(args[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -3,14 +3,21 @@ package main
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var controllerCmd *controllerCommand
|
||||
|
||||
var metricsCmd = &cobra.Command{
|
||||
Use: "metrics",
|
||||
Short: "Metrics related commands",
|
||||
}
|
||||
|
||||
func init() {
|
||||
controllerCmd = newControllerCommand()
|
||||
controllerCmd.cmd.AddCommand(metricsCmd)
|
||||
rootCmd.AddCommand(controllerCmd.cmd)
|
||||
}
|
||||
|
||||
@ -31,7 +38,7 @@ func newControllerCommand() *controllerCommand {
|
||||
}
|
||||
|
||||
func (cmd *controllerCommand) run(_ *cobra.Command, args []string) {
|
||||
cfg, err := controller.LoadConfig(args[0])
|
||||
cfg, err := config.LoadConfig(args[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
61
cmd/zrok/controllerMetricsBridge.go
Normal file
61
cmd/zrok/controllerMetricsBridge.go
Normal file
@ -0,0 +1,61 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/controller/env"
|
||||
"github.com/openziti/zrok/controller/metrics"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
metricsCmd.AddCommand(newBridgeCommand().cmd)
|
||||
}
|
||||
|
||||
type bridgeCommand struct {
|
||||
cmd *cobra.Command
|
||||
}
|
||||
|
||||
func newBridgeCommand() *bridgeCommand {
|
||||
cmd := &cobra.Command{
|
||||
Use: "bridge <configPath>",
|
||||
Short: "Start a zrok metrics bridge",
|
||||
Args: cobra.ExactArgs(1),
|
||||
}
|
||||
command := &bridgeCommand{cmd}
|
||||
cmd.Run = command.run
|
||||
return command
|
||||
}
|
||||
|
||||
func (cmd *bridgeCommand) run(_ *cobra.Command, args []string) {
|
||||
cfg, err := config.LoadConfig(args[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
logrus.Infof(cf.Dump(cfg, env.GetCfOptions()))
|
||||
|
||||
bridge, err := metrics.NewBridge(cfg.Bridge)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if _, err = bridge.Start(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c := make(chan os.Signal)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-c
|
||||
bridge.Stop()
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
for {
|
||||
time.Sleep(24 * 60 * time.Minute)
|
||||
}
|
||||
}
|
@ -2,7 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/tui"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
@ -28,7 +28,7 @@ func newControllerValidateCommand() *controllerValidateCommand {
|
||||
}
|
||||
|
||||
func (cmd *controllerValidateCommand) run(_ *cobra.Command, args []string) {
|
||||
cfg, err := controller.LoadConfig(args[0])
|
||||
cfg, err := config.LoadConfig(args[0])
|
||||
if err != nil {
|
||||
tui.Error("controller config validation failed", err)
|
||||
}
|
||||
|
@ -1,57 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller/metrics"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(newMetricsCommand().cmd)
|
||||
}
|
||||
|
||||
type metricsCommand struct {
|
||||
cmd *cobra.Command
|
||||
}
|
||||
|
||||
func newMetricsCommand() *metricsCommand {
|
||||
cmd := &cobra.Command{
|
||||
Use: "metrics <configPath>",
|
||||
Short: "Start a zrok metrics agent",
|
||||
Args: cobra.ExactArgs(1),
|
||||
}
|
||||
command := &metricsCommand{cmd}
|
||||
cmd.Run = command.run
|
||||
return command
|
||||
}
|
||||
|
||||
func (cmd *metricsCommand) run(_ *cobra.Command, args []string) {
|
||||
cfg, err := metrics.LoadConfig(args[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
logrus.Infof(cf.Dump(cfg, metrics.GetCfOptions()))
|
||||
|
||||
ma, err := metrics.Run(cfg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c := make(chan os.Signal)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-c
|
||||
ma.Stop()
|
||||
ma.Join()
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
for {
|
||||
time.Sleep(30 * time.Minute)
|
||||
}
|
||||
}
|
@ -45,12 +45,12 @@ func (h *accessHandler) Handle(params share.AccessParams, principal *rest_model_
|
||||
}
|
||||
|
||||
shrToken := params.Body.ShrToken
|
||||
sshr, err := str.FindShareWithToken(shrToken, tx)
|
||||
shr, err := str.FindShareWithToken(shrToken, tx)
|
||||
if err != nil {
|
||||
logrus.Errorf("error finding share")
|
||||
return share.NewAccessNotFound()
|
||||
}
|
||||
if sshr == nil {
|
||||
if shr == nil {
|
||||
logrus.Errorf("unable to find share '%v' for user '%v'", shrToken, principal.Email)
|
||||
return share.NewAccessNotFound()
|
||||
}
|
||||
@ -61,7 +61,7 @@ func (h *accessHandler) Handle(params share.AccessParams, principal *rest_model_
|
||||
return share.NewAccessInternalServerError()
|
||||
}
|
||||
|
||||
if _, err := str.CreateFrontend(envId, &store.Frontend{Token: feToken, ZId: envZId}, tx); err != nil {
|
||||
if _, err := str.CreateFrontend(envId, &store.Frontend{PrivateShareId: &shr.Id, Token: feToken, ZId: envZId}, tx); err != nil {
|
||||
logrus.Errorf("error creating frontend record for user '%v': %v", principal.Email, err)
|
||||
return share.NewAccessInternalServerError()
|
||||
}
|
||||
@ -76,7 +76,7 @@ func (h *accessHandler) Handle(params share.AccessParams, principal *rest_model_
|
||||
"zrokFrontendToken": feToken,
|
||||
"zrokShareToken": shrToken,
|
||||
}
|
||||
if err := zrokEdgeSdk.CreateServicePolicyDial(envZId+"-"+sshr.ZId+"-dial", sshr.ZId, []string{envZId}, addlTags, edge); err != nil {
|
||||
if err := zrokEdgeSdk.CreateServicePolicyDial(envZId+"-"+shr.ZId+"-dial", shr.ZId, []string{envZId}, addlTags, edge); err != nil {
|
||||
logrus.Errorf("unable to create dial policy for user '%v': %v", principal.Email, err)
|
||||
return share.NewAccessInternalServerError()
|
||||
}
|
||||
|
@ -12,7 +12,8 @@ import (
|
||||
"github.com/openziti/edge/rest_model"
|
||||
rest_model_edge "github.com/openziti/edge/rest_model"
|
||||
"github.com/openziti/sdk-golang/ziti"
|
||||
config2 "github.com/openziti/sdk-golang/ziti/config"
|
||||
ziti_config "github.com/openziti/sdk-golang/ziti/config"
|
||||
zrok_config "github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"github.com/openziti/zrok/model"
|
||||
@ -22,7 +23,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func Bootstrap(skipCtrl, skipFrontend bool, inCfg *Config) error {
|
||||
func Bootstrap(skipCtrl, skipFrontend bool, inCfg *zrok_config.Config) error {
|
||||
cfg = inCfg
|
||||
|
||||
if v, err := store.Open(cfg.Store); err == nil {
|
||||
@ -138,7 +139,7 @@ func getIdentityId(identityName string) (string, error) {
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error opening identity '%v' from zrokdir", identityName)
|
||||
}
|
||||
zcfg, err := config2.NewFromFile(zif)
|
||||
zcfg, err := ziti_config.NewFromFile(zif)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error loading ziti config from file '%v'", zif)
|
||||
}
|
||||
|
@ -1,6 +1,10 @@
|
||||
package controller
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/openziti/zrok/controller/emailUi"
|
||||
"github.com/openziti/zrok/controller/env"
|
||||
"github.com/openziti/zrok/controller/limits"
|
||||
"github.com/openziti/zrok/controller/metrics"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"time"
|
||||
|
||||
@ -9,20 +13,21 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const ConfigVersion = 2
|
||||
const ConfigVersion = 3
|
||||
|
||||
type Config struct {
|
||||
V int
|
||||
Admin *AdminConfig
|
||||
Bridge *metrics.BridgeConfig
|
||||
Endpoint *EndpointConfig
|
||||
Email *EmailConfig
|
||||
Influx *InfluxConfig
|
||||
Limits *LimitsConfig
|
||||
Email *emailUi.Config
|
||||
Limits *limits.Config
|
||||
Maintenance *MaintenanceConfig
|
||||
Metrics *metrics.Config
|
||||
Registration *RegistrationConfig
|
||||
ResetPassword *ResetPasswordConfig
|
||||
Store *store.Config
|
||||
Ziti *zrokEdgeSdk.ZitiConfig
|
||||
Ziti *zrokEdgeSdk.Config
|
||||
}
|
||||
|
||||
type AdminConfig struct {
|
||||
@ -35,14 +40,6 @@ type EndpointConfig struct {
|
||||
Port int
|
||||
}
|
||||
|
||||
type EmailConfig struct {
|
||||
Host string
|
||||
Port int
|
||||
Username string
|
||||
Password string `cf:"+secret"`
|
||||
From string
|
||||
}
|
||||
|
||||
type RegistrationConfig struct {
|
||||
RegistrationUrlTemplate string
|
||||
TokenStrategy string
|
||||
@ -52,13 +49,6 @@ type ResetPasswordConfig struct {
|
||||
ResetUrlTemplate string
|
||||
}
|
||||
|
||||
type InfluxConfig struct {
|
||||
Url string
|
||||
Bucket string
|
||||
Org string
|
||||
Token string `cf:"+secret"`
|
||||
}
|
||||
|
||||
type MaintenanceConfig struct {
|
||||
ResetPassword *ResetPasswordMaintenanceConfig
|
||||
Registration *RegistrationMaintenanceConfig
|
||||
@ -76,19 +66,9 @@ type ResetPasswordMaintenanceConfig struct {
|
||||
BatchLimit int
|
||||
}
|
||||
|
||||
const Unlimited = -1
|
||||
|
||||
type LimitsConfig struct {
|
||||
Environments int
|
||||
Shares int
|
||||
}
|
||||
|
||||
func DefaultConfig() *Config {
|
||||
return &Config{
|
||||
Limits: &LimitsConfig{
|
||||
Environments: Unlimited,
|
||||
Shares: Unlimited,
|
||||
},
|
||||
Limits: limits.DefaultConfig(),
|
||||
Maintenance: &MaintenanceConfig{
|
||||
ResetPassword: &ResetPasswordMaintenanceConfig{
|
||||
ExpirationTimeout: time.Minute * 15,
|
||||
@ -106,7 +86,7 @@ func DefaultConfig() *Config {
|
||||
|
||||
func LoadConfig(path string) (*Config, error) {
|
||||
cfg := DefaultConfig()
|
||||
if err := cf.BindYaml(cfg, path, cf.DefaultOptions()); err != nil {
|
||||
if err := cf.BindYaml(cfg, path, env.GetCfOptions()); err != nil {
|
||||
return nil, errors.Wrapf(err, "error loading controller config '%v'", path)
|
||||
}
|
||||
if cfg.V != ConfigVersion {
|
@ -3,15 +3,16 @@ package controller
|
||||
import (
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/openziti/zrok/build"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/rest_model_zrok"
|
||||
"github.com/openziti/zrok/rest_server_zrok/operations/metadata"
|
||||
)
|
||||
|
||||
type configurationHandler struct {
|
||||
cfg *Config
|
||||
cfg *config.Config
|
||||
}
|
||||
|
||||
func newConfigurationHandler(cfg *Config) *configurationHandler {
|
||||
func newConfigurationHandler(cfg *config.Config) *configurationHandler {
|
||||
return &configurationHandler{
|
||||
cfg: cfg,
|
||||
}
|
||||
|
@ -2,6 +2,10 @@ package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/controller/limits"
|
||||
"github.com/openziti/zrok/controller/metrics"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/go-openapi/loads"
|
||||
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||
@ -13,11 +17,12 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var cfg *Config
|
||||
var cfg *config.Config
|
||||
var str *store.Store
|
||||
var idb influxdb2.Client
|
||||
var limitsAgent *limits.Agent
|
||||
|
||||
func Run(inCfg *Config) error {
|
||||
func Run(inCfg *config.Config) error {
|
||||
cfg = inCfg
|
||||
|
||||
swaggerSpec, err := loads.Embedded(rest_server_zrok.SwaggerJSON, rest_server_zrok.FlatSwaggerJSON)
|
||||
@ -39,7 +44,7 @@ func Run(inCfg *Config) error {
|
||||
api.AdminInviteTokenGenerateHandler = newInviteTokenGenerateHandler()
|
||||
api.AdminListFrontendsHandler = newListFrontendsHandler()
|
||||
api.AdminUpdateFrontendHandler = newUpdateFrontendHandler()
|
||||
api.EnvironmentEnableHandler = newEnableHandler(cfg.Limits)
|
||||
api.EnvironmentEnableHandler = newEnableHandler()
|
||||
api.EnvironmentDisableHandler = newDisableHandler()
|
||||
api.MetadataConfigurationHandler = newConfigurationHandler(cfg)
|
||||
api.MetadataGetEnvironmentDetailHandler = newEnvironmentDetailHandler()
|
||||
@ -47,7 +52,7 @@ func Run(inCfg *Config) error {
|
||||
api.MetadataOverviewHandler = metadata.OverviewHandlerFunc(overviewHandler)
|
||||
api.MetadataVersionHandler = metadata.VersionHandlerFunc(versionHandler)
|
||||
api.ShareAccessHandler = newAccessHandler()
|
||||
api.ShareShareHandler = newShareHandler(cfg.Limits)
|
||||
api.ShareShareHandler = newShareHandler()
|
||||
api.ShareUnaccessHandler = newUnaccessHandler()
|
||||
api.ShareUnshareHandler = newUnshareHandler()
|
||||
api.ShareUpdateShareHandler = newUpdateShareHandler()
|
||||
@ -62,8 +67,31 @@ func Run(inCfg *Config) error {
|
||||
return errors.Wrap(err, "error opening store")
|
||||
}
|
||||
|
||||
if cfg.Influx != nil {
|
||||
idb = influxdb2.NewClient(cfg.Influx.Url, cfg.Influx.Token)
|
||||
if cfg.Metrics != nil && cfg.Metrics.Influx != nil {
|
||||
idb = influxdb2.NewClient(cfg.Metrics.Influx.Url, cfg.Metrics.Influx.Token)
|
||||
} else {
|
||||
logrus.Warn("skipping influx client; no configuration")
|
||||
}
|
||||
|
||||
if cfg.Metrics != nil && cfg.Metrics.Agent != nil && cfg.Metrics.Influx != nil {
|
||||
ma, err := metrics.NewAgent(cfg.Metrics.Agent, str, cfg.Metrics.Influx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error creating metrics agent")
|
||||
}
|
||||
if err := ma.Start(); err != nil {
|
||||
return errors.Wrap(err, "error starting metrics agent")
|
||||
}
|
||||
defer func() { ma.Stop() }()
|
||||
|
||||
if cfg.Limits != nil && cfg.Limits.Enforcing {
|
||||
limitsAgent, err = limits.NewAgent(cfg.Limits, cfg.Metrics.Influx, cfg.Ziti, cfg.Email, str)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error creating limits agent")
|
||||
}
|
||||
ma.AddUsageSink(limitsAgent)
|
||||
limitsAgent.Start()
|
||||
defer func() { limitsAgent.Stop() }()
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
9
controller/emailUi/config.go
Normal file
9
controller/emailUi/config.go
Normal file
@ -0,0 +1,9 @@
|
||||
package emailUi
|
||||
|
||||
type Config struct {
|
||||
Host string
|
||||
Port int
|
||||
Username string
|
||||
Password string `cf:"+secret"`
|
||||
From string
|
||||
}
|
@ -2,5 +2,5 @@ package emailUi
|
||||
|
||||
import "embed"
|
||||
|
||||
//go:embed verify.gohtml verify.gotext resetPassword.gohtml resetPassword.gotext
|
||||
//go:embed verify.gohtml verify.gotext resetPassword.gohtml resetPassword.gotext limitWarning.gohtml limitWarning.gotext
|
||||
var FS embed.FS
|
||||
|
156
controller/emailUi/limitWarning.gohtml
Normal file
156
controller/emailUi/limitWarning.gohtml
Normal file
@ -0,0 +1,156 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
|
||||
<title>Transfer limit warning!</title>
|
||||
<meta name="description" content="zrok Transfer Limit Warning">
|
||||
<meta name="viewport" content="width=device-width">
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||
<link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono&display=swap" rel="stylesheet">
|
||||
<style>
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 25;
|
||||
font-family: 'JetBrains Mono', 'Courier New', monospace;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
-moz-osx-font-smoothing: grayscale;
|
||||
color: #ffffff;
|
||||
background-color: #3b2693;
|
||||
|
||||
}
|
||||
|
||||
a:link {
|
||||
color: #00d7e4;
|
||||
}
|
||||
|
||||
a:visited {
|
||||
color: #00d7e4;
|
||||
}
|
||||
|
||||
a:hover,
|
||||
a:active {
|
||||
color: #ff0100;
|
||||
}
|
||||
|
||||
.claim {
|
||||
font-size: 2em;
|
||||
margin: 0.5em 0 1em 0;
|
||||
}
|
||||
|
||||
.container {
|
||||
width: 62em;
|
||||
margin: 2em auto;
|
||||
max-width: 100%;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
|
||||
|
||||
.btn {
|
||||
display: inline-block;
|
||||
margin: .25em;
|
||||
padding: 10px 16px;
|
||||
font-size: 1.15em;
|
||||
line-height: 1.33;
|
||||
border-radius: 6px;
|
||||
text-align: center;
|
||||
white-space: nowrap;
|
||||
vertical-align: middle;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.btn-primary {
|
||||
color: #ffffff;
|
||||
background-color: #ff0100;
|
||||
border-color: #ff0100;
|
||||
}
|
||||
|
||||
a.btn-primary:link,
|
||||
a.btn-primary:visited {
|
||||
color: #ffffff;
|
||||
}
|
||||
|
||||
a.btn-primary:hover,
|
||||
a.btn-primary:active {
|
||||
background-color: #cf0100;
|
||||
}
|
||||
|
||||
.btn-secondary {
|
||||
background-color: #b3b3b3;
|
||||
border-color: #b3b3b3;
|
||||
color: #252525;
|
||||
font-weight: 800;
|
||||
}
|
||||
|
||||
a.btn-secondary:link,
|
||||
a.btn-secondary:visited {
|
||||
color: #666;
|
||||
}
|
||||
|
||||
a.btn-secondary:hover,
|
||||
a.btn-secondary:hover {
|
||||
background-color: #ccc;
|
||||
color: #333;
|
||||
}
|
||||
|
||||
.about {
|
||||
margin: 1em auto;
|
||||
}
|
||||
|
||||
.about td {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.about td:first-child {
|
||||
width: 80px;
|
||||
}
|
||||
|
||||
@media screen and (max-width: 600px) {
|
||||
img {
|
||||
height: auto !important;
|
||||
}
|
||||
}
|
||||
|
||||
@media screen and (max-width: 400px) {
|
||||
body {
|
||||
font-size: 14px;
|
||||
}
|
||||
}
|
||||
|
||||
@media screen and (max-width: 320px) {
|
||||
body {
|
||||
font-size: 12px;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body style="font-family: 'JetBrains Mono', 'Courier New', monospace; color: #ffffff; background-color: #3b2693; font-weight: 600;">
|
||||
|
||||
<div class="container">
|
||||
<div class="banner" style="margin: auto;">
|
||||
<img src="https://zrok.io/wp-content/uploads/2023/03/warning.jpg" width="363px" height="500px" style="padding-bottom: 10px;"/>
|
||||
</div>
|
||||
<div class="cta" style="text-align: center;">
|
||||
<h3 style="text-align: center;">Your account is reaching a transfer limit, {{ .EmailAddress }}.</h3>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
{{ .Detail }}
|
||||
</div>
|
||||
|
||||
<table border="0" cellpadding="0" cellspacing="0" align="center" class="about">
|
||||
<tr>
|
||||
<td><a href="https://github.com/openziti/zrok" target="_blank">github.com/openziti/zrok</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>{{ .Version }}</td>
|
||||
</tr>
|
||||
</table>
|
||||
<p style="text-align: center;"></a>Copyright © 2023 <a href="http://www.netfoundry.io" target="_blank" style="color: #00d7e4;">NetFoundry, Inc.</a></p>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
3
controller/emailUi/limitWarning.gotext
Normal file
3
controller/emailUi/limitWarning.gotext
Normal file
@ -0,0 +1,3 @@
|
||||
Your account is nearing a transfer size limit, {{ .EmailAddress }}!
|
||||
|
||||
{{ .Detail }}
|
25
controller/emailUi/model.go
Normal file
25
controller/emailUi/model.go
Normal file
@ -0,0 +1,25 @@
|
||||
package emailUi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/pkg/errors"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type WarningEmail struct {
|
||||
EmailAddress string
|
||||
Detail string
|
||||
Version string
|
||||
}
|
||||
|
||||
func (we WarningEmail) MergeTemplate(filename string) (string, error) {
|
||||
t, err := template.ParseFS(FS, filename)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error parsing warning email template '%v'", filename)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
if err := t.Execute(buf, we); err != nil {
|
||||
return "", errors.Wrapf(err, "error executing warning email template '%v'", filename)
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
@ -13,24 +13,22 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type enableHandler struct {
|
||||
cfg *LimitsConfig
|
||||
}
|
||||
type enableHandler struct{}
|
||||
|
||||
func newEnableHandler(cfg *LimitsConfig) *enableHandler {
|
||||
return &enableHandler{cfg: cfg}
|
||||
func newEnableHandler() *enableHandler {
|
||||
return &enableHandler{}
|
||||
}
|
||||
|
||||
func (h *enableHandler) Handle(params environment.EnableParams, principal *rest_model_zrok.Principal) middleware.Responder {
|
||||
// start transaction early; if it fails, don't bother creating ziti resources
|
||||
tx, err := str.Begin()
|
||||
trx, err := str.Begin()
|
||||
if err != nil {
|
||||
logrus.Errorf("error starting transaction for user '%v': %v", principal.Email, err)
|
||||
return environment.NewEnableInternalServerError()
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
defer func() { _ = trx.Rollback() }()
|
||||
|
||||
if err := h.checkLimits(principal, tx); err != nil {
|
||||
if err := h.checkLimits(principal, trx); err != nil {
|
||||
logrus.Errorf("limits error for user '%v': %v", principal.Email, err)
|
||||
return environment.NewEnableUnauthorized()
|
||||
}
|
||||
@ -70,14 +68,14 @@ func (h *enableHandler) Handle(params environment.EnableParams, principal *rest_
|
||||
Host: params.Body.Host,
|
||||
Address: realRemoteAddress(params.HTTPRequest),
|
||||
ZId: envZId,
|
||||
}, tx)
|
||||
}, trx)
|
||||
if err != nil {
|
||||
logrus.Errorf("error storing created identity for user '%v': %v", principal.Email, err)
|
||||
_ = tx.Rollback()
|
||||
_ = trx.Rollback()
|
||||
return environment.NewEnableInternalServerError()
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
if err := trx.Commit(); err != nil {
|
||||
logrus.Errorf("error committing for user '%v': %v", principal.Email, err)
|
||||
return environment.NewEnableInternalServerError()
|
||||
}
|
||||
@ -99,14 +97,16 @@ func (h *enableHandler) Handle(params environment.EnableParams, principal *rest_
|
||||
return resp
|
||||
}
|
||||
|
||||
func (h *enableHandler) checkLimits(principal *rest_model_zrok.Principal, tx *sqlx.Tx) error {
|
||||
if !principal.Limitless && h.cfg.Environments > Unlimited {
|
||||
envs, err := str.FindEnvironmentsForAccount(int(principal.ID), tx)
|
||||
if err != nil {
|
||||
return errors.Errorf("unable to find environments for account '%v': %v", principal.Email, err)
|
||||
}
|
||||
if len(envs)+1 > h.cfg.Environments {
|
||||
return errors.Errorf("would exceed environments limit of %d for '%v'", h.cfg.Environments, principal.Email)
|
||||
func (h *enableHandler) checkLimits(principal *rest_model_zrok.Principal, trx *sqlx.Tx) error {
|
||||
if !principal.Limitless {
|
||||
if limitsAgent != nil {
|
||||
ok, err := limitsAgent.CanCreateEnvironment(int(principal.ID), trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error checking environment limits for '%v'", principal.Email)
|
||||
}
|
||||
if !ok {
|
||||
return errors.Errorf("environment limit check failed for '%v'", principal.Email)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
14
controller/env/cf.go
vendored
Normal file
14
controller/env/cf.go
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
package env
|
||||
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
)
|
||||
|
||||
var cfOpts *cf.Options
|
||||
|
||||
func GetCfOptions() *cf.Options {
|
||||
if cfOpts == nil {
|
||||
cfOpts = cf.DefaultOptions()
|
||||
}
|
||||
return cfOpts
|
||||
}
|
@ -41,11 +41,13 @@ func (h *environmentDetailHandler) Handle(params metadata.GetEnvironmentDetailPa
|
||||
return metadata.NewGetEnvironmentDetailInternalServerError()
|
||||
}
|
||||
var sparkData map[string][]int64
|
||||
if cfg.Influx != nil {
|
||||
if cfg.Metrics != nil && cfg.Metrics.Influx != nil {
|
||||
sparkData, err = sparkDataForShares(shrs)
|
||||
if err != nil {
|
||||
logrus.Errorf("error querying spark data for shares for user '%v': %v", principal.Email, err)
|
||||
}
|
||||
} else {
|
||||
logrus.Debug("skipping spark data for shares; no influx configuration")
|
||||
}
|
||||
for _, shr := range shrs {
|
||||
feEndpoint := ""
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"github.com/openziti/edge/rest_management_api_client/service"
|
||||
"github.com/openziti/edge/rest_management_api_client/service_edge_router_policy"
|
||||
"github.com/openziti/edge/rest_management_api_client/service_policy"
|
||||
zrok_config "github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"github.com/pkg/errors"
|
||||
@ -16,7 +17,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func GC(inCfg *Config) error {
|
||||
func GC(inCfg *zrok_config.Config) error {
|
||||
cfg = inCfg
|
||||
if v, err := store.Open(cfg.Store); err == nil {
|
||||
str = v
|
||||
|
@ -2,6 +2,7 @@ package controller
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/rest_server_zrok/operations/account"
|
||||
"github.com/openziti/zrok/util"
|
||||
@ -9,10 +10,10 @@ import (
|
||||
)
|
||||
|
||||
type inviteHandler struct {
|
||||
cfg *Config
|
||||
cfg *config.Config
|
||||
}
|
||||
|
||||
func newInviteHandler(cfg *Config) *inviteHandler {
|
||||
func newInviteHandler(cfg *config.Config) *inviteHandler {
|
||||
return &inviteHandler{
|
||||
cfg: cfg,
|
||||
}
|
||||
|
44
controller/limits/accountLimitAction.go
Normal file
44
controller/limits/accountLimitAction.go
Normal file
@ -0,0 +1,44 @@
|
||||
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type accountLimitAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
}
|
||||
|
||||
func newAccountLimitAction(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *accountLimitAction {
|
||||
return &accountLimitAction{str, edge}
|
||||
}
|
||||
|
||||
func (a *accountLimitAction) HandleAccount(acct *store.Account, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("limiting '%v'", acct.Email)
|
||||
|
||||
envs, err := a.str.FindEnvironmentsForAccount(acct.Id, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding environments for account '%v'", acct.Email)
|
||||
}
|
||||
|
||||
for _, env := range envs {
|
||||
shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
|
||||
}
|
||||
|
||||
for _, shr := range shrs {
|
||||
if err := zrokEdgeSdk.DeleteServicePolicyDial(env.ZId, shr.Token, a.edge); err != nil {
|
||||
return errors.Wrapf(err, "error deleting dial service policy for '%v'", shr.Token)
|
||||
}
|
||||
logrus.Infof("removed dial service policy for share '%v' of environment '%v'", shr.Token, env.ZId)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
49
controller/limits/accountRelaxAction.go
Normal file
49
controller/limits/accountRelaxAction.go
Normal file
@ -0,0 +1,49 @@
|
||||
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type accountRelaxAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
}
|
||||
|
||||
func newAccountRelaxAction(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *accountRelaxAction {
|
||||
return &accountRelaxAction{str, edge}
|
||||
}
|
||||
|
||||
func (a *accountRelaxAction) HandleAccount(acct *store.Account, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("relaxing '%v'", acct.Email)
|
||||
|
||||
envs, err := a.str.FindEnvironmentsForAccount(acct.Id, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding environments for account '%v'", acct.Email)
|
||||
}
|
||||
|
||||
for _, env := range envs {
|
||||
shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
|
||||
}
|
||||
|
||||
for _, shr := range shrs {
|
||||
switch shr.ShareMode {
|
||||
case "public":
|
||||
if err := relaxPublicShare(a.str, a.edge, shr, trx); err != nil {
|
||||
return err
|
||||
}
|
||||
case "private":
|
||||
if err := relaxPrivateShare(a.str, a.edge, shr, trx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
49
controller/limits/accountWarningAction.go
Normal file
49
controller/limits/accountWarningAction.go
Normal file
@ -0,0 +1,49 @@
|
||||
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/emailUi"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type accountWarningAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
cfg *emailUi.Config
|
||||
}
|
||||
|
||||
func newAccountWarningAction(cfg *emailUi.Config, str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *accountWarningAction {
|
||||
return &accountWarningAction{str, edge, cfg}
|
||||
}
|
||||
|
||||
func (a *accountWarningAction) HandleAccount(acct *store.Account, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("warning '%v'", acct.Email)
|
||||
|
||||
rxLimit := "(unlimited bytes)"
|
||||
if limit.Limit.Rx != Unlimited {
|
||||
rxLimit = util.BytesToSize(limit.Limit.Rx)
|
||||
}
|
||||
txLimit := "(unlimited bytes)"
|
||||
if limit.Limit.Tx != Unlimited {
|
||||
txLimit = util.BytesToSize(limit.Limit.Tx)
|
||||
}
|
||||
totalLimit := "(unlimited bytes)"
|
||||
if limit.Limit.Total != Unlimited {
|
||||
totalLimit = util.BytesToSize(limit.Limit.Total)
|
||||
}
|
||||
|
||||
detail := newDetailMessage()
|
||||
detail = detail.append("Your account has received %v and sent %v (for a total of %v), which has triggered a transfer limit warning.", util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes))
|
||||
detail = detail.append("This zrok instance only allows an account to receive %v, send %v, totalling not more than %v for each %v.", rxLimit, txLimit, totalLimit, limit.Period)
|
||||
detail = detail.append("If you exceed the transfer limit, access to your shares will be temporarily disabled (until the last %v falls below the transfer limit)", limit.Period)
|
||||
|
||||
if err := sendLimitWarningEmail(a.cfg, acct.Email, detail); err != nil {
|
||||
return errors.Wrapf(err, "error sending limit warning email to '%v'", acct.Email)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
669
controller/limits/agent.go
Normal file
669
controller/limits/agent.go
Normal file
@ -0,0 +1,669 @@
|
||||
package limits
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/zrok/controller/emailUi"
|
||||
"github.com/openziti/zrok/controller/metrics"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"github.com/openziti/zrok/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Agent struct {
|
||||
cfg *Config
|
||||
ifx *influxReader
|
||||
zCfg *zrokEdgeSdk.Config
|
||||
str *store.Store
|
||||
queue chan *metrics.Usage
|
||||
acctWarningActions []AccountAction
|
||||
acctLimitActions []AccountAction
|
||||
acctRelaxActions []AccountAction
|
||||
envWarningActions []EnvironmentAction
|
||||
envLimitActions []EnvironmentAction
|
||||
envRelaxActions []EnvironmentAction
|
||||
shrWarningActions []ShareAction
|
||||
shrLimitActions []ShareAction
|
||||
shrRelaxActions []ShareAction
|
||||
close chan struct{}
|
||||
join chan struct{}
|
||||
}
|
||||
|
||||
func NewAgent(cfg *Config, ifxCfg *metrics.InfluxConfig, zCfg *zrokEdgeSdk.Config, emailCfg *emailUi.Config, str *store.Store) (*Agent, error) {
|
||||
edge, err := zrokEdgeSdk.Client(zCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a := &Agent{
|
||||
cfg: cfg,
|
||||
ifx: newInfluxReader(ifxCfg),
|
||||
zCfg: zCfg,
|
||||
str: str,
|
||||
queue: make(chan *metrics.Usage, 1024),
|
||||
acctWarningActions: []AccountAction{newAccountWarningAction(emailCfg, str, edge)},
|
||||
acctLimitActions: []AccountAction{newAccountLimitAction(str, edge)},
|
||||
acctRelaxActions: []AccountAction{newAccountRelaxAction(str, edge)},
|
||||
envWarningActions: []EnvironmentAction{newEnvironmentWarningAction(emailCfg, str, edge)},
|
||||
envLimitActions: []EnvironmentAction{newEnvironmentLimitAction(str, edge)},
|
||||
envRelaxActions: []EnvironmentAction{newEnvironmentRelaxAction(str, edge)},
|
||||
shrWarningActions: []ShareAction{newShareWarningAction(emailCfg, str, edge)},
|
||||
shrLimitActions: []ShareAction{newShareLimitAction(str, edge)},
|
||||
shrRelaxActions: []ShareAction{newShareRelaxAction(str, edge)},
|
||||
close: make(chan struct{}),
|
||||
join: make(chan struct{}),
|
||||
}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (a *Agent) Start() {
|
||||
go a.run()
|
||||
}
|
||||
|
||||
func (a *Agent) Stop() {
|
||||
close(a.close)
|
||||
<-a.join
|
||||
}
|
||||
|
||||
func (a *Agent) CanCreateEnvironment(acctId int, trx *sqlx.Tx) (bool, error) {
|
||||
if a.cfg.Enforcing {
|
||||
if empty, err := a.str.IsAccountLimitJournalEmpty(acctId, trx); err == nil && !empty {
|
||||
alj, err := a.str.FindLatestAccountLimitJournal(acctId, trx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if alj.Action == store.LimitAction {
|
||||
return false, nil
|
||||
}
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if a.cfg.Environments > Unlimited {
|
||||
envs, err := a.str.FindEnvironmentsForAccount(acctId, trx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(envs)+1 > a.cfg.Environments {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (a *Agent) CanCreateShare(acctId, envId int, trx *sqlx.Tx) (bool, error) {
|
||||
if a.cfg.Enforcing {
|
||||
if empty, err := a.str.IsAccountLimitJournalEmpty(acctId, trx); err == nil && !empty {
|
||||
alj, err := a.str.FindLatestAccountLimitJournal(acctId, trx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if alj.Action == store.LimitAction {
|
||||
return false, nil
|
||||
}
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if empty, err := a.str.IsEnvironmentLimitJournalEmpty(envId, trx); err == nil && !empty {
|
||||
elj, err := a.str.FindLatestEnvironmentLimitJournal(envId, trx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if elj.Action == store.LimitAction {
|
||||
return false, nil
|
||||
}
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if a.cfg.Shares > Unlimited {
|
||||
envs, err := a.str.FindEnvironmentsForAccount(acctId, trx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
total := 0
|
||||
for i := range envs {
|
||||
shrs, err := a.str.FindSharesForEnvironment(envs[i].Id, trx)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "unable to find shares for environment '%v'", envs[i].ZId)
|
||||
}
|
||||
total += len(shrs)
|
||||
if total+1 > a.cfg.Shares {
|
||||
return false, nil
|
||||
}
|
||||
logrus.Infof("total = %d", total)
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (a *Agent) Handle(u *metrics.Usage) error {
|
||||
logrus.Debugf("handling: %v", u)
|
||||
a.queue <- u
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Agent) run() {
|
||||
logrus.Info("started")
|
||||
defer logrus.Info("stopped")
|
||||
|
||||
lastCycle := time.Now()
|
||||
mainLoop:
|
||||
for {
|
||||
select {
|
||||
case usage := <-a.queue:
|
||||
if err := a.enforce(usage); err != nil {
|
||||
logrus.Errorf("error running enforcement: %v", err)
|
||||
}
|
||||
if time.Since(lastCycle) > a.cfg.Cycle {
|
||||
if err := a.relax(); err != nil {
|
||||
logrus.Errorf("error running relax cycle: %v", err)
|
||||
}
|
||||
lastCycle = time.Now()
|
||||
}
|
||||
|
||||
case <-time.After(a.cfg.Cycle):
|
||||
if err := a.relax(); err != nil {
|
||||
logrus.Errorf("error running relax cycle: %v", err)
|
||||
}
|
||||
lastCycle = time.Now()
|
||||
|
||||
case <-a.close:
|
||||
close(a.join)
|
||||
break mainLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Agent) enforce(u *metrics.Usage) error {
|
||||
trx, err := a.str.Begin()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error starting transaction")
|
||||
}
|
||||
defer func() { _ = trx.Rollback() }()
|
||||
|
||||
if enforce, warning, rxBytes, txBytes, err := a.checkAccountLimit(u.AccountId); err == nil {
|
||||
if enforce {
|
||||
enforced := false
|
||||
var enforcedAt time.Time
|
||||
if empty, err := a.str.IsAccountLimitJournalEmpty(int(u.AccountId), trx); err == nil && !empty {
|
||||
if latest, err := a.str.FindLatestAccountLimitJournal(int(u.AccountId), trx); err == nil {
|
||||
enforced = latest.Action == store.LimitAction
|
||||
enforcedAt = latest.UpdatedAt
|
||||
}
|
||||
}
|
||||
|
||||
if !enforced {
|
||||
_, err := a.str.CreateAccountLimitJournal(&store.AccountLimitJournal{
|
||||
AccountId: int(u.AccountId),
|
||||
RxBytes: rxBytes,
|
||||
TxBytes: txBytes,
|
||||
Action: store.LimitAction,
|
||||
}, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acct, err := a.str.GetAccount(int(u.AccountId), trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// run account limit actions
|
||||
for _, action := range a.acctLimitActions {
|
||||
if err := action.HandleAccount(acct, rxBytes, txBytes, a.cfg.Bandwidth.PerAccount, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
if err := trx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("already enforced limit for account '#%d' at %v", u.AccountId, enforcedAt)
|
||||
}
|
||||
|
||||
} else if warning {
|
||||
warned := false
|
||||
var warnedAt time.Time
|
||||
if empty, err := a.str.IsAccountLimitJournalEmpty(int(u.AccountId), trx); err == nil && !empty {
|
||||
if latest, err := a.str.FindLatestAccountLimitJournal(int(u.AccountId), trx); err == nil {
|
||||
warned = latest.Action == store.WarningAction || latest.Action == store.LimitAction
|
||||
warnedAt = latest.UpdatedAt
|
||||
}
|
||||
}
|
||||
|
||||
if !warned {
|
||||
_, err := a.str.CreateAccountLimitJournal(&store.AccountLimitJournal{
|
||||
AccountId: int(u.AccountId),
|
||||
RxBytes: rxBytes,
|
||||
TxBytes: txBytes,
|
||||
Action: store.WarningAction,
|
||||
}, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
acct, err := a.str.GetAccount(int(u.AccountId), trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// run account warning actions
|
||||
for _, action := range a.acctWarningActions {
|
||||
if err := action.HandleAccount(acct, rxBytes, txBytes, a.cfg.Bandwidth.PerAccount, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
if err := trx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("already warned account '#%d' at %v", u.AccountId, warnedAt)
|
||||
}
|
||||
|
||||
} else {
|
||||
if enforce, warning, rxBytes, txBytes, err := a.checkEnvironmentLimit(u.EnvironmentId); err == nil {
|
||||
if enforce {
|
||||
enforced := false
|
||||
var enforcedAt time.Time
|
||||
if empty, err := a.str.IsEnvironmentLimitJournalEmpty(int(u.EnvironmentId), trx); err == nil && !empty {
|
||||
if latest, err := a.str.FindLatestEnvironmentLimitJournal(int(u.EnvironmentId), trx); err == nil {
|
||||
enforced = latest.Action == store.LimitAction
|
||||
enforcedAt = latest.UpdatedAt
|
||||
}
|
||||
}
|
||||
|
||||
if !enforced {
|
||||
_, err := a.str.CreateEnvironmentLimitJournal(&store.EnvironmentLimitJournal{
|
||||
EnvironmentId: int(u.EnvironmentId),
|
||||
RxBytes: rxBytes,
|
||||
TxBytes: txBytes,
|
||||
Action: store.LimitAction,
|
||||
}, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env, err := a.str.GetEnvironment(int(u.EnvironmentId), trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// run environment limit actions
|
||||
for _, action := range a.envLimitActions {
|
||||
if err := action.HandleEnvironment(env, rxBytes, txBytes, a.cfg.Bandwidth.PerEnvironment, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
if err := trx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("already enforced limit for environment '#%d' at %v", u.EnvironmentId, enforcedAt)
|
||||
}
|
||||
|
||||
} else if warning {
|
||||
warned := false
|
||||
var warnedAt time.Time
|
||||
if empty, err := a.str.IsEnvironmentLimitJournalEmpty(int(u.EnvironmentId), trx); err == nil && !empty {
|
||||
if latest, err := a.str.FindLatestEnvironmentLimitJournal(int(u.EnvironmentId), trx); err == nil {
|
||||
warned = latest.Action == store.WarningAction || latest.Action == store.LimitAction
|
||||
warnedAt = latest.UpdatedAt
|
||||
}
|
||||
}
|
||||
|
||||
if !warned {
|
||||
_, err := a.str.CreateEnvironmentLimitJournal(&store.EnvironmentLimitJournal{
|
||||
EnvironmentId: int(u.EnvironmentId),
|
||||
RxBytes: rxBytes,
|
||||
TxBytes: txBytes,
|
||||
Action: store.WarningAction,
|
||||
}, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env, err := a.str.GetEnvironment(int(u.EnvironmentId), trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// run environment warning actions
|
||||
for _, action := range a.envWarningActions {
|
||||
if err := action.HandleEnvironment(env, rxBytes, txBytes, a.cfg.Bandwidth.PerEnvironment, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
if err := trx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("already warned environment '#%d' at %v", u.EnvironmentId, warnedAt)
|
||||
}
|
||||
|
||||
} else {
|
||||
if enforce, warning, rxBytes, txBytes, err := a.checkShareLimit(u.ShareToken); err == nil {
|
||||
if enforce {
|
||||
shr, err := a.str.FindShareWithToken(u.ShareToken, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enforced := false
|
||||
var enforcedAt time.Time
|
||||
if empty, err := a.str.IsShareLimitJournalEmpty(shr.Id, trx); err == nil && !empty {
|
||||
if latest, err := a.str.FindLatestShareLimitJournal(shr.Id, trx); err == nil {
|
||||
enforced = latest.Action == store.LimitAction
|
||||
enforcedAt = latest.UpdatedAt
|
||||
}
|
||||
}
|
||||
|
||||
if !enforced {
|
||||
_, err := a.str.CreateShareLimitJournal(&store.ShareLimitJournal{
|
||||
ShareId: shr.Id,
|
||||
RxBytes: rxBytes,
|
||||
TxBytes: txBytes,
|
||||
Action: store.LimitAction,
|
||||
}, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// run share limit actions
|
||||
for _, action := range a.shrLimitActions {
|
||||
if err := action.HandleShare(shr, rxBytes, txBytes, a.cfg.Bandwidth.PerShare, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
if err := trx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("already enforced limit for share '%v' at %v", shr.Token, enforcedAt)
|
||||
}
|
||||
|
||||
} else if warning {
|
||||
shr, err := a.str.FindShareWithToken(u.ShareToken, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
warned := false
|
||||
var warnedAt time.Time
|
||||
if empty, err := a.str.IsShareLimitJournalEmpty(shr.Id, trx); err == nil && !empty {
|
||||
if latest, err := a.str.FindLatestShareLimitJournal(shr.Id, trx); err == nil {
|
||||
warned = latest.Action == store.WarningAction || latest.Action == store.LimitAction
|
||||
warnedAt = latest.UpdatedAt
|
||||
}
|
||||
}
|
||||
|
||||
if !warned {
|
||||
_, err := a.str.CreateShareLimitJournal(&store.ShareLimitJournal{
|
||||
ShareId: shr.Id,
|
||||
RxBytes: rxBytes,
|
||||
TxBytes: txBytes,
|
||||
Action: store.WarningAction,
|
||||
}, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// run share warning actions
|
||||
for _, action := range a.shrWarningActions {
|
||||
if err := action.HandleShare(shr, rxBytes, txBytes, a.cfg.Bandwidth.PerShare, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
if err := trx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
logrus.Debugf("already warned share '%v' at %v", shr.Token, warnedAt)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Error(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Agent) relax() error {
|
||||
logrus.Debug("relaxing")
|
||||
|
||||
trx, err := a.str.Begin()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error starting transaction")
|
||||
}
|
||||
defer func() { _ = trx.Rollback() }()
|
||||
|
||||
commit := false
|
||||
|
||||
if sljs, err := a.str.FindAllLatestShareLimitJournal(trx); err == nil {
|
||||
for _, slj := range sljs {
|
||||
if shr, err := a.str.GetShare(slj.ShareId, trx); err == nil {
|
||||
if slj.Action == store.WarningAction || slj.Action == store.LimitAction {
|
||||
if enforce, warning, rxBytes, txBytes, err := a.checkShareLimit(shr.Token); err == nil {
|
||||
if !enforce && !warning {
|
||||
if slj.Action == store.LimitAction {
|
||||
// run relax actions for share
|
||||
for _, action := range a.shrRelaxActions {
|
||||
if err := action.HandleShare(shr, rxBytes, txBytes, a.cfg.Bandwidth.PerShare, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Infof("relaxing warning for '%v'", shr.Token)
|
||||
}
|
||||
if err := a.str.DeleteShareLimitJournalForShare(shr.Id, trx); err == nil {
|
||||
commit = true
|
||||
} else {
|
||||
logrus.Errorf("error deleting share_limit_journal for '%v'", shr.Token)
|
||||
}
|
||||
} else {
|
||||
logrus.Infof("share '%v' still over limit", shr.Token)
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("error checking share limit for '%v': %v", shr.Token, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("error getting share for '#%d': %v", slj.ShareId, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
if eljs, err := a.str.FindAllLatestEnvironmentLimitJournal(trx); err == nil {
|
||||
for _, elj := range eljs {
|
||||
if env, err := a.str.GetEnvironment(elj.EnvironmentId, trx); err == nil {
|
||||
if elj.Action == store.WarningAction || elj.Action == store.LimitAction {
|
||||
if enforce, warning, rxBytes, txBytes, err := a.checkEnvironmentLimit(int64(elj.EnvironmentId)); err == nil {
|
||||
if !enforce && !warning {
|
||||
if elj.Action == store.LimitAction {
|
||||
// run relax actions for environment
|
||||
for _, action := range a.envRelaxActions {
|
||||
if err := action.HandleEnvironment(env, rxBytes, txBytes, a.cfg.Bandwidth.PerEnvironment, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Infof("relaxing warning for '%v'", env.ZId)
|
||||
}
|
||||
if err := a.str.DeleteEnvironmentLimitJournalForEnvironment(env.Id, trx); err == nil {
|
||||
commit = true
|
||||
} else {
|
||||
logrus.Errorf("error deleteing environment_limit_journal for '%v': %v", env.ZId, err)
|
||||
}
|
||||
} else {
|
||||
logrus.Infof("environment '%v' still over limit", env.ZId)
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("error checking environment limit for '%v': %v", env.ZId, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("error getting environment for '#%d': %v", elj.EnvironmentId, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
if aljs, err := a.str.FindAllLatestAccountLimitJournal(trx); err == nil {
|
||||
for _, alj := range aljs {
|
||||
if acct, err := a.str.GetAccount(alj.AccountId, trx); err == nil {
|
||||
if alj.Action == store.WarningAction || alj.Action == store.LimitAction {
|
||||
if enforce, warning, rxBytes, txBytes, err := a.checkAccountLimit(int64(alj.AccountId)); err == nil {
|
||||
if !enforce && !warning {
|
||||
if alj.Action == store.LimitAction {
|
||||
// run relax actions for account
|
||||
for _, action := range a.acctRelaxActions {
|
||||
if err := action.HandleAccount(acct, rxBytes, txBytes, a.cfg.Bandwidth.PerAccount, trx); err != nil {
|
||||
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Infof("relaxing warning for '%v'", acct.Email)
|
||||
}
|
||||
if err := a.str.DeleteAccountLimitJournalForAccount(acct.Id, trx); err == nil {
|
||||
commit = true
|
||||
} else {
|
||||
logrus.Errorf("error deleting account_limit_journal for '%v': %v", acct.Email, err)
|
||||
}
|
||||
} else {
|
||||
logrus.Infof("account '%v' still over limit", acct.Email)
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("error checking account limit for '%v': %v", acct.Email, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("error getting account for '#%d': %v", alj.AccountId, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
if commit {
|
||||
if err := trx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Agent) checkAccountLimit(acctId int64) (enforce, warning bool, rxBytes, txBytes int64, err error) {
    period := 24 * time.Hour
    limit := DefaultBandwidthPerPeriod()
    if a.cfg.Bandwidth != nil && a.cfg.Bandwidth.PerAccount != nil {
        limit = a.cfg.Bandwidth.PerAccount
    }
    if limit.Period > 0 {
        period = limit.Period
    }
    rx, tx, err := a.ifx.totalRxTxForAccount(acctId, period)
    if err != nil {
        logrus.Error(err)
    }

    enforce, warning = a.checkLimit(limit, rx, tx)
    return enforce, warning, rx, tx, nil
}

func (a *Agent) checkEnvironmentLimit(envId int64) (enforce, warning bool, rxBytes, txBytes int64, err error) {
    period := 24 * time.Hour
    limit := DefaultBandwidthPerPeriod()
    if a.cfg.Bandwidth != nil && a.cfg.Bandwidth.PerEnvironment != nil {
        limit = a.cfg.Bandwidth.PerEnvironment
    }
    if limit.Period > 0 {
        period = limit.Period
    }
    rx, tx, err := a.ifx.totalRxTxForEnvironment(envId, period)
    if err != nil {
        logrus.Error(err)
    }

    enforce, warning = a.checkLimit(limit, rx, tx)
    return enforce, warning, rx, tx, nil
}

func (a *Agent) checkShareLimit(shrToken string) (enforce, warning bool, rxBytes, txBytes int64, err error) {
    period := 24 * time.Hour
    limit := DefaultBandwidthPerPeriod()
    if a.cfg.Bandwidth != nil && a.cfg.Bandwidth.PerShare != nil {
        limit = a.cfg.Bandwidth.PerShare
    }
    if limit.Period > 0 {
        period = limit.Period
    }
    rx, tx, err := a.ifx.totalRxTxForShare(shrToken, period)
    if err != nil {
        logrus.Error(err)
    }

    enforce, warning = a.checkLimit(limit, rx, tx)
    if enforce || warning {
        logrus.Debugf("'%v': %v", shrToken, describeLimit(limit, rx, tx))
    }

    return enforce, warning, rx, tx, nil
}

func (a *Agent) checkLimit(cfg *BandwidthPerPeriod, rx, tx int64) (enforce, warning bool) {
    if cfg.Limit.Rx != Unlimited && rx > cfg.Limit.Rx {
        return true, false
    }
    if cfg.Limit.Tx != Unlimited && tx > cfg.Limit.Tx {
        return true, false
    }
    if cfg.Limit.Total != Unlimited && rx+tx > cfg.Limit.Total {
        return true, false
    }

    if cfg.Warning.Rx != Unlimited && rx > cfg.Warning.Rx {
        return false, true
    }
    if cfg.Warning.Tx != Unlimited && tx > cfg.Warning.Tx {
        return false, true
    }
    if cfg.Warning.Total != Unlimited && rx+tx > cfg.Warning.Total {
        return false, true
    }

    return false, false
}

func describeLimit(cfg *BandwidthPerPeriod, rx, tx int64) string {
    out := ""

    if cfg.Limit.Rx != Unlimited && rx > cfg.Limit.Rx {
        out += fmt.Sprintf("['%v' over rx limit '%v']", util.BytesToSize(rx), util.BytesToSize(cfg.Limit.Rx))
    }
    if cfg.Limit.Tx != Unlimited && tx > cfg.Limit.Tx {
        out += fmt.Sprintf("['%v' over tx limit '%v']", util.BytesToSize(tx), util.BytesToSize(cfg.Limit.Tx))
    }
    if cfg.Limit.Total != Unlimited && rx+tx > cfg.Limit.Total {
        out += fmt.Sprintf("['%v' over total limit '%v']", util.BytesToSize(rx+tx), util.BytesToSize(cfg.Limit.Total))
    }

    if cfg.Warning.Rx != Unlimited && rx > cfg.Warning.Rx {
        out += fmt.Sprintf("['%v' over rx warning '%v']", util.BytesToSize(rx), util.BytesToSize(cfg.Warning.Rx))
    }
    if cfg.Warning.Tx != Unlimited && tx > cfg.Warning.Tx {
        out += fmt.Sprintf("['%v' over tx warning '%v']", util.BytesToSize(tx), util.BytesToSize(cfg.Warning.Tx))
    }
    if cfg.Warning.Total != Unlimited && rx+tx > cfg.Warning.Total {
        out += fmt.Sprintf("['%v' over total warning '%v']", util.BytesToSize(rx+tx), util.BytesToSize(cfg.Warning.Total))
    }

    return out
}
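
The precedence above matters: hard limit thresholds are checked before warning thresholds, so usage that crosses both only reports enforcement. A minimal sketch of that behavior, with invented byte counts and 'a' standing in for a limits Agent instance:

    limit := &BandwidthPerPeriod{
        Period:  24 * time.Hour,
        Warning: &Bandwidth{Rx: Unlimited, Tx: Unlimited, Total: 7 * 1024 * 1024},  // warn at 7 MB combined
        Limit:   &Bandwidth{Rx: Unlimited, Tx: Unlimited, Total: 10 * 1024 * 1024}, // enforce at 10 MB combined
    }

    enforce, warning := a.checkLimit(limit, 3*1024*1024, 5*1024*1024) // 8 MB combined
    // enforce == false, warning == true: only the warning threshold is crossed

    enforce, warning = a.checkLimit(limit, 6*1024*1024, 6*1024*1024) // 12 MB combined
    // enforce == true, warning == false: the hard limit takes precedence over the warning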

controller/limits/config.go (new file, 61 lines)
@ -0,0 +1,61 @@
package limits

import "time"

const Unlimited = -1

type Config struct {
    Environments int
    Shares       int
    Bandwidth    *BandwidthConfig
    Cycle        time.Duration
    Enforcing    bool
}

type BandwidthConfig struct {
    PerAccount     *BandwidthPerPeriod
    PerEnvironment *BandwidthPerPeriod
    PerShare       *BandwidthPerPeriod
}

type BandwidthPerPeriod struct {
    Period  time.Duration
    Warning *Bandwidth
    Limit   *Bandwidth
}

type Bandwidth struct {
    Rx    int64
    Tx    int64
    Total int64
}

func DefaultBandwidthPerPeriod() *BandwidthPerPeriod {
    return &BandwidthPerPeriod{
        Period: 24 * time.Hour,
        Warning: &Bandwidth{
            Rx:    Unlimited,
            Tx:    Unlimited,
            Total: Unlimited,
        },
        Limit: &Bandwidth{
            Rx:    Unlimited,
            Tx:    Unlimited,
            Total: Unlimited,
        },
    }
}

func DefaultConfig() *Config {
    return &Config{
        Environments: Unlimited,
        Shares:       Unlimited,
        Bandwidth: &BandwidthConfig{
            PerAccount:     DefaultBandwidthPerPeriod(),
            PerEnvironment: DefaultBandwidthPerPeriod(),
            PerShare:       DefaultBandwidthPerPeriod(),
        },
        Enforcing: false,
        Cycle:     15 * time.Minute,
    }
}
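
These defaults leave everything unlimited, with a 15-minute enforcement cycle. A rough sketch of an operator override in code, with invented values (a real deployment expresses this in the controller's v3 YAML configuration instead):

    // Hypothetical: 2 environments and 10 shares per account, roughly 1 GB/day account bandwidth.
    cfg := DefaultConfig()
    cfg.Environments = 2
    cfg.Shares = 10
    cfg.Enforcing = true
    cfg.Bandwidth.PerAccount = &BandwidthPerPeriod{
        Period:  24 * time.Hour,
        Warning: &Bandwidth{Rx: Unlimited, Tx: Unlimited, Total: 800 * 1024 * 1024},
        Limit:   &Bandwidth{Rx: Unlimited, Tx: Unlimited, Total: 1024 * 1024 * 1024},
    }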

controller/limits/email.go (new file, 92 lines)
@ -0,0 +1,92 @@
package limits
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/openziti/zrok/build"
|
||||
"github.com/openziti/zrok/controller/emailUi"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/wneessen/go-mail"
|
||||
)
|
||||
|
||||
type detailMessage struct {
|
||||
lines []string
|
||||
}
|
||||
|
||||
func newDetailMessage() *detailMessage {
|
||||
return &detailMessage{}
|
||||
}
|
||||
|
||||
func (m *detailMessage) append(msg string, args ...interface{}) *detailMessage {
|
||||
m.lines = append(m.lines, fmt.Sprintf(msg, args...))
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *detailMessage) html() string {
|
||||
out := ""
|
||||
for i := range m.lines {
|
||||
out += fmt.Sprintf("<p style=\"text-align: left;\">%s</p>\n", m.lines[i])
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (m *detailMessage) plain() string {
|
||||
out := ""
|
||||
for i := range m.lines {
|
||||
out += fmt.Sprintf("%s\n\n", m.lines[i])
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func sendLimitWarningEmail(cfg *emailUi.Config, emailTo string, d *detailMessage) error {
|
||||
emailData := &emailUi.WarningEmail{
|
||||
EmailAddress: emailTo,
|
||||
Version: build.String(),
|
||||
}
|
||||
|
||||
emailData.Detail = d.plain()
|
||||
plainBody, err := emailData.MergeTemplate("limitWarning.gotext")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
emailData.Detail = d.html()
|
||||
htmlBody, err := emailData.MergeTemplate("limitWarning.gohtml")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg := mail.NewMsg()
|
||||
if err := msg.From(cfg.From); err != nil {
|
||||
return errors.Wrap(err, "failed to set from address in limit warning email")
|
||||
}
|
||||
if err := msg.To(emailTo); err != nil {
|
||||
return errors.Wrap(err, "failed to set to address in limit warning email")
|
||||
}
|
||||
|
||||
msg.Subject("zrok Limit Warning Notification")
|
||||
msg.SetDate()
|
||||
msg.SetMessageID()
|
||||
msg.SetBulk()
|
||||
msg.SetImportance(mail.ImportanceHigh)
|
||||
msg.SetBodyString(mail.TypeTextPlain, plainBody)
|
||||
msg.SetBodyString(mail.TypeTextHTML, htmlBody)
|
||||
|
||||
client, err := mail.NewClient(cfg.Host,
|
||||
mail.WithPort(cfg.Port),
|
||||
mail.WithSMTPAuth(mail.SMTPAuthPlain),
|
||||
mail.WithUsername(cfg.Username),
|
||||
mail.WithPassword(cfg.Password),
|
||||
mail.WithTLSPolicy(mail.TLSMandatory),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error creating limit warning email client")
|
||||
}
|
||||
if err := client.DialAndSend(msg); err != nil {
|
||||
return errors.Wrap(err, "error sending limit warning email")
|
||||
}
|
||||
|
||||
logrus.Infof("limit warning email sent to '%v'", emailTo)
|
||||
return nil
|
||||
}
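
The warning actions below build a detailMessage and hand it to this helper. A hedged sketch of that call path, with placeholder SMTP settings and addresses (none of these values come from the diff):

    smtp := &emailUi.Config{
        Host:     "smtp.example.com",
        Port:     587,
        Username: "zrok",
        Password: "changeme",
        From:     "limits@zrok.example.com",
    }

    detail := newDetailMessage().
        append("Your share '%v' has crossed its transfer warning threshold.", "abcd1234").
        append("Access will be disabled if the hard limit is exceeded.")

    if err := sendLimitWarningEmail(smtp, "user@example.com", detail); err != nil {
        logrus.Error(err)
    }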

controller/limits/environmentLimitAction.go (new file, 37 lines)
@ -0,0 +1,37 @@
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type environmentLimitAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
}
|
||||
|
||||
func newEnvironmentLimitAction(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *environmentLimitAction {
|
||||
return &environmentLimitAction{str, edge}
|
||||
}
|
||||
|
||||
func (a *environmentLimitAction) HandleEnvironment(env *store.Environment, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("limiting '%v'", env.ZId)
|
||||
|
||||
shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
|
||||
}
|
||||
|
||||
for _, shr := range shrs {
|
||||
if err := zrokEdgeSdk.DeleteServicePolicyDial(env.ZId, shr.Token, a.edge); err != nil {
|
||||
return errors.Wrapf(err, "error deleting dial service policy for '%v'", shr.Token)
|
||||
}
|
||||
logrus.Infof("removed dial service policy for share '%v' of environment '%v'", shr.Token, env.ZId)
|
||||
}
|
||||
|
||||
return nil
|
||||
}

controller/limits/environmentRelaxAction.go (new file, 44 lines)
@ -0,0 +1,44 @@
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type environmentRelaxAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
}
|
||||
|
||||
func newEnvironmentRelaxAction(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *environmentRelaxAction {
|
||||
return &environmentRelaxAction{str, edge}
|
||||
}
|
||||
|
||||
func (a *environmentRelaxAction) HandleEnvironment(env *store.Environment, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("relaxing '%v'", env.ZId)
|
||||
|
||||
shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
|
||||
}
|
||||
|
||||
for _, shr := range shrs {
|
||||
if !shr.Deleted {
|
||||
switch shr.ShareMode {
|
||||
case "public":
|
||||
if err := relaxPublicShare(a.str, a.edge, shr, trx); err != nil {
|
||||
return err
|
||||
}
|
||||
case "private":
|
||||
if err := relaxPrivateShare(a.str, a.edge, shr, trx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}

controller/limits/environmentWarningAction.go (new file, 56 lines)
@ -0,0 +1,56 @@
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/emailUi"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type environmentWarningAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
cfg *emailUi.Config
|
||||
}
|
||||
|
||||
func newEnvironmentWarningAction(cfg *emailUi.Config, str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *environmentWarningAction {
|
||||
return &environmentWarningAction{str, edge, cfg}
|
||||
}
|
||||
|
||||
func (a *environmentWarningAction) HandleEnvironment(env *store.Environment, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("warning '%v'", env.ZId)
|
||||
|
||||
if env.AccountId != nil {
|
||||
acct, err := a.str.GetAccount(*env.AccountId, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rxLimit := "unlimited bytes"
|
||||
if limit.Limit.Rx != Unlimited {
|
||||
rxLimit = util.BytesToSize(limit.Limit.Rx)
|
||||
}
|
||||
txLimit := "unlimited bytes"
|
||||
if limit.Limit.Tx != Unlimited {
|
||||
txLimit = util.BytesToSize(limit.Limit.Tx)
|
||||
}
|
||||
totalLimit := "unlimited bytes"
|
||||
if limit.Limit.Total != Unlimited {
|
||||
totalLimit = util.BytesToSize(limit.Limit.Total)
|
||||
}
|
||||
|
||||
detail := newDetailMessage()
|
||||
detail = detail.append("Your environment '%v' has received %v and sent %v (for a total of %v), which has triggered a transfer limit warning.", env.Description, util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes))
|
||||
detail = detail.append("This zrok instance only allows a share to receive %v, send %v, totalling not more than %v for each %v.", rxLimit, txLimit, totalLimit, limit.Period)
|
||||
detail = detail.append("If you exceed the transfer limit, access to your shares will be temporarily disabled (until the last %v falls below the transfer limit).", limit.Period)
|
||||
|
||||
if err := sendLimitWarningEmail(a.cfg, acct.Email, detail); err != nil {
|
||||
return errors.Wrapf(err, "error sending limit warning email to '%v'", acct.Email)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}

controller/limits/influxReader.go (new file, 88 lines)
@ -0,0 +1,88 @@
package limits
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
||||
"github.com/influxdata/influxdb-client-go/v2/api"
|
||||
"github.com/openziti/zrok/controller/metrics"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type influxReader struct {
|
||||
cfg *metrics.InfluxConfig
|
||||
idb influxdb2.Client
|
||||
queryApi api.QueryAPI
|
||||
}
|
||||
|
||||
func newInfluxReader(cfg *metrics.InfluxConfig) *influxReader {
|
||||
idb := influxdb2.NewClient(cfg.Url, cfg.Token)
|
||||
queryApi := idb.QueryAPI(cfg.Org)
|
||||
return &influxReader{cfg, idb, queryApi}
|
||||
}
|
||||
|
||||
func (r *influxReader) totalRxTxForAccount(acctId int64, duration time.Duration) (int64, int64, error) {
|
||||
query := fmt.Sprintf("from(bucket: \"%v\")\n", r.cfg.Bucket) +
|
||||
fmt.Sprintf("|> range(start: -%v)\n", duration) +
|
||||
"|> filter(fn: (r) => r[\"_measurement\"] == \"xfer\")\n" +
|
||||
"|> filter(fn: (r) => r[\"_field\"] == \"rx\" or r[\"_field\"] == \"tx\")\n" +
|
||||
"|> filter(fn: (r) => r[\"namespace\"] == \"backend\")\n" +
|
||||
fmt.Sprintf("|> filter(fn: (r) => r[\"acctId\"] == \"%d\")\n", acctId) +
|
||||
"|> drop(columns: [\"share\", \"envId\"])\n" +
|
||||
"|> sum()"
|
||||
return r.runQueryForRxTx(query)
|
||||
}
|
||||
|
||||
func (r *influxReader) totalRxTxForEnvironment(envId int64, duration time.Duration) (int64, int64, error) {
|
||||
query := fmt.Sprintf("from(bucket: \"%v\")\n", r.cfg.Bucket) +
|
||||
fmt.Sprintf("|> range(start: -%v)\n", duration) +
|
||||
"|> filter(fn: (r) => r[\"_measurement\"] == \"xfer\")\n" +
|
||||
"|> filter(fn: (r) => r[\"_field\"] == \"rx\" or r[\"_field\"] == \"tx\")\n" +
|
||||
"|> filter(fn: (r) => r[\"namespace\"] == \"backend\")\n" +
|
||||
fmt.Sprintf("|> filter(fn: (r) => r[\"envId\"] == \"%d\")\n", envId) +
|
||||
"|> drop(columns: [\"share\", \"acctId\"])\n" +
|
||||
"|> sum()"
|
||||
return r.runQueryForRxTx(query)
|
||||
}
|
||||
|
||||
func (r *influxReader) totalRxTxForShare(shrToken string, duration time.Duration) (int64, int64, error) {
|
||||
query := fmt.Sprintf("from(bucket: \"%v\")\n", r.cfg.Bucket) +
|
||||
fmt.Sprintf("|> range(start: -%v)\n", duration) +
|
||||
"|> filter(fn: (r) => r[\"_measurement\"] == \"xfer\")\n" +
|
||||
"|> filter(fn: (r) => r[\"_field\"] == \"rx\" or r[\"_field\"] == \"tx\")\n" +
|
||||
"|> filter(fn: (r) => r[\"namespace\"] == \"backend\")\n" +
|
||||
fmt.Sprintf("|> filter(fn: (r) => r[\"share\"] == \"%v\")\n", shrToken) +
|
||||
"|> sum()"
|
||||
return r.runQueryForRxTx(query)
|
||||
}
|
||||
|
||||
func (r *influxReader) runQueryForRxTx(query string) (rx int64, tx int64, err error) {
|
||||
result, err := r.queryApi.Query(context.Background(), query)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
|
||||
count := 0
|
||||
for result.Next() {
|
||||
if v, ok := result.Record().Value().(int64); ok {
|
||||
switch result.Record().Field() {
|
||||
case "tx":
|
||||
tx = v
|
||||
case "rx":
|
||||
rx = v
|
||||
default:
|
||||
logrus.Warnf("field '%v'?", result.Record().Field())
|
||||
}
|
||||
} else {
|
||||
return -1, -1, errors.New("error asserting value type")
|
||||
}
|
||||
count++
|
||||
}
|
||||
if count != 0 && count != 2 {
|
||||
return -1, -1, errors.Errorf("expected 2 results; got '%d' (%v)", count, strings.ReplaceAll(query, "\n", ""))
|
||||
}
|
||||
return rx, tx, nil
|
||||
}
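
From inside the limits package, the agent's per-share accounting reduces to a single call; a hypothetical example (the share token and the 'ifxCfg' InfluxConfig are invented):

    rx, tx, err := newInfluxReader(ifxCfg).totalRxTxForShare("abcd1234", 24*time.Hour)
    if err != nil {
        logrus.Error(err)
    }
    logrus.Infof("last 24h: rx %v, tx %v", util.BytesToSize(rx), util.BytesToSize(tx))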

controller/limits/model.go (new file, 18 lines)
@ -0,0 +1,18 @@
package limits

import (
    "github.com/jmoiron/sqlx"
    "github.com/openziti/zrok/controller/store"
)

type AccountAction interface {
    HandleAccount(a *store.Account, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error
}

type EnvironmentAction interface {
    HandleEnvironment(e *store.Environment, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error
}

type ShareAction interface {
    HandleShare(s *store.Share, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error
}
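
These interfaces are what the limit, warning, and relax actions below implement. Purely as an illustration (not part of the change), a no-op ShareAction that only logs would look like:

    // logShareAction satisfies ShareAction without touching the store or the Ziti edge.
    type logShareAction struct{}

    func (a *logShareAction) HandleShare(s *store.Share, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
        logrus.Infof("share '%v' used rx %d / tx %d bytes against period %v", s.Token, rxBytes, txBytes, limit.Period)
        return nil
    }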

controller/limits/shareLimitAction.go (new file, 34 lines)
@ -0,0 +1,34 @@
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type shareLimitAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
}
|
||||
|
||||
func newShareLimitAction(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *shareLimitAction {
|
||||
return &shareLimitAction{str, edge}
|
||||
}
|
||||
|
||||
func (a *shareLimitAction) HandleShare(shr *store.Share, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("limiting '%v'", shr.Token)
|
||||
|
||||
env, err := a.str.GetEnvironment(shr.EnvironmentId, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := zrokEdgeSdk.DeleteServicePolicyDial(env.ZId, shr.Token, a.edge); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("removed dial service policy for '%v'", shr.Token)
|
||||
|
||||
return nil
|
||||
}

controller/limits/shareRelaxAction.go (new file, 83 lines)
@ -0,0 +1,83 @@
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/controller/zrokEdgeSdk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type shareRelaxAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
}
|
||||
|
||||
func newShareRelaxAction(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *shareRelaxAction {
|
||||
return &shareRelaxAction{str, edge}
|
||||
}
|
||||
|
||||
func (a *shareRelaxAction) HandleShare(shr *store.Share, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("relaxing '%v'", shr.Token)
|
||||
|
||||
if !shr.Deleted {
|
||||
switch shr.ShareMode {
|
||||
case "public":
|
||||
if err := relaxPublicShare(a.str, a.edge, shr, trx); err != nil {
|
||||
return err
|
||||
}
|
||||
case "private":
|
||||
if err := relaxPrivateShare(a.str, a.edge, shr, trx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func relaxPublicShare(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement, shr *store.Share, trx *sqlx.Tx) error {
|
||||
env, err := str.GetEnvironment(shr.EnvironmentId, trx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error finding environment")
|
||||
}
|
||||
|
||||
fe, err := str.FindFrontendPubliclyNamed(*shr.FrontendSelection, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding frontend name '%v' for '%v'", *shr.FrontendSelection, shr.Token)
|
||||
}
|
||||
|
||||
if err := zrokEdgeSdk.CreateServicePolicyDial(env.ZId+"-"+shr.ZId+"-dial", shr.ZId, []string{fe.ZId}, zrokEdgeSdk.ZrokShareTags(shr.Token).SubTags, edge); err != nil {
|
||||
return errors.Wrapf(err, "error creating dial service policy for '%v'", shr.Token)
|
||||
}
|
||||
logrus.Infof("added dial service policy for '%v'", shr.Token)
|
||||
return nil
|
||||
}
|
||||
|
||||
func relaxPrivateShare(str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement, shr *store.Share, trx *sqlx.Tx) error {
|
||||
fes, err := str.FindFrontendsForPrivateShare(shr.Id, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding frontends for share '%v'", shr.Token)
|
||||
}
|
||||
for _, fe := range fes {
|
||||
if fe.EnvironmentId != nil {
|
||||
env, err := str.GetEnvironment(*fe.EnvironmentId, trx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error getting environment for frontend '%v'", fe.Token)
|
||||
}
|
||||
|
||||
addlTags := map[string]interface{}{
|
||||
"zrokEnvironmentZId": env.ZId,
|
||||
"zrokFrontendToken": fe.Token,
|
||||
"zrokShareToken": shr.Token,
|
||||
}
|
||||
if err := zrokEdgeSdk.CreateServicePolicyDial(env.ZId+"-"+shr.ZId+"-dial", shr.ZId, []string{env.ZId}, addlTags, edge); err != nil {
|
||||
return errors.Wrapf(err, "unable to create dial policy for frontend '%v'", fe.Token)
|
||||
}
|
||||
|
||||
logrus.Infof("added dial service policy for share '%v' to private frontend '%v'", shr.Token, fe.Token)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}

controller/limits/shareWarningAction.go (new file, 61 lines)
@ -0,0 +1,61 @@
package limits
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/openziti/edge/rest_management_api_client"
|
||||
"github.com/openziti/zrok/controller/emailUi"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/openziti/zrok/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type shareWarningAction struct {
|
||||
str *store.Store
|
||||
edge *rest_management_api_client.ZitiEdgeManagement
|
||||
cfg *emailUi.Config
|
||||
}
|
||||
|
||||
func newShareWarningAction(cfg *emailUi.Config, str *store.Store, edge *rest_management_api_client.ZitiEdgeManagement) *shareWarningAction {
|
||||
return &shareWarningAction{str, edge, cfg}
|
||||
}
|
||||
|
||||
func (a *shareWarningAction) HandleShare(shr *store.Share, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
|
||||
logrus.Infof("warning '%v'", shr.Token)
|
||||
|
||||
env, err := a.str.GetEnvironment(shr.EnvironmentId, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if env.AccountId != nil {
|
||||
acct, err := a.str.GetAccount(*env.AccountId, trx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rxLimit := "unlimited bytes"
|
||||
if limit.Limit.Rx != Unlimited {
|
||||
rxLimit = util.BytesToSize(limit.Limit.Rx)
|
||||
}
|
||||
txLimit := "unlimited bytes"
|
||||
if limit.Limit.Tx != Unlimited {
|
||||
txLimit = util.BytesToSize(limit.Limit.Tx)
|
||||
}
|
||||
totalLimit := "unlimited bytes"
|
||||
if limit.Limit.Total != Unlimited {
|
||||
totalLimit = util.BytesToSize(limit.Limit.Total)
|
||||
}
|
||||
|
||||
detail := newDetailMessage()
|
||||
detail = detail.append("Your share '%v' has received %v and sent %v (for a total of %v), which has triggered a transfer limit warning.", shr.Token, util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes))
|
||||
detail = detail.append("This zrok instance only allows a share to receive %v, send %v, totalling not more than %v for each %v.", rxLimit, txLimit, totalLimit, limit.Period)
|
||||
detail = detail.append("If you exceed the transfer limit, access to your shares will be temporarily disabled (until the last %v falls below the transfer limit).", limit.Period)
|
||||
|
||||
if err := sendLimitWarningEmail(a.cfg, acct.Email, detail); err != nil {
|
||||
return errors.Wrapf(err, "error sending limit warning email to '%v'", acct.Email)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -3,6 +3,7 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -11,11 +12,11 @@ import (
|
||||
)
|
||||
|
||||
type maintenanceRegistrationAgent struct {
|
||||
cfg *RegistrationMaintenanceConfig
|
||||
cfg *config.RegistrationMaintenanceConfig
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func newRegistrationMaintenanceAgent(ctx context.Context, cfg *RegistrationMaintenanceConfig) *maintenanceRegistrationAgent {
|
||||
func newRegistrationMaintenanceAgent(ctx context.Context, cfg *config.RegistrationMaintenanceConfig) *maintenanceRegistrationAgent {
|
||||
return &maintenanceRegistrationAgent{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
@ -78,11 +79,11 @@ func (ma *maintenanceRegistrationAgent) deleteExpiredAccountRequests() error {
|
||||
}
|
||||
|
||||
type maintenanceResetPasswordAgent struct {
|
||||
cfg *ResetPasswordMaintenanceConfig
|
||||
cfg *config.ResetPasswordMaintenanceConfig
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func newMaintenanceResetPasswordAgent(ctx context.Context, cfg *ResetPasswordMaintenanceConfig) *maintenanceResetPasswordAgent {
|
||||
func newMaintenanceResetPasswordAgent(ctx context.Context, cfg *config.ResetPasswordMaintenanceConfig) *maintenanceResetPasswordAgent {
|
||||
return &maintenanceResetPasswordAgent{
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
|
@ -1,58 +1,58 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type MetricsAgent struct {
|
||||
src Source
|
||||
cache *shareCache
|
||||
join chan struct{}
|
||||
type Agent struct {
|
||||
events chan ZitiEventJson
|
||||
src ZitiEventJsonSource
|
||||
srcJoin chan struct{}
|
||||
cache *cache
|
||||
snks []UsageSink
|
||||
}
|
||||
|
||||
func Run(cfg *Config) (*MetricsAgent, error) {
|
||||
logrus.Info("starting")
|
||||
|
||||
if cfg.Store == nil {
|
||||
return nil, errors.New("no 'store' configured; exiting")
|
||||
func NewAgent(cfg *AgentConfig, str *store.Store, ifxCfg *InfluxConfig) (*Agent, error) {
|
||||
a := &Agent{}
|
||||
if v, ok := cfg.Source.(ZitiEventJsonSource); ok {
|
||||
a.src = v
|
||||
} else {
|
||||
return nil, errors.New("invalid event json source")
|
||||
}
|
||||
cache, err := newShareCache(cfg.Store)
|
||||
a.cache = newShareCache(str)
|
||||
a.snks = append(a.snks, newInfluxWriter(ifxCfg))
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (a *Agent) AddUsageSink(snk UsageSink) {
|
||||
a.snks = append(a.snks, snk)
|
||||
}
|
||||
|
||||
func (a *Agent) Start() error {
|
||||
a.events = make(chan ZitiEventJson)
|
||||
srcJoin, err := a.src.Start(a.events)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error creating share cache")
|
||||
}
|
||||
|
||||
if cfg.Source == nil {
|
||||
return nil, errors.New("no 'source' configured; exiting")
|
||||
}
|
||||
|
||||
src, ok := cfg.Source.(Source)
|
||||
if !ok {
|
||||
return nil, errors.New("invalid 'source'; exiting")
|
||||
}
|
||||
|
||||
if cfg.Influx == nil {
|
||||
return nil, errors.New("no 'influx' configured; exiting")
|
||||
}
|
||||
|
||||
idb := openInfluxDb(cfg.Influx)
|
||||
|
||||
events := make(chan map[string]interface{})
|
||||
join, err := src.Start(events)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error starting source")
|
||||
return err
|
||||
}
|
||||
a.srcJoin = srcJoin
|
||||
|
||||
go func() {
|
||||
logrus.Info("started")
|
||||
defer logrus.Info("stopped")
|
||||
for {
|
||||
select {
|
||||
case event := <-events:
|
||||
usage := Ingest(event)
|
||||
if shrToken, err := cache.getToken(usage.ZitiServiceId); err == nil {
|
||||
usage.ShareToken = shrToken
|
||||
if err := idb.Write(usage); err != nil {
|
||||
case event := <-a.events:
|
||||
if usage, err := Ingest(event); err == nil {
|
||||
if err := a.cache.addZrokDetail(usage); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
for _, snk := range a.snks {
|
||||
if err := snk.Handle(usage); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logrus.Error(err)
|
||||
}
|
||||
@ -60,14 +60,10 @@ func Run(cfg *Config) (*MetricsAgent, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
return &MetricsAgent{src: src, join: join}, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ma *MetricsAgent) Stop() {
|
||||
logrus.Info("stopping")
|
||||
ma.src.Stop()
|
||||
}
|
||||
|
||||
func (ma *MetricsAgent) Join() {
|
||||
<-ma.join
|
||||
func (a *Agent) Stop() {
|
||||
a.src.Stop()
|
||||
close(a.events)
|
||||
}
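
A rough sketch of how the controller might wire the reworked agent together at startup; the variable names here ('cfg', 'str', 'extraSink') are assumptions for illustration:

    // 'cfg' is the v3 controller configuration, 'str' an opened *store.Store.
    ma, err := metrics.NewAgent(cfg.Metrics.Agent, str, cfg.Metrics.Influx)
    if err != nil {
        panic(err)
    }
    ma.AddUsageSink(extraSink) // any additional UsageSink, beyond the built-in influx writer
    if err := ma.Start(); err != nil {
        panic(err)
    }
    defer ma.Stop()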

controller/metrics/amqpSink.go (new file, 66 lines)
@ -0,0 +1,66 @@
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller/env"
|
||||
"github.com/pkg/errors"
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
env.GetCfOptions().AddFlexibleSetter("amqpSink", loadAmqpSinkConfig)
|
||||
}
|
||||
|
||||
type AmqpSinkConfig struct {
|
||||
Url string `cf:"+secret"`
|
||||
QueueName string
|
||||
}
|
||||
|
||||
func loadAmqpSinkConfig(v interface{}, _ *cf.Options) (interface{}, error) {
|
||||
if submap, ok := v.(map[string]interface{}); ok {
|
||||
cfg := &AmqpSinkConfig{}
|
||||
if err := cf.Bind(cfg, submap, cf.DefaultOptions()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newAmqpSink(cfg)
|
||||
}
|
||||
return nil, errors.New("invalid config structure for 'amqpSink'")
|
||||
}
|
||||
|
||||
type amqpSink struct {
|
||||
conn *amqp.Connection
|
||||
ch *amqp.Channel
|
||||
queue amqp.Queue
|
||||
}
|
||||
|
||||
func newAmqpSink(cfg *AmqpSinkConfig) (*amqpSink, error) {
|
||||
conn, err := amqp.Dial(cfg.Url)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error dialing amqp broker")
|
||||
}
|
||||
|
||||
ch, err := conn.Channel()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting amqp channel")
|
||||
}
|
||||
|
||||
queue, err := ch.QueueDeclare(cfg.QueueName, true, false, false, false, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error declaring queue")
|
||||
}
|
||||
|
||||
return &amqpSink{conn, ch, queue}, nil
|
||||
}
|
||||
|
||||
func (s *amqpSink) Handle(event ZitiEventJson) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
logrus.Infof("pushing '%v'", event)
|
||||
return s.ch.PublishWithContext(ctx, "", s.queue.Name, false, false, amqp.Publishing{
|
||||
ContentType: "application/json",
|
||||
Body: []byte(event),
|
||||
})
|
||||
}

controller/metrics/amqpSource.go (new file, 86 lines)
@ -0,0 +1,86 @@
package metrics
|
||||
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller/env"
|
||||
"github.com/pkg/errors"
|
||||
amqp "github.com/rabbitmq/amqp091-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
env.GetCfOptions().AddFlexibleSetter("amqpSource", loadAmqpSourceConfig)
|
||||
}
|
||||
|
||||
type AmqpSourceConfig struct {
|
||||
Url string `cf:"+secret"`
|
||||
QueueName string
|
||||
}
|
||||
|
||||
func loadAmqpSourceConfig(v interface{}, _ *cf.Options) (interface{}, error) {
|
||||
if submap, ok := v.(map[string]interface{}); ok {
|
||||
cfg := &AmqpSourceConfig{}
|
||||
if err := cf.Bind(cfg, submap, cf.DefaultOptions()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newAmqpSource(cfg)
|
||||
}
|
||||
return nil, errors.New("invalid config structure for 'amqpSource'")
|
||||
}
|
||||
|
||||
type amqpSource struct {
|
||||
conn *amqp.Connection
|
||||
ch *amqp.Channel
|
||||
queue amqp.Queue
|
||||
msgs <-chan amqp.Delivery
|
||||
join chan struct{}
|
||||
}
|
||||
|
||||
func newAmqpSource(cfg *AmqpSourceConfig) (*amqpSource, error) {
|
||||
conn, err := amqp.Dial(cfg.Url)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error dialing amqp broker")
|
||||
}
|
||||
|
||||
ch, err := conn.Channel()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting amqp channel")
|
||||
}
|
||||
|
||||
queue, err := ch.QueueDeclare(cfg.QueueName, true, false, false, false, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error declaring queue")
|
||||
}
|
||||
|
||||
msgs, err := ch.Consume(cfg.QueueName, "zrok", true, false, false, false, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error consuming")
|
||||
}
|
||||
|
||||
return &amqpSource{
|
||||
conn,
|
||||
ch,
|
||||
queue,
|
||||
msgs,
|
||||
make(chan struct{}),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *amqpSource) Start(events chan ZitiEventJson) (join chan struct{}, err error) {
|
||||
go func() {
|
||||
logrus.Info("started")
|
||||
defer logrus.Info("stopped")
|
||||
for event := range s.msgs {
|
||||
events <- ZitiEventJson(event.Body)
|
||||
}
|
||||
close(s.join)
|
||||
}()
|
||||
return s.join, nil
|
||||
}
|
||||
|
||||
func (s *amqpSource) Stop() {
|
||||
if err := s.ch.Close(); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
<-s.join
|
||||
}

controller/metrics/bridge.go (new file, 77 lines)
@ -0,0 +1,77 @@
package metrics
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type BridgeConfig struct {
|
||||
Source interface{}
|
||||
Sink interface{}
|
||||
}
|
||||
|
||||
type Bridge struct {
|
||||
src ZitiEventJsonSource
|
||||
srcJoin chan struct{}
|
||||
snk ZitiEventJsonSink
|
||||
events chan ZitiEventJson
|
||||
close chan struct{}
|
||||
join chan struct{}
|
||||
}
|
||||
|
||||
func NewBridge(cfg *BridgeConfig) (*Bridge, error) {
|
||||
b := &Bridge{
|
||||
events: make(chan ZitiEventJson),
|
||||
join: make(chan struct{}),
|
||||
close: make(chan struct{}),
|
||||
}
|
||||
if v, ok := cfg.Source.(ZitiEventJsonSource); ok {
|
||||
b.src = v
|
||||
} else {
|
||||
return nil, errors.New("invalid source type")
|
||||
}
|
||||
if v, ok := cfg.Sink.(ZitiEventJsonSink); ok {
|
||||
b.snk = v
|
||||
} else {
|
||||
return nil, errors.New("invalid sink type")
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (b *Bridge) Start() (join chan struct{}, err error) {
|
||||
if b.srcJoin, err = b.src.Start(b.events); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
logrus.Info("started")
|
||||
defer logrus.Info("stopped")
|
||||
defer close(b.join)
|
||||
|
||||
eventLoop:
|
||||
for {
|
||||
select {
|
||||
case eventJson := <-b.events:
|
||||
logrus.Info(eventJson)
|
||||
if err := b.snk.Handle(eventJson); err == nil {
|
||||
logrus.Infof("-> %v", eventJson)
|
||||
} else {
|
||||
logrus.Error(err)
|
||||
}
|
||||
|
||||
case <-b.close:
|
||||
logrus.Info("received close signal")
|
||||
break eventLoop
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return b.join, nil
|
||||
}
|
||||
|
||||
func (b *Bridge) Stop() {
|
||||
b.src.Stop()
|
||||
close(b.close)
|
||||
<-b.srcJoin
|
||||
<-b.join
|
||||
}
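
The bridge is what lets a satellite process forward raw OpenZiti usage events into a queue for a central metrics agent. A hypothetical wiring from inside the metrics package (the file path and AMQP URL are placeholders):

    src := &fileSource{cfg: &FileSourceConfig{Path: "/tmp/fabric-usage.json"}}
    snk, err := newAmqpSink(&AmqpSinkConfig{Url: "amqp://guest:guest@localhost:5672", QueueName: "zrok"})
    if err != nil {
        panic(err)
    }

    bridge, err := NewBridge(&BridgeConfig{Source: src, Sink: snk})
    if err != nil {
        panic(err)
    }
    join, err := bridge.Start()
    if err != nil {
        panic(err)
    }
    <-join // block until the bridge is stopped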

controller/metrics/cache.go (new file, 35 lines)
@ -0,0 +1,35 @@
package metrics

import (
    "github.com/openziti/zrok/controller/store"
)

type cache struct {
    str *store.Store
}

func newShareCache(str *store.Store) *cache {
    return &cache{str}
}

func (c *cache) addZrokDetail(u *Usage) error {
    tx, err := c.str.Begin()
    if err != nil {
        return err
    }
    defer func() { _ = tx.Rollback() }()

    shr, err := c.str.FindShareWithZIdAndDeleted(u.ZitiServiceId, tx)
    if err != nil {
        return err
    }
    u.ShareToken = shr.Token
    env, err := c.str.GetEnvironment(shr.EnvironmentId, tx)
    if err != nil {
        return err
    }
    u.EnvironmentId = int64(env.Id)
    u.AccountId = int64(*env.AccountId)

    return nil
}
@ -1,10 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import "github.com/michaelquigley/cf"
|
||||
|
||||
func GetCfOptions() *cf.Options {
|
||||
opts := cf.DefaultOptions()
|
||||
opts.AddFlexibleSetter("file", loadFileSourceConfig)
|
||||
opts.AddFlexibleSetter("websocket", loadWebsocketSourceConfig)
|
||||
return opts
|
||||
}
|
@ -1,15 +1,12 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Source interface{}
|
||||
Influx *InfluxConfig
|
||||
Store *store.Config
|
||||
Agent *AgentConfig
|
||||
}
|
||||
|
||||
type AgentConfig struct {
|
||||
Source interface{}
|
||||
}
|
||||
|
||||
type InfluxConfig struct {
|
||||
@ -18,11 +15,3 @@ type InfluxConfig struct {
|
||||
Org string
|
||||
Token string `cf:"+secret"`
|
||||
}
|
||||
|
||||
func LoadConfig(path string) (*Config, error) {
|
||||
cfg := &Config{}
|
||||
if err := cf.BindYaml(cfg, path, GetCfOptions()); err != nil {
|
||||
return nil, errors.Wrapf(err, "error loading config from '%v'", path)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
@ -2,17 +2,21 @@ package metrics
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"github.com/michaelquigley/cf"
|
||||
"github.com/nxadm/tail"
|
||||
"github.com/openziti/zrok/controller/env"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"os"
|
||||
)
|
||||
|
||||
func init() {
|
||||
env.GetCfOptions().AddFlexibleSetter("fileSource", loadFileSourceConfig)
|
||||
}
|
||||
|
||||
type FileSourceConfig struct {
|
||||
Path string
|
||||
IndexPath string
|
||||
Path string
|
||||
PointerPath string
|
||||
}
|
||||
|
||||
func loadFileSourceConfig(v interface{}, _ *cf.Options) (interface{}, error) {
|
||||
@ -23,36 +27,36 @@ func loadFileSourceConfig(v interface{}, _ *cf.Options) (interface{}, error) {
|
||||
}
|
||||
return &fileSource{cfg: cfg}, nil
|
||||
}
|
||||
return nil, errors.New("invalid config structure for 'file' source")
|
||||
return nil, errors.New("invalid config structure for 'fileSource'")
|
||||
}
|
||||
|
||||
type fileSource struct {
|
||||
cfg *FileSourceConfig
|
||||
t *tail.Tail
|
||||
cfg *FileSourceConfig
|
||||
ptrF *os.File
|
||||
t *tail.Tail
|
||||
}
|
||||
|
||||
func (s *fileSource) Start(events chan map[string]interface{}) (join chan struct{}, err error) {
|
||||
func (s *fileSource) Start(events chan ZitiEventJson) (join chan struct{}, err error) {
|
||||
f, err := os.Open(s.cfg.Path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error opening '%v'", s.cfg.Path)
|
||||
}
|
||||
_ = f.Close()
|
||||
|
||||
idxF, err := os.OpenFile(s.indexPath(), os.O_CREATE|os.O_RDWR, os.ModePerm)
|
||||
s.ptrF, err = os.OpenFile(s.pointerPath(), os.O_CREATE|os.O_RDWR, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error opening '%v'", s.indexPath())
|
||||
return nil, errors.Wrapf(err, "error opening pointer '%v'", s.pointerPath())
|
||||
}
|
||||
|
||||
pos := int64(0)
|
||||
posBuf := make([]byte, 8)
|
||||
if n, err := idxF.Read(posBuf); err == nil && n == 8 {
|
||||
pos = int64(binary.LittleEndian.Uint64(posBuf))
|
||||
logrus.Infof("recovered stored position: %d", pos)
|
||||
ptr, err := s.readPtr()
|
||||
if err != nil {
|
||||
logrus.Errorf("error reading pointer: %v", err)
|
||||
}
|
||||
logrus.Infof("retrieved stored position pointer at '%d'", ptr)
|
||||
|
||||
join = make(chan struct{})
|
||||
go func() {
|
||||
s.tail(pos, events, idxF)
|
||||
s.tail(ptr, events)
|
||||
close(join)
|
||||
}()
|
||||
|
||||
@ -65,43 +69,62 @@ func (s *fileSource) Stop() {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fileSource) tail(pos int64, events chan map[string]interface{}, idxF *os.File) {
|
||||
logrus.Infof("started")
|
||||
defer logrus.Infof("stopped")
|
||||
|
||||
posBuf := make([]byte, 8)
|
||||
func (s *fileSource) tail(ptr int64, events chan ZitiEventJson) {
|
||||
logrus.Info("started")
|
||||
defer logrus.Info("stopped")
|
||||
|
||||
var err error
|
||||
s.t, err = tail.TailFile(s.cfg.Path, tail.Config{
|
||||
ReOpen: true,
|
||||
Follow: true,
|
||||
Location: &tail.SeekInfo{Offset: pos},
|
||||
Location: &tail.SeekInfo{Offset: ptr},
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
logrus.Errorf("error starting tail: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for line := range s.t.Lines {
|
||||
event := make(map[string]interface{})
|
||||
if err := json.Unmarshal([]byte(line.Text), &event); err == nil {
|
||||
binary.LittleEndian.PutUint64(posBuf, uint64(line.SeekInfo.Offset))
|
||||
if n, err := idxF.Seek(0, 0); err == nil && n == 0 {
|
||||
if n, err := idxF.Write(posBuf); err != nil || n != 8 {
|
||||
logrus.Errorf("error writing index (%d): %v", n, err)
|
||||
}
|
||||
}
|
||||
events <- event
|
||||
} else {
|
||||
logrus.Errorf("error parsing line #%d: %v", line.Num, err)
|
||||
for event := range s.t.Lines {
|
||||
events <- ZitiEventJson(event.Text)
|
||||
|
||||
if err := s.writePtr(event.SeekInfo.Offset); err != nil {
|
||||
logrus.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fileSource) indexPath() string {
|
||||
if s.cfg.IndexPath == "" {
|
||||
return s.cfg.Path + ".idx"
|
||||
func (s *fileSource) pointerPath() string {
|
||||
if s.cfg.PointerPath == "" {
|
||||
return s.cfg.Path + ".ptr"
|
||||
} else {
|
||||
return s.cfg.IndexPath
|
||||
return s.cfg.PointerPath
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fileSource) readPtr() (int64, error) {
|
||||
ptr := int64(0)
|
||||
buf := make([]byte, 8)
|
||||
if n, err := s.ptrF.Seek(0, 0); err == nil && n == 0 {
|
||||
if n, err := s.ptrF.Read(buf); err == nil && n == 8 {
|
||||
ptr = int64(binary.LittleEndian.Uint64(buf))
|
||||
return ptr, nil
|
||||
} else {
|
||||
return 0, errors.Wrapf(err, "error reading pointer (%d): %v", n, err)
|
||||
}
|
||||
} else {
|
||||
return 0, errors.Wrapf(err, "error seeking pointer (%d): %v", n, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fileSource) writePtr(ptr int64) error {
|
||||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, uint64(ptr))
|
||||
if n, err := s.ptrF.Seek(0, 0); err == nil && n == 0 {
|
||||
if n, err := s.ptrF.Write(buf); err != nil || n != 8 {
|
||||
return errors.Wrapf(err, "error writing pointer (%d): %v", n, err)
|
||||
}
|
||||
} else {
|
||||
return errors.Wrapf(err, "error seeking pointer (%d): %v", n, err)
|
||||
}
|
||||
return nil
|
||||
}
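
The pointer file holds a single little-endian uint64 byte offset, which is how the tail resumes where it left off after a restart. A standalone round-trip of that encoding, for illustration:

    buf := make([]byte, 8)
    binary.LittleEndian.PutUint64(buf, uint64(123456)) // what writePtr stores
    ptr := int64(binary.LittleEndian.Uint64(buf))      // what readPtr recovers
    // ptr == 123456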
|
||||
|
@ -10,42 +10,52 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type influxDb struct {
|
||||
type influxWriter struct {
|
||||
idb influxdb2.Client
|
||||
writeApi api.WriteAPIBlocking
|
||||
}
|
||||
|
||||
func openInfluxDb(cfg *InfluxConfig) *influxDb {
|
||||
func newInfluxWriter(cfg *InfluxConfig) *influxWriter {
|
||||
idb := influxdb2.NewClient(cfg.Url, cfg.Token)
|
||||
wapi := idb.WriteAPIBlocking(cfg.Org, cfg.Bucket)
|
||||
return &influxDb{idb, wapi}
|
||||
writeApi := idb.WriteAPIBlocking(cfg.Org, cfg.Bucket)
|
||||
return &influxWriter{idb, writeApi}
|
||||
}
|
||||
|
||||
func (i *influxDb) Write(u *Usage) error {
|
||||
func (w *influxWriter) Handle(u *Usage) error {
|
||||
out := fmt.Sprintf("share: %v, circuit: %v", u.ShareToken, u.ZitiCircuitId)
|
||||
|
||||
envId := fmt.Sprintf("%d", u.EnvironmentId)
|
||||
acctId := fmt.Sprintf("%d", u.AccountId)
|
||||
|
||||
var pts []*write.Point
|
||||
circuitPt := influxdb2.NewPoint("circuits",
|
||||
map[string]string{"share": u.ShareToken, "envId": envId, "acctId": acctId},
|
||||
map[string]interface{}{"circuit": u.ZitiCircuitId},
|
||||
u.IntervalStart)
|
||||
pts = append(pts, circuitPt)
|
||||
|
||||
if u.BackendTx > 0 || u.BackendRx > 0 {
|
||||
pt := influxdb2.NewPoint("xfer",
|
||||
map[string]string{"namespace": "backend", "share": u.ShareToken},
|
||||
map[string]interface{}{"bytesRead": u.BackendRx, "bytesWritten": u.BackendTx},
|
||||
map[string]string{"namespace": "backend", "share": u.ShareToken, "envId": envId, "acctId": acctId},
|
||||
map[string]interface{}{"rx": u.BackendRx, "tx": u.BackendTx},
|
||||
u.IntervalStart)
|
||||
pts = append(pts, pt)
|
||||
out += fmt.Sprintf(" backend {rx: %v, tx: %v}", util.BytesToSize(u.BackendRx), util.BytesToSize(u.BackendTx))
|
||||
}
|
||||
if u.FrontendTx > 0 || u.FrontendRx > 0 {
|
||||
pt := influxdb2.NewPoint("xfer",
|
||||
map[string]string{"namespace": "frontend", "share": u.ShareToken},
|
||||
map[string]interface{}{"bytesRead": u.FrontendRx, "bytesWritten": u.FrontendTx},
|
||||
map[string]string{"namespace": "frontend", "share": u.ShareToken, "envId": envId, "acctId": acctId},
|
||||
map[string]interface{}{"rx": u.FrontendRx, "tx": u.FrontendTx},
|
||||
u.IntervalStart)
|
||||
pts = append(pts, pt)
|
||||
out += fmt.Sprintf(" frontend {rx: %v, tx: %v}", util.BytesToSize(u.FrontendRx), util.BytesToSize(u.FrontendTx))
|
||||
}
|
||||
if len(pts) > 0 {
|
||||
if err := i.writeApi.WritePoint(context.Background(), pts...); err == nil {
|
||||
logrus.Info(out)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.writeApi.WritePoint(context.Background(), pts...); err == nil {
|
||||
logrus.Info(out)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
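
Tagging each point with share, envId, and acctId is what makes the per-account and per-environment Flux queries in controller/limits/influxReader.go possible. A hedged sketch of feeding the writer a record from inside the metrics package (all values invented):

    u := &Usage{
        IntervalStart: time.Now(),
        ShareToken:    "abcd1234",
        EnvironmentId: 1,
        AccountId:     1,
        BackendRx:     2048,
        BackendTx:     4096,
    }
    if err := newInfluxWriter(ifxCfg).Handle(u); err != nil {
        logrus.Error(err)
    }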
|
@ -12,6 +12,8 @@ type Usage struct {
|
||||
ZitiServiceId string
|
||||
ZitiCircuitId string
|
||||
ShareToken string
|
||||
EnvironmentId int64
|
||||
AccountId int64
|
||||
FrontendTx int64
|
||||
FrontendRx int64
|
||||
BackendTx int64
|
||||
@ -25,17 +27,25 @@ func (u Usage) String() string {
|
||||
out += ", " + fmt.Sprintf("service '%v'", u.ZitiServiceId)
|
||||
out += ", " + fmt.Sprintf("circuit '%v'", u.ZitiCircuitId)
|
||||
out += ", " + fmt.Sprintf("share '%v'", u.ShareToken)
|
||||
out += ", " + fmt.Sprintf("environment '%d'", u.EnvironmentId)
|
||||
out += ", " + fmt.Sprintf("account '%v'", u.AccountId)
|
||||
out += ", " + fmt.Sprintf("fe {rx %v, tx %v}", util.BytesToSize(u.FrontendRx), util.BytesToSize(u.FrontendTx))
|
||||
out += ", " + fmt.Sprintf("be {rx %v, tx %v}", util.BytesToSize(u.BackendRx), util.BytesToSize(u.BackendTx))
|
||||
out += "}"
|
||||
return out
|
||||
}
|
||||
|
||||
type Source interface {
|
||||
Start(chan map[string]interface{}) (chan struct{}, error)
|
||||
type UsageSink interface {
|
||||
Handle(u *Usage) error
|
||||
}
|
||||
|
||||
type ZitiEventJson string
|
||||
|
||||
type ZitiEventJsonSource interface {
|
||||
Start(chan ZitiEventJson) (join chan struct{}, err error)
|
||||
Stop()
|
||||
}
|
||||
|
||||
type Ingester interface {
|
||||
Ingest(msg map[string]interface{}) error
|
||||
type ZitiEventJsonSink interface {
|
||||
Handle(event ZitiEventJson) error
|
||||
}
|
||||
|
@ -1,31 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/openziti/zrok/controller/store"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type shareCache struct {
|
||||
str *store.Store
|
||||
}
|
||||
|
||||
func newShareCache(cfg *store.Config) (*shareCache, error) {
|
||||
str, err := store.Open(cfg)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening store")
|
||||
}
|
||||
return &shareCache{str}, nil
|
||||
}
|
||||
|
||||
func (sc *shareCache) getToken(svcZId string) (string, error) {
|
||||
tx, err := sc.str.Begin()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
shr, err := sc.str.FindShareWithZIdAndDeleted(svcZId, tx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return shr.Token, nil
|
||||
}

controller/metrics/usageIngest.go (new file, 102 lines)
@ -0,0 +1,102 @@
package metrics
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Ingest(event ZitiEventJson) (*Usage, error) {
|
||||
eventMap := make(map[string]interface{})
|
||||
if err := json.Unmarshal([]byte(event), &eventMap); err == nil {
|
||||
u := &Usage{ProcessedStamp: time.Now()}
|
||||
if ns, found := eventMap["namespace"]; found && ns == "fabric.usage" {
|
||||
if v, found := eventMap["interval_start_utc"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.IntervalStart = time.Unix(int64(vFloat64), 0)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'interval_start_utc'")
|
||||
}
|
||||
} else {
|
||||
logrus.Error("missing 'interval_start_utc'")
|
||||
}
|
||||
if v, found := eventMap["tags"]; found {
|
||||
if tags, ok := v.(map[string]interface{}); ok {
|
||||
if v, found := tags["serviceId"]; found {
|
||||
if vStr, ok := v.(string); ok {
|
||||
u.ZitiServiceId = vStr
|
||||
} else {
|
||||
logrus.Error("unable to assert 'tags/serviceId'")
|
||||
}
|
||||
} else {
|
||||
logrus.Error("missing 'tags/serviceId'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("unable to assert 'tags'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("missing 'tags'")
|
||||
}
|
||||
if v, found := eventMap["usage"]; found {
|
||||
if usage, ok := v.(map[string]interface{}); ok {
|
||||
if v, found := usage["ingress.tx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.FrontendTx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/ingress.tx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/ingress.tx'")
|
||||
}
|
||||
if v, found := usage["ingress.rx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.FrontendRx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/ingress.rx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/ingress.rx'")
|
||||
}
|
||||
if v, found := usage["egress.tx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.BackendTx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/egress.tx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/egress.tx'")
|
||||
}
|
||||
if v, found := usage["egress.rx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.BackendRx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/egress.rx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/egress.rx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("unable to assert 'usage' (%v) %v", reflect.TypeOf(v), event)
|
||||
}
|
||||
} else {
|
||||
logrus.Warnf("missing 'usage'")
|
||||
}
|
||||
if v, found := eventMap["circuit_id"]; found {
|
||||
if vStr, ok := v.(string); ok {
|
||||
u.ZitiCircuitId = vStr
|
||||
} else {
|
||||
logrus.Error("unable to assert 'circuit_id'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'circuit_id'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("not 'fabric.usage'")
|
||||
}
|
||||
return u, nil
|
||||
} else {
|
||||
return nil, errors.Wrap(err, "error unmarshaling")
|
||||
}
|
||||
}
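
A hedged example of the kind of 'fabric.usage' event this parser expects; the identifiers are invented, and real events arrive from the OpenZiti controller's event stream:

    event := ZitiEventJson(`{
      "namespace": "fabric.usage",
      "interval_start_utc": 1672531200,
      "circuit_id": "x1y2z3",
      "tags": {"serviceId": "svc-abc123"},
      "usage": {"ingress.rx": 1024, "ingress.tx": 2048, "egress.rx": 4096, "egress.tx": 512}
    }`)

    if u, err := Ingest(event); err == nil {
        logrus.Info(u) // Usage with frontend/backend byte counts populated
    }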
|
@ -1,95 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Ingest(event map[string]interface{}) *Usage {
|
||||
u := &Usage{ProcessedStamp: time.Now()}
|
||||
if ns, found := event["namespace"]; found && ns == "fabric.usage" {
|
||||
if v, found := event["interval_start_utc"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.IntervalStart = time.Unix(int64(vFloat64), 0)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'interval_start_utc'")
|
||||
}
|
||||
} else {
|
||||
logrus.Error("missing 'interval_start_utc'")
|
||||
}
|
||||
if v, found := event["tags"]; found {
|
||||
if tags, ok := v.(map[string]interface{}); ok {
|
||||
if v, found := tags["serviceId"]; found {
|
||||
if vStr, ok := v.(string); ok {
|
||||
u.ZitiServiceId = vStr
|
||||
} else {
|
||||
logrus.Error("unable to assert 'tags/serviceId'")
|
||||
}
|
||||
} else {
|
||||
logrus.Error("missing 'tags/serviceId'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("unable to assert 'tags'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("missing 'tags'")
|
||||
}
|
||||
if v, found := event["usage"]; found {
|
||||
if usage, ok := v.(map[string]interface{}); ok {
|
||||
if v, found := usage["ingress.tx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.FrontendTx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/ingress.tx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/ingress.tx'")
|
||||
}
|
||||
if v, found := usage["ingress.rx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.FrontendRx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/ingress.rx")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/ingress.rx")
|
||||
}
|
||||
if v, found := usage["egress.tx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.BackendTx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/egress.tx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/egress.tx'")
|
||||
}
|
||||
if v, found := usage["egress.rx"]; found {
|
||||
if vFloat64, ok := v.(float64); ok {
|
||||
u.BackendRx = int64(vFloat64)
|
||||
} else {
|
||||
logrus.Error("unable to assert 'usage/egress.rx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'usage/egress.rx'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("unable to assert 'usage' (%v) %v", reflect.TypeOf(v), event)
|
||||
}
|
||||
} else {
|
||||
logrus.Warnf("missing 'usage'")
|
||||
}
|
||||
if v, found := event["circuit_id"]; found {
|
||||
if vStr, ok := v.(string); ok {
|
||||
u.ZitiCircuitId = vStr
|
||||
} else {
|
||||
logrus.Error("unable to assert 'circuit_id'")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("missing 'circuit_id'")
|
||||
}
|
||||
} else {
|
||||
logrus.Errorf("not 'fabric.usage'")
|
||||
}
|
||||
return u
|
||||
}
|
@ -1,7 +1,6 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
@ -14,6 +13,7 @@ import (
|
||||
"github.com/openziti/fabric/pb/mgmt_pb"
|
||||
"github.com/openziti/identity"
|
||||
"github.com/openziti/sdk-golang/ziti/constants"
|
||||
"github.com/openziti/zrok/controller/env"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"io"
|
||||
@ -22,11 +22,15 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
env.GetCfOptions().AddFlexibleSetter("websocketSource", loadWebsocketSourceConfig)
|
||||
}
|
||||
|
||||
type WebsocketSourceConfig struct {
|
||||
WebsocketEndpoint string
|
||||
ApiEndpoint string
|
||||
WebsocketEndpoint string // wss://127.0.0.1:1280/fabric/v1/ws-api
|
||||
ApiEndpoint string // https://127.0.0.1:1280
|
||||
Username string
|
||||
Password string
|
||||
Password string `cf:"+secret"`
|
||||
}
|
||||
|
||||
func loadWebsocketSourceConfig(v interface{}, _ *cf.Options) (interface{}, error) {
|
||||
@ -37,17 +41,17 @@ func loadWebsocketSourceConfig(v interface{}, _ *cf.Options) (interface{}, error
|
||||
}
|
||||
return &websocketSource{cfg: cfg}, nil
|
||||
}
|
||||
return nil, errors.New("invalid config structure for 'websocket' source")
|
||||
return nil, errors.New("invalid config structure for 'websocketSource'")
|
||||
}
|
||||
|
||||
type websocketSource struct {
|
||||
cfg *WebsocketSourceConfig
|
||||
ch channel.Channel
|
||||
events chan map[string]interface{}
|
||||
events chan ZitiEventJson
|
||||
join chan struct{}
|
||||
}
|
||||
|
||||
func (s *websocketSource) Start(events chan map[string]interface{}) (chan struct{}, error) {
|
||||
func (s *websocketSource) Start(events chan ZitiEventJson) (join chan struct{}, err error) {
|
||||
caCerts, err := rest_util.GetControllerWellKnownCas(s.cfg.ApiEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -146,17 +150,5 @@ func (s *websocketSource) Stop() {
|
||||
}
|
||||
|
||||
func (s *websocketSource) HandleReceive(msg *channel.Message, _ channel.Channel) {
|
||||
decoder := json.NewDecoder(bytes.NewReader(msg.Body))
|
||||
for {
|
||||
ev := make(map[string]interface{})
|
||||
err := decoder.Decode(&ev)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err == nil {
|
||||
s.events <- ev
|
||||
} else {
|
||||
logrus.Errorf("error parsing '%v': %v", string(msg.Body), err)
|
||||
}
|
||||
}
|
||||
s.events <- ZitiEventJson(msg.Body)
|
||||
}
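
Following the endpoint hints in the comments above, a hypothetical websocket source configuration built from inside the metrics package would look roughly like (credentials are placeholders):

    src := &websocketSource{cfg: &WebsocketSourceConfig{
        WebsocketEndpoint: "wss://127.0.0.1:1280/fabric/v1/ws-api",
        ApiEndpoint:       "https://127.0.0.1:1280",
        Username:          "admin",
        Password:          "changeme",
    }}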
|
||||
|
@ -11,27 +11,23 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type shareHandler struct {
|
||||
cfg *LimitsConfig
|
||||
}
|
||||
type shareHandler struct{}
|
||||
|
||||
func newShareHandler(cfg *LimitsConfig) *shareHandler {
|
||||
return &shareHandler{cfg: cfg}
|
||||
func newShareHandler() *shareHandler {
|
||||
return &shareHandler{}
|
||||
}
|
||||
|
||||
func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zrok.Principal) middleware.Responder {
|
||||
logrus.Infof("handling")
|
||||
|
||||
tx, err := str.Begin()
|
||||
trx, err := str.Begin()
|
||||
if err != nil {
|
||||
logrus.Errorf("error starting transaction: %v", err)
|
||||
return share.NewShareInternalServerError()
|
||||
}
|
||||
defer func() { _ = tx.Rollback() }()
|
||||
defer func() { _ = trx.Rollback() }()
|
||||
|
||||
envZId := params.Body.EnvZID
|
||||
envId := 0
|
||||
envs, err := str.FindEnvironmentsForAccount(int(principal.ID), tx)
|
||||
envs, err := str.FindEnvironmentsForAccount(int(principal.ID), trx)
|
||||
if err == nil {
|
||||
found := false
|
||||
for _, env := range envs {
|
||||
@ -51,7 +47,7 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
|
||||
return share.NewShareInternalServerError()
|
||||
}
|
||||
|
||||
if err := h.checkLimits(principal, envs, tx); err != nil {
|
||||
if err := h.checkLimits(envId, principal, trx); err != nil {
|
||||
logrus.Errorf("limits error: %v", err)
|
||||
return share.NewShareUnauthorized()
|
||||
}
|
||||
@ -79,7 +75,7 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
|
||||
var frontendZIds []string
|
||||
var frontendTemplates []string
|
||||
for _, frontendSelection := range params.Body.FrontendSelection {
|
||||
sfe, err := str.FindFrontendPubliclyNamed(frontendSelection, tx)
|
||||
sfe, err := str.FindFrontendPubliclyNamed(frontendSelection, trx)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
return share.NewShareNotFound()
|
||||
@ -119,19 +115,22 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
|
||||
BackendProxyEndpoint: ¶ms.Body.BackendProxyEndpoint,
|
||||
Reserved: reserved,
|
||||
}
|
||||
if len(params.Body.FrontendSelection) > 0 {
|
||||
sshr.FrontendSelection = ¶ms.Body.FrontendSelection[0]
|
||||
}
|
||||
if len(frontendEndpoints) > 0 {
|
||||
sshr.FrontendEndpoint = &frontendEndpoints[0]
|
||||
} else if sshr.ShareMode == "private" {
|
||||
sshr.FrontendEndpoint = &sshr.ShareMode
|
||||
}
|
||||
|
||||
sid, err := str.CreateShare(envId, sshr, tx)
|
||||
sid, err := str.CreateShare(envId, sshr, trx)
|
||||
if err != nil {
|
||||
logrus.Errorf("error creating share record: %v", err)
|
||||
return share.NewShareInternalServerError()
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
if err := trx.Commit(); err != nil {
|
||||
logrus.Errorf("error committing share record: %v", err)
|
||||
return share.NewShareInternalServerError()
|
||||
}
|
||||
@ -143,17 +142,15 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
|
||||
})
|
||||
}
|
||||
|
||||
func (h *shareHandler) checkLimits(principal *rest_model_zrok.Principal, envs []*store.Environment, tx *sqlx.Tx) error {
|
||||
if !principal.Limitless && h.cfg.Shares > Unlimited {
|
||||
total := 0
|
||||
for i := range envs {
|
||||
shrs, err := str.FindSharesForEnvironment(envs[i].Id, tx)
|
||||
func (h *shareHandler) checkLimits(envId int, principal *rest_model_zrok.Principal, trx *sqlx.Tx) error {
|
||||
if !principal.Limitless {
|
||||
if limitsAgent != nil {
|
||||
ok, err := limitsAgent.CanCreateShare(int(principal.ID), envId, trx)
|
||||
if err != nil {
|
||||
return errors.Errorf("unable to find shares for environment '%v': %v", envs[i].ZId, err)
|
||||
return errors.Wrapf(err, "error checking share limits for '%v'", principal.Email)
|
||||
}
|
||||
total += len(shrs)
|
||||
if total+1 > h.cfg.Shares {
|
||||
return errors.Errorf("would exceed shares limit of %d for '%v'", h.cfg.Shares, principal.Email)
|
||||
if !ok {
|
||||
return errors.Errorf("share limit check failed for '%v'", principal.Email)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -43,12 +43,13 @@ func (h *shareDetailHandler) Handle(params metadata.GetShareDetailParams, princi
|
||||
return metadata.NewGetShareDetailNotFound()
|
||||
}
|
||||
var sparkData map[string][]int64
|
||||
if cfg.Influx != nil {
|
||||
if cfg.Metrics != nil && cfg.Metrics.Influx != nil {
|
||||
sparkData, err = sparkDataForShares([]*store.Share{shr})
|
||||
logrus.Info(sparkData)
|
||||
if err != nil {
|
||||
logrus.Errorf("error querying spark data for share: %v", err)
|
||||
}
|
||||
} else {
|
||||
logrus.Debug("skipping spark data; no influx configuration")
|
||||
}
|
||||
feEndpoint := ""
|
||||
if shr.FrontendEndpoint != nil {
|
||||
|
@ -10,7 +10,7 @@ func sparkDataForShares(shrs []*store.Share) (map[string][]int64, error) {
|
||||
out := make(map[string][]int64)
|
||||
|
||||
if len(shrs) > 0 {
|
||||
qapi := idb.QueryAPI(cfg.Influx.Org)
|
||||
qapi := idb.QueryAPI(cfg.Metrics.Influx.Org)
|
||||
|
||||
result, err := qapi.Query(context.Background(), sparkFluxQuery(shrs))
|
||||
if err != nil {
|
||||
@ -19,11 +19,11 @@ func sparkDataForShares(shrs []*store.Share) (map[string][]int64, error) {
|
||||
|
||||
for result.Next() {
|
||||
combinedRate := int64(0)
|
||||
readRate := result.Record().ValueByKey("bytesRead")
|
||||
readRate := result.Record().ValueByKey("tx")
|
||||
if readRate != nil {
|
||||
combinedRate += readRate.(int64)
|
||||
}
|
||||
writeRate := result.Record().ValueByKey("bytesWritten")
|
||||
writeRate := result.Record().ValueByKey("tx")
|
||||
if writeRate != nil {
|
||||
combinedRate += writeRate.(int64)
|
||||
}
|
||||
@ -48,7 +48,7 @@ func sparkFluxQuery(shrs []*store.Share) string {
|
||||
query := "read = from(bucket: \"zrok\")" +
|
||||
"|> range(start: -5m)" +
|
||||
"|> filter(fn: (r) => r[\"_measurement\"] == \"xfer\")" +
|
||||
"|> filter(fn: (r) => r[\"_field\"] == \"bytesRead\" or r[\"_field\"] == \"bytesWritten\")" +
|
||||
"|> filter(fn: (r) => r[\"_field\"] == \"rx\" or r[\"_field\"] == \"tx\")" +
|
||||
"|> filter(fn: (r) => r[\"namespace\"] == \"backend\")" +
|
||||
shrFilter +
|
||||
"|> aggregateWindow(every: 5s, fn: sum, createEmpty: true)\n" +
|
||||
|
@ -15,7 +15,7 @@ type Account struct {
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
func (self *Store) CreateAccount(a *Account, tx *sqlx.Tx) (int, error) {
|
||||
func (str *Store) CreateAccount(a *Account, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("insert into accounts (email, salt, password, token, limitless) values ($1, $2, $3, $4, $5) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing accounts insert statement")
|
||||
@ -27,7 +27,7 @@ func (self *Store) CreateAccount(a *Account, tx *sqlx.Tx) (int, error) {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (self *Store) GetAccount(id int, tx *sqlx.Tx) (*Account, error) {
|
||||
func (str *Store) GetAccount(id int, tx *sqlx.Tx) (*Account, error) {
|
||||
a := &Account{}
|
||||
if err := tx.QueryRowx("select * from accounts where id = $1", id).StructScan(a); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting account by id")
|
||||
@ -35,7 +35,7 @@ func (self *Store) GetAccount(id int, tx *sqlx.Tx) (*Account, error) {
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindAccountWithEmail(email string, tx *sqlx.Tx) (*Account, error) {
|
||||
func (str *Store) FindAccountWithEmail(email string, tx *sqlx.Tx) (*Account, error) {
|
||||
a := &Account{}
|
||||
if err := tx.QueryRowx("select * from accounts where email = $1 and not deleted", email).StructScan(a); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting account by email")
|
||||
@ -43,7 +43,7 @@ func (self *Store) FindAccountWithEmail(email string, tx *sqlx.Tx) (*Account, er
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindAccountWithEmailAndDeleted(email string, tx *sqlx.Tx) (*Account, error) {
|
||||
func (str *Store) FindAccountWithEmailAndDeleted(email string, tx *sqlx.Tx) (*Account, error) {
|
||||
a := &Account{}
|
||||
if err := tx.QueryRowx("select * from accounts where email = $1", email).StructScan(a); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting acount by email")
|
||||
@ -51,7 +51,7 @@ func (self *Store) FindAccountWithEmailAndDeleted(email string, tx *sqlx.Tx) (*A
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindAccountWithToken(token string, tx *sqlx.Tx) (*Account, error) {
|
||||
func (str *Store) FindAccountWithToken(token string, tx *sqlx.Tx) (*Account, error) {
|
||||
a := &Account{}
|
||||
if err := tx.QueryRowx("select * from accounts where token = $1 and not deleted", token).StructScan(a); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting account by token")
|
||||
@ -59,7 +59,7 @@ func (self *Store) FindAccountWithToken(token string, tx *sqlx.Tx) (*Account, er
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func (self *Store) UpdateAccount(a *Account, tx *sqlx.Tx) (int, error) {
|
||||
func (str *Store) UpdateAccount(a *Account, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("update accounts set email=$1, salt=$2, password=$3, token=$4, limitless=$5 where id = $6")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing accounts update statement")
|
||||
|
65
controller/store/accountLimitJournal.go
Normal file
@ -0,0 +1,65 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type AccountLimitJournal struct {
|
||||
Model
|
||||
AccountId int
|
||||
RxBytes int64
|
||||
TxBytes int64
|
||||
Action LimitJournalAction
|
||||
}
|
||||
|
||||
func (str *Store) CreateAccountLimitJournal(j *AccountLimitJournal, trx *sqlx.Tx) (int, error) {
|
||||
stmt, err := trx.Prepare("insert into account_limit_journal (account_id, rx_bytes, tx_bytes, action) values ($1, $2, $3, $4) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing account_limit_journal insert statement")
|
||||
}
|
||||
var id int
|
||||
if err := stmt.QueryRow(j.AccountId, j.RxBytes, j.TxBytes, j.Action).Scan(&id); err != nil {
|
||||
return 0, errors.Wrap(err, "error executing account_limit_journal insert statement")
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (str *Store) IsAccountLimitJournalEmpty(acctId int, trx *sqlx.Tx) (bool, error) {
|
||||
count := 0
|
||||
if err := trx.QueryRowx("select count(0) from account_limit_journal where account_id = $1", acctId).Scan(&count); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return count == 0, nil
|
||||
}
|
||||
|
||||
func (str *Store) FindLatestAccountLimitJournal(acctId int, trx *sqlx.Tx) (*AccountLimitJournal, error) {
|
||||
j := &AccountLimitJournal{}
|
||||
if err := trx.QueryRowx("select * from account_limit_journal where account_id = $1 order by id desc limit 1", acctId).StructScan(j); err != nil {
|
||||
return nil, errors.Wrap(err, "error finding account_limit_journal by account_id")
|
||||
}
|
||||
return j, nil
|
||||
}
|
||||
|
||||
func (str *Store) FindAllLatestAccountLimitJournal(trx *sqlx.Tx) ([]*AccountLimitJournal, error) {
|
||||
rows, err := trx.Queryx("select id, account_id, rx_bytes, tx_bytes, action, created_at, updated_at from account_limit_journal where id in (select max(id) as id from account_limit_journal group by account_id)")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting all latest account_limit_journal")
|
||||
}
|
||||
var aljs []*AccountLimitJournal
|
||||
for rows.Next() {
|
||||
alj := &AccountLimitJournal{}
|
||||
if err := rows.StructScan(alj); err != nil {
|
||||
return nil, errors.Wrap(err, "error scanning account_limit_journal")
|
||||
}
|
||||
aljs = append(aljs, alj)
|
||||
}
|
||||
return aljs, nil
|
||||
}
|
||||
|
||||
func (str *Store) DeleteAccountLimitJournalForAccount(acctId int, trx *sqlx.Tx) error {
|
||||
if _, err := trx.Exec("delete from account_limit_journal where account_id = $1", acctId); err != nil {
|
||||
return errors.Wrapf(err, "error deleting account_limit journal for '#%d'", acctId)
|
||||
}
|
||||
return nil
|
||||
}
|
79
controller/store/accountLimitJournal_test.go
Normal file
@ -0,0 +1,79 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAccountLimitJournal(t *testing.T) {
|
||||
str, err := Open(&Config{Path: ":memory:", Type: "sqlite3"})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, str)
|
||||
|
||||
trx, err := str.Begin()
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, trx)
|
||||
|
||||
aljEmpty, err := str.IsAccountLimitJournalEmpty(1, trx)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, aljEmpty)
|
||||
|
||||
acctId, err := str.CreateAccount(&Account{Email: "nobody@nowehere.com", Salt: "salt", Password: "password", Token: "token", Limitless: false, Deleted: false}, trx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId, RxBytes: 1024, TxBytes: 2048, Action: WarningAction}, trx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
aljEmpty, err = str.IsAccountLimitJournalEmpty(acctId, trx)
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, aljEmpty)
|
||||
|
||||
latestAlj, err := str.FindLatestAccountLimitJournal(acctId, trx)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, latestAlj)
|
||||
assert.Equal(t, int64(1024), latestAlj.RxBytes)
|
||||
assert.Equal(t, int64(2048), latestAlj.TxBytes)
|
||||
assert.Equal(t, WarningAction, latestAlj.Action)
|
||||
|
||||
_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId, RxBytes: 2048, TxBytes: 4096, Action: LimitAction}, trx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
latestAlj, err = str.FindLatestAccountLimitJournal(acctId, trx)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, latestAlj)
|
||||
assert.Equal(t, int64(2048), latestAlj.RxBytes)
|
||||
assert.Equal(t, int64(4096), latestAlj.TxBytes)
|
||||
assert.Equal(t, LimitAction, latestAlj.Action)
|
||||
}
|
||||
|
||||
func TestFindAllLatestAccountLimitJournal(t *testing.T) {
|
||||
str, err := Open(&Config{Path: ":memory:", Type: "sqlite3"})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, str)
|
||||
|
||||
trx, err := str.Begin()
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, trx)
|
||||
|
||||
acctId1, err := str.CreateAccount(&Account{Email: "nobody@nowhere.com", Salt: "salt1", Password: "password1", Token: "token1", Limitless: false, Deleted: false}, trx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId1, RxBytes: 2048, TxBytes: 4096, Action: WarningAction}, trx)
|
||||
assert.Nil(t, err)
|
||||
_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId1, RxBytes: 2048, TxBytes: 4096, Action: ClearAction}, trx)
|
||||
assert.Nil(t, err)
|
||||
aljId13, err := str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId1, RxBytes: 2048, TxBytes: 4096, Action: LimitAction}, trx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
acctId2, err := str.CreateAccount(&Account{Email: "someone@somewhere.com", Salt: "salt2", Password: "password2", Token: "token2", Limitless: false, Deleted: false}, trx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
aljId21, err := str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId2, RxBytes: 2048, TxBytes: 4096, Action: WarningAction}, trx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
aljs, err := str.FindAllLatestAccountLimitJournal(trx)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(aljs))
|
||||
assert.Equal(t, aljId13, aljs[0].Id)
|
||||
assert.Equal(t, aljId21, aljs[1].Id)
|
||||
}
|
@ -17,7 +17,7 @@ type AccountRequest struct {
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
func (self *Store) CreateAccountRequest(ar *AccountRequest, tx *sqlx.Tx) (int, error) {
|
||||
func (str *Store) CreateAccountRequest(ar *AccountRequest, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("insert into account_requests (token, email, source_address) values ($1, $2, $3) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing account_requests insert statement")
|
||||
@ -29,7 +29,7 @@ func (self *Store) CreateAccountRequest(ar *AccountRequest, tx *sqlx.Tx) (int, e
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (self *Store) GetAccountRequest(id int, tx *sqlx.Tx) (*AccountRequest, error) {
|
||||
func (str *Store) GetAccountRequest(id int, tx *sqlx.Tx) (*AccountRequest, error) {
|
||||
ar := &AccountRequest{}
|
||||
if err := tx.QueryRowx("select * from account_requests where id = $1", id).StructScan(ar); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting account_request by id")
|
||||
@ -37,7 +37,7 @@ func (self *Store) GetAccountRequest(id int, tx *sqlx.Tx) (*AccountRequest, erro
|
||||
return ar, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindAccountRequestWithToken(token string, tx *sqlx.Tx) (*AccountRequest, error) {
|
||||
func (str *Store) FindAccountRequestWithToken(token string, tx *sqlx.Tx) (*AccountRequest, error) {
|
||||
ar := &AccountRequest{}
|
||||
if err := tx.QueryRowx("select * from account_requests where token = $1 and not deleted", token).StructScan(ar); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting account_request by token")
|
||||
@ -45,9 +45,9 @@ func (self *Store) FindAccountRequestWithToken(token string, tx *sqlx.Tx) (*Acco
|
||||
return ar, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindExpiredAccountRequests(before time.Time, limit int, tx *sqlx.Tx) ([]*AccountRequest, error) {
|
||||
func (str *Store) FindExpiredAccountRequests(before time.Time, limit int, tx *sqlx.Tx) ([]*AccountRequest, error) {
|
||||
var sql string
|
||||
switch self.cfg.Type {
|
||||
switch str.cfg.Type {
|
||||
case "postgres":
|
||||
sql = "select * from account_requests where created_at < $1 and not deleted limit %d for update"
|
||||
|
||||
@ -55,7 +55,7 @@ func (self *Store) FindExpiredAccountRequests(before time.Time, limit int, tx *s
|
||||
sql = "select * from account_requests where created_at < $1 and not deleted limit %d"
|
||||
|
||||
default:
|
||||
return nil, errors.Errorf("unknown database type '%v'", self.cfg.Type)
|
||||
return nil, errors.Errorf("unknown database type '%v'", str.cfg.Type)
|
||||
}
|
||||
|
||||
rows, err := tx.Queryx(fmt.Sprintf(sql, limit), before)
|
||||
@ -73,7 +73,7 @@ func (self *Store) FindExpiredAccountRequests(before time.Time, limit int, tx *s
|
||||
return ars, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindAccountRequestWithEmail(email string, tx *sqlx.Tx) (*AccountRequest, error) {
|
||||
func (str *Store) FindAccountRequestWithEmail(email string, tx *sqlx.Tx) (*AccountRequest, error) {
|
||||
ar := &AccountRequest{}
|
||||
if err := tx.QueryRowx("select * from account_requests where email = $1 and not deleted", email).StructScan(ar); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting account_request by email")
|
||||
@ -81,7 +81,7 @@ func (self *Store) FindAccountRequestWithEmail(email string, tx *sqlx.Tx) (*Acco
|
||||
return ar, nil
|
||||
}
|
||||
|
||||
func (self *Store) DeleteAccountRequest(id int, tx *sqlx.Tx) error {
|
||||
func (str *Store) DeleteAccountRequest(id int, tx *sqlx.Tx) error {
|
||||
stmt, err := tx.Prepare("update account_requests set deleted = true, updated_at = current_timestamp where id = $1")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error preparing account_requests delete statement")
|
||||
@ -93,7 +93,7 @@ func (self *Store) DeleteAccountRequest(id int, tx *sqlx.Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Store) DeleteMultipleAccountRequests(ids []int, tx *sqlx.Tx) error {
|
||||
func (str *Store) DeleteMultipleAccountRequests(ids []int, tx *sqlx.Tx) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
@ -15,7 +15,7 @@ type Environment struct {
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
func (self *Store) CreateEnvironment(accountId int, i *Environment, tx *sqlx.Tx) (int, error) {
|
||||
func (str *Store) CreateEnvironment(accountId int, i *Environment, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("insert into environments (account_id, description, host, address, z_id) values ($1, $2, $3, $4, $5) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing environments insert statement")
|
||||
@ -27,7 +27,7 @@ func (self *Store) CreateEnvironment(accountId int, i *Environment, tx *sqlx.Tx)
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (self *Store) CreateEphemeralEnvironment(i *Environment, tx *sqlx.Tx) (int, error) {
|
||||
func (str *Store) CreateEphemeralEnvironment(i *Environment, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("insert into environments (description, host, address, z_id) values ($1, $2, $3, $4) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing environments (ephemeral) insert statement")
|
||||
@ -39,7 +39,7 @@ func (self *Store) CreateEphemeralEnvironment(i *Environment, tx *sqlx.Tx) (int,
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (self *Store) GetEnvironment(id int, tx *sqlx.Tx) (*Environment, error) {
|
||||
func (str *Store) GetEnvironment(id int, tx *sqlx.Tx) (*Environment, error) {
|
||||
i := &Environment{}
|
||||
if err := tx.QueryRowx("select * from environments where id = $1", id).StructScan(i); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting environment by id")
|
||||
@ -47,7 +47,7 @@ func (self *Store) GetEnvironment(id int, tx *sqlx.Tx) (*Environment, error) {
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindEnvironmentsForAccount(accountId int, tx *sqlx.Tx) ([]*Environment, error) {
|
||||
func (str *Store) FindEnvironmentsForAccount(accountId int, tx *sqlx.Tx) ([]*Environment, error) {
|
||||
rows, err := tx.Queryx("select environments.* from environments where account_id = $1 and not deleted", accountId)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting environments by account id")
|
||||
@ -63,7 +63,7 @@ func (self *Store) FindEnvironmentsForAccount(accountId int, tx *sqlx.Tx) ([]*En
|
||||
return is, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindEnvironmentForAccount(envZId string, accountId int, tx *sqlx.Tx) (*Environment, error) {
|
||||
func (str *Store) FindEnvironmentForAccount(envZId string, accountId int, tx *sqlx.Tx) (*Environment, error) {
|
||||
env := &Environment{}
|
||||
if err := tx.QueryRowx("select environments.* from environments where z_id = $1 and account_id = $2 and not deleted", envZId, accountId).StructScan(env); err != nil {
|
||||
return nil, errors.Wrap(err, "error finding environment by z_id and account_id")
|
||||
@ -71,7 +71,7 @@ func (self *Store) FindEnvironmentForAccount(envZId string, accountId int, tx *s
|
||||
return env, nil
|
||||
}
|
||||
|
||||
func (self *Store) DeleteEnvironment(id int, tx *sqlx.Tx) error {
|
||||
func (str *Store) DeleteEnvironment(id int, tx *sqlx.Tx) error {
|
||||
stmt, err := tx.Prepare("update environments set updated_at = current_timestamp, deleted = true where id = $1")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error preparing environments delete statement")
|
||||
|
65
controller/store/environmentLimitJournal.go
Normal file
@ -0,0 +1,65 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type EnvironmentLimitJournal struct {
|
||||
Model
|
||||
EnvironmentId int
|
||||
RxBytes int64
|
||||
TxBytes int64
|
||||
Action LimitJournalAction
|
||||
}
|
||||
|
||||
func (str *Store) CreateEnvironmentLimitJournal(j *EnvironmentLimitJournal, trx *sqlx.Tx) (int, error) {
|
||||
stmt, err := trx.Prepare("insert into environment_limit_journal (environment_id, rx_bytes, tx_bytes, action) values ($1, $2, $3, $4) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing environment_limit_journal insert statement")
|
||||
}
|
||||
var id int
|
||||
if err := stmt.QueryRow(j.EnvironmentId, j.RxBytes, j.TxBytes, j.Action).Scan(&id); err != nil {
|
||||
return 0, errors.Wrap(err, "error executing environment_limit_journal insert statement")
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (str *Store) IsEnvironmentLimitJournalEmpty(envId int, trx *sqlx.Tx) (bool, error) {
|
||||
count := 0
|
||||
if err := trx.QueryRowx("select count(0) from environment_limit_journal where environment_id = $1", envId).Scan(&count); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return count == 0, nil
|
||||
}
|
||||
|
||||
func (str *Store) FindLatestEnvironmentLimitJournal(envId int, trx *sqlx.Tx) (*EnvironmentLimitJournal, error) {
|
||||
j := &EnvironmentLimitJournal{}
|
||||
if err := trx.QueryRowx("select * from environment_limit_journal where environment_id = $1 order by created_at desc limit 1", envId).StructScan(j); err != nil {
|
||||
return nil, errors.Wrap(err, "error finding environment_limit_journal by environment_id")
|
||||
}
|
||||
return j, nil
|
||||
}
|
||||
|
||||
func (str *Store) FindAllLatestEnvironmentLimitJournal(trx *sqlx.Tx) ([]*EnvironmentLimitJournal, error) {
|
||||
rows, err := trx.Queryx("select id, environment_id, rx_bytes, tx_bytes, action, created_at, updated_at from environment_limit_journal where id in (select max(id) as id from environment_limit_journal group by environment_id)")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting all latest environment_limit_journal")
|
||||
}
|
||||
var eljs []*EnvironmentLimitJournal
|
||||
for rows.Next() {
|
||||
elj := &EnvironmentLimitJournal{}
|
||||
if err := rows.StructScan(elj); err != nil {
|
||||
return nil, errors.Wrap(err, "error scanning environment_limit_journal")
|
||||
}
|
||||
eljs = append(eljs, elj)
|
||||
}
|
||||
return eljs, nil
|
||||
}
|
||||
|
||||
func (str *Store) DeleteEnvironmentLimitJournalForEnvironment(envId int, trx *sqlx.Tx) error {
|
||||
if _, err := trx.Exec("delete from environment_limit_journal where environment_id = $1", envId); err != nil {
|
||||
return errors.Wrapf(err, "error deleteing environment_limit_journal for '#%d'", envId)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -7,22 +7,23 @@ import (
|
||||
|
||||
type Frontend struct {
|
||||
Model
|
||||
EnvironmentId *int
|
||||
Token string
|
||||
ZId string
|
||||
PublicName *string
|
||||
UrlTemplate *string
|
||||
Reserved bool
|
||||
Deleted bool
|
||||
EnvironmentId *int
|
||||
PrivateShareId *int
|
||||
Token string
|
||||
ZId string
|
||||
PublicName *string
|
||||
UrlTemplate *string
|
||||
Reserved bool
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
func (str *Store) CreateFrontend(envId int, f *Frontend, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("insert into frontends (environment_id, token, z_id, public_name, url_template, reserved) values ($1, $2, $3, $4, $5, $6) returning id")
|
||||
stmt, err := tx.Prepare("insert into frontends (environment_id, private_share_id, token, z_id, public_name, url_template, reserved) values ($1, $2, $3, $4, $5, $6, $7) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing frontends insert statement")
|
||||
}
|
||||
var id int
|
||||
if err := stmt.QueryRow(envId, f.Token, f.ZId, f.PublicName, f.UrlTemplate, f.Reserved).Scan(&id); err != nil {
|
||||
if err := stmt.QueryRow(envId, f.PrivateShareId, f.Token, f.ZId, f.PublicName, f.UrlTemplate, f.Reserved).Scan(&id); err != nil {
|
||||
return 0, errors.Wrap(err, "error executing frontends insert statement")
|
||||
}
|
||||
return id, nil
|
||||
@ -104,13 +105,29 @@ func (str *Store) FindPublicFrontends(tx *sqlx.Tx) ([]*Frontend, error) {
|
||||
return frontends, nil
|
||||
}
|
||||
|
||||
func (str *Store) FindFrontendsForPrivateShare(shrId int, tx *sqlx.Tx) ([]*Frontend, error) {
|
||||
rows, err := tx.Queryx("select frontends.* from frontends where private_share_id = $1 and not deleted", shrId)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting frontends by private_share_id")
|
||||
}
|
||||
var is []*Frontend
|
||||
for rows.Next() {
|
||||
i := &Frontend{}
|
||||
if err := rows.StructScan(i); err != nil {
|
||||
return nil, errors.Wrap(err, "error scanning frontend")
|
||||
}
|
||||
is = append(is, i)
|
||||
}
|
||||
return is, nil
|
||||
}
|
||||
|
||||
func (str *Store) UpdateFrontend(fe *Frontend, tx *sqlx.Tx) error {
|
||||
sql := "update frontends set environment_id = $1, token = $2, z_id = $3, public_name = $4, url_template = $5, reserved = $6, updated_at = current_timestamp where id = $7"
|
||||
sql := "update frontends set environment_id = $1, private_share_id = $2, token = $3, z_id = $4, public_name = $5, url_template = $6, reserved = $7, updated_at = current_timestamp where id = $8"
|
||||
stmt, err := tx.Prepare(sql)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error preparing frontends update statement")
|
||||
}
|
||||
_, err = stmt.Exec(fe.EnvironmentId, fe.Token, fe.ZId, fe.PublicName, fe.UrlTemplate, fe.Reserved, fe.Id)
|
||||
_, err = stmt.Exec(fe.EnvironmentId, fe.PrivateShareId, fe.Token, fe.ZId, fe.PublicName, fe.UrlTemplate, fe.Reserved, fe.Id)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error executing frontends update statement")
|
||||
}
|
||||
|
9
controller/store/model.go
Normal file
@ -0,0 +1,9 @@
|
||||
package store
|
||||
|
||||
type LimitJournalAction string
|
||||
|
||||
const (
|
||||
LimitAction LimitJournalAction = "limit"
|
||||
WarningAction LimitJournalAction = "warning"
|
||||
ClearAction LimitJournalAction = "clear"
|
||||
)
|
@ -16,7 +16,7 @@ type PasswordResetRequest struct {
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
func (self *Store) CreatePasswordResetRequest(prr *PasswordResetRequest, tx *sqlx.Tx) (int, error) {
|
||||
func (str *Store) CreatePasswordResetRequest(prr *PasswordResetRequest, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("insert into password_reset_requests (account_id, token) values ($1, $2) ON CONFLICT(account_id) DO UPDATE SET token=$2 returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing password_reset_requests insert statement")
|
||||
@ -28,7 +28,7 @@ func (self *Store) CreatePasswordResetRequest(prr *PasswordResetRequest, tx *sql
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindPasswordResetRequestWithToken(token string, tx *sqlx.Tx) (*PasswordResetRequest, error) {
|
||||
func (str *Store) FindPasswordResetRequestWithToken(token string, tx *sqlx.Tx) (*PasswordResetRequest, error) {
|
||||
prr := &PasswordResetRequest{}
|
||||
if err := tx.QueryRowx("select * from password_reset_requests where token = $1 and not deleted", token).StructScan(prr); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting password_reset_requests by token")
|
||||
@ -36,16 +36,16 @@ func (self *Store) FindPasswordResetRequestWithToken(token string, tx *sqlx.Tx)
|
||||
return prr, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindExpiredPasswordResetRequests(before time.Time, limit int, tx *sqlx.Tx) ([]*PasswordResetRequest, error) {
|
||||
func (str *Store) FindExpiredPasswordResetRequests(before time.Time, limit int, tx *sqlx.Tx) ([]*PasswordResetRequest, error) {
|
||||
var sql string
|
||||
switch self.cfg.Type {
|
||||
switch str.cfg.Type {
|
||||
case "postgres":
|
||||
sql = "select * from password_reset_requests where created_at < $1 and not deleted limit %d for update"
|
||||
|
||||
case "sqlite3":
|
||||
sql = "select * from password_reset_requests where created_at < $1 and not deleted limit %d"
|
||||
default:
|
||||
return nil, errors.Errorf("unknown database type '%v'", self.cfg.Type)
|
||||
return nil, errors.Errorf("unknown database type '%v'", str.cfg.Type)
|
||||
}
|
||||
|
||||
rows, err := tx.Queryx(fmt.Sprintf(sql, limit), before)
|
||||
@ -63,7 +63,7 @@ func (self *Store) FindExpiredPasswordResetRequests(before time.Time, limit int,
|
||||
return prrs, nil
|
||||
}
|
||||
|
||||
func (self *Store) DeletePasswordResetRequest(id int, tx *sqlx.Tx) error {
|
||||
func (str *Store) DeletePasswordResetRequest(id int, tx *sqlx.Tx) error {
|
||||
stmt, err := tx.Prepare("update password_reset_requests set updated_at = current_timestamp, deleted = true where id = $1")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error preparing password_reset_requests delete statement")
|
||||
@ -75,7 +75,7 @@ func (self *Store) DeletePasswordResetRequest(id int, tx *sqlx.Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Store) DeleteMultiplePasswordResetRequests(ids []int, tx *sqlx.Tx) error {
|
||||
func (str *Store) DeleteMultiplePasswordResetRequests(ids []int, tx *sqlx.Tx) error {
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
@ -19,7 +19,7 @@ type Share struct {
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
func (self *Store) CreateShare(envId int, shr *Share, tx *sqlx.Tx) (int, error) {
|
||||
func (str *Store) CreateShare(envId int, shr *Share, tx *sqlx.Tx) (int, error) {
|
||||
stmt, err := tx.Prepare("insert into shares (environment_id, z_id, token, share_mode, backend_mode, frontend_selection, frontend_endpoint, backend_proxy_endpoint, reserved) values ($1, $2, $3, $4, $5, $6, $7, $8, $9) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing shares insert statement")
|
||||
@ -31,7 +31,7 @@ func (self *Store) CreateShare(envId int, shr *Share, tx *sqlx.Tx) (int, error)
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (self *Store) GetShare(id int, tx *sqlx.Tx) (*Share, error) {
|
||||
func (str *Store) GetShare(id int, tx *sqlx.Tx) (*Share, error) {
|
||||
shr := &Share{}
|
||||
if err := tx.QueryRowx("select * from shares where id = $1", id).StructScan(shr); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting share by id")
|
||||
@ -39,7 +39,7 @@ func (self *Store) GetShare(id int, tx *sqlx.Tx) (*Share, error) {
|
||||
return shr, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindAllShares(tx *sqlx.Tx) ([]*Share, error) {
|
||||
func (str *Store) FindAllShares(tx *sqlx.Tx) ([]*Share, error) {
|
||||
rows, err := tx.Queryx("select * from shares where not deleted order by id")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting all shares")
|
||||
@ -55,7 +55,7 @@ func (self *Store) FindAllShares(tx *sqlx.Tx) ([]*Share, error) {
|
||||
return shrs, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindShareWithToken(shrToken string, tx *sqlx.Tx) (*Share, error) {
|
||||
func (str *Store) FindShareWithToken(shrToken string, tx *sqlx.Tx) (*Share, error) {
|
||||
shr := &Share{}
|
||||
if err := tx.QueryRowx("select * from shares where token = $1 and not deleted", shrToken).StructScan(shr); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting share by token")
|
||||
@ -63,7 +63,7 @@ func (self *Store) FindShareWithToken(shrToken string, tx *sqlx.Tx) (*Share, err
|
||||
return shr, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindShareWithZIdAndDeleted(zId string, tx *sqlx.Tx) (*Share, error) {
|
||||
func (str *Store) FindShareWithZIdAndDeleted(zId string, tx *sqlx.Tx) (*Share, error) {
|
||||
shr := &Share{}
|
||||
if err := tx.QueryRowx("select * from shares where z_id = $1", zId).StructScan(shr); err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting share by z_id")
|
||||
@ -71,7 +71,7 @@ func (self *Store) FindShareWithZIdAndDeleted(zId string, tx *sqlx.Tx) (*Share,
|
||||
return shr, nil
|
||||
}
|
||||
|
||||
func (self *Store) FindSharesForEnvironment(envId int, tx *sqlx.Tx) ([]*Share, error) {
|
||||
func (str *Store) FindSharesForEnvironment(envId int, tx *sqlx.Tx) ([]*Share, error) {
|
||||
rows, err := tx.Queryx("select shares.* from shares where environment_id = $1 and not deleted", envId)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting shares by environment id")
|
||||
@ -87,7 +87,7 @@ func (self *Store) FindSharesForEnvironment(envId int, tx *sqlx.Tx) ([]*Share, e
|
||||
return shrs, nil
|
||||
}
|
||||
|
||||
func (self *Store) UpdateShare(shr *Share, tx *sqlx.Tx) error {
|
||||
func (str *Store) UpdateShare(shr *Share, tx *sqlx.Tx) error {
|
||||
sql := "update shares set z_id = $1, token = $2, share_mode = $3, backend_mode = $4, frontend_selection = $5, frontend_endpoint = $6, backend_proxy_endpoint = $7, reserved = $8, updated_at = current_timestamp where id = $9"
|
||||
stmt, err := tx.Prepare(sql)
|
||||
if err != nil {
|
||||
@ -100,7 +100,7 @@ func (self *Store) UpdateShare(shr *Share, tx *sqlx.Tx) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Store) DeleteShare(id int, tx *sqlx.Tx) error {
|
||||
func (str *Store) DeleteShare(id int, tx *sqlx.Tx) error {
|
||||
stmt, err := tx.Prepare("update shares set updated_at = current_timestamp, deleted = true where id = $1")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error preparing shares delete statement")
|
||||
|
65
controller/store/shareLimitJournal.go
Normal file
@ -0,0 +1,65 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type ShareLimitJournal struct {
|
||||
Model
|
||||
ShareId int
|
||||
RxBytes int64
|
||||
TxBytes int64
|
||||
Action LimitJournalAction
|
||||
}
|
||||
|
||||
func (str *Store) CreateShareLimitJournal(j *ShareLimitJournal, trx *sqlx.Tx) (int, error) {
|
||||
stmt, err := trx.Prepare("insert into share_limit_journal (share_id, rx_bytes, tx_bytes, action) values ($1, $2, $3, $4) returning id")
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error preparing share_limit_journal insert statement")
|
||||
}
|
||||
var id int
|
||||
if err := stmt.QueryRow(j.ShareId, j.RxBytes, j.TxBytes, j.Action).Scan(&id); err != nil {
|
||||
return 0, errors.Wrap(err, "error executing share_limit_journal insert statement")
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (str *Store) IsShareLimitJournalEmpty(shrId int, trx *sqlx.Tx) (bool, error) {
|
||||
count := 0
|
||||
if err := trx.QueryRowx("select count(0) from share_limit_journal where share_id = $1", shrId).Scan(&count); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return count == 0, nil
|
||||
}
|
||||
|
||||
func (str *Store) FindLatestShareLimitJournal(shrId int, trx *sqlx.Tx) (*ShareLimitJournal, error) {
|
||||
j := &ShareLimitJournal{}
|
||||
if err := trx.QueryRowx("select * from share_limit_journal where share_id = $1 order by created_at desc limit 1", shrId).StructScan(j); err != nil {
|
||||
return nil, errors.Wrap(err, "error finding share_limit_journal by share_id")
|
||||
}
|
||||
return j, nil
|
||||
}
|
||||
|
||||
func (str *Store) FindAllLatestShareLimitJournal(trx *sqlx.Tx) ([]*ShareLimitJournal, error) {
|
||||
rows, err := trx.Queryx("select id, share_id, rx_bytes, tx_bytes, action, created_at, updated_at from share_limit_journal where id in (select max(id) as id from share_limit_journal group by share_id)")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error selecting all latest share_limit_journal")
|
||||
}
|
||||
var sljs []*ShareLimitJournal
|
||||
for rows.Next() {
|
||||
slj := &ShareLimitJournal{}
|
||||
if err := rows.StructScan(slj); err != nil {
|
||||
return nil, errors.Wrap(err, "error scanning share_limit_journal")
|
||||
}
|
||||
sljs = append(sljs, slj)
|
||||
}
|
||||
return sljs, nil
|
||||
}
|
||||
|
||||
func (str *Store) DeleteShareLimitJournalForShare(shrId int, trx *sqlx.Tx) error {
|
||||
if _, err := trx.Exec("delete from share_limit_journal where share_id = $1", shrId); err != nil {
|
||||
return errors.Wrapf(err, "error deleting share_limit_journal for '#%d'", shrId)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,33 @@
|
||||
-- +migrate Up
|
||||
|
||||
create type limit_action_type as enum ('clear', 'warning', 'limit');
|
||||
|
||||
create table account_limit_journal (
|
||||
id serial primary key,
|
||||
account_id integer references accounts(id),
|
||||
rx_bytes bigint not null,
|
||||
tx_bytes bigint not null,
|
||||
action limit_action_type not null,
|
||||
created_at timestamptz not null default(current_timestamp),
|
||||
updated_at timestamptz not null default(current_timestamp)
|
||||
);
|
||||
|
||||
create table environment_limit_journal (
|
||||
id serial primary key,
|
||||
environment_id integer references environments(id),
|
||||
rx_bytes bigint not null,
|
||||
tx_bytes bigint not null,
|
||||
action limit_action_type not null,
|
||||
created_at timestamptz not null default(current_timestamp),
|
||||
updated_at timestamptz not null default(current_timestamp)
|
||||
);
|
||||
|
||||
create table share_limit_journal (
|
||||
id serial primary key,
|
||||
share_id integer references shares(id),
|
||||
rx_bytes bigint not null,
|
||||
tx_bytes bigint not null,
|
||||
action limit_action_type not null,
|
||||
created_at timestamptz not null default(current_timestamp),
|
||||
updated_at timestamptz not null default(current_timestamp)
|
||||
);
|
@ -0,0 +1,31 @@
|
||||
-- +migrate Up
|
||||
|
||||
alter table frontends rename to frontends_old;
|
||||
alter sequence frontends_id_seq rename to frontends_id_seq_old;
|
||||
|
||||
create table frontends (
|
||||
id serial primary key,
|
||||
environment_id integer references environments(id),
|
||||
private_share_id integer references shares(id),
|
||||
token varchar(32) not null unique,
|
||||
z_id varchar(32) not null,
|
||||
url_template varchar(1024),
|
||||
public_name varchar(64) unique,
|
||||
reserved boolean not null default(false),
|
||||
created_at timestamptz not null default(current_timestamp),
|
||||
updated_at timestamptz not null default(current_timestamp),
|
||||
deleted boolean not null default(false)
|
||||
);
|
||||
|
||||
insert into frontends (id, environment_id, token, z_id, url_template, public_name, reserved, created_at, updated_at, deleted)
|
||||
select id, environment_id, token, z_id, url_template, public_name, reserved, created_at, updated_at, deleted from frontends_old;
|
||||
|
||||
select setval('frontends_id_seq', (select max(id) from frontends));
|
||||
|
||||
drop table frontends_old;
|
||||
|
||||
alter index frontends_pkey1 rename to frontends_pkey;
|
||||
alter index frontends_public_name_key1 rename to frontends_public_name_key;
|
||||
alter index frontends_token_key1 rename to frontends_token_key;
|
||||
|
||||
alter table frontends rename constraint frontends_environment_id_fkey1 to frontends_environment_id_fkey;
|
31
controller/store/sql/sqlite3/009_v0_4_0_limits_journals.sql
Normal file
@ -0,0 +1,31 @@
|
||||
-- +migrate Up
|
||||
|
||||
create table account_limit_journal (
|
||||
id integer primary key,
|
||||
account_id integer references accounts(id),
|
||||
rx_bytes bigint not null,
|
||||
tx_bytes bigint not null,
|
||||
action limit_action_type not null,
|
||||
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now'))
|
||||
);
|
||||
|
||||
create table environment_limit_journal (
|
||||
id integer primary key,
|
||||
environment_id integer references environments(id),
|
||||
rx_bytes bigint not null,
|
||||
tx_bytes bigint not null,
|
||||
action limit_action_type not null,
|
||||
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now'))
|
||||
);
|
||||
|
||||
create table share_limit_journal (
|
||||
id integer primary key,
|
||||
share_id integer references shares(id),
|
||||
rx_bytes bigint not null,
|
||||
tx_bytes bigint not null,
|
||||
action limit_action_type not null,
|
||||
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
|
||||
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now'))
|
||||
);
|
@ -0,0 +1,3 @@
|
||||
-- +migrate Up
|
||||
|
||||
alter table frontends add column private_share_id references shares(id);
|
@ -62,15 +62,15 @@ func Open(cfg *Config) (*Store, error) {
|
||||
return store, nil
|
||||
}
|
||||
|
||||
func (self *Store) Begin() (*sqlx.Tx, error) {
|
||||
return self.db.Beginx()
|
||||
func (str *Store) Begin() (*sqlx.Tx, error) {
|
||||
return str.db.Beginx()
|
||||
}
|
||||
|
||||
func (self *Store) Close() error {
|
||||
return self.db.Close()
|
||||
func (str *Store) Close() error {
|
||||
return str.db.Close()
|
||||
}
|
||||
|
||||
func (self *Store) migrate(cfg *Config) error {
|
||||
func (str *Store) migrate(cfg *Config) error {
|
||||
switch cfg.Type {
|
||||
case "sqlite3":
|
||||
migrations := &migrate.EmbedFileSystemMigrationSource{
|
||||
@ -78,7 +78,7 @@ func (self *Store) migrate(cfg *Config) error {
|
||||
Root: "/",
|
||||
}
|
||||
migrate.SetTable("migrations")
|
||||
n, err := migrate.Exec(self.db.DB, "sqlite3", migrations, migrate.Up)
|
||||
n, err := migrate.Exec(str.db.DB, "sqlite3", migrations, migrate.Up)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error running migrations")
|
||||
}
|
||||
@ -90,7 +90,7 @@ func (self *Store) migrate(cfg *Config) error {
|
||||
Root: "/",
|
||||
}
|
||||
migrate.SetTable("migrations")
|
||||
n, err := migrate.Exec(self.db.DB, "postgres", migrations, migrate.Up)
|
||||
n, err := migrate.Exec(str.db.DB, "postgres", migrations, migrate.Up)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error running migrations")
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ package controller
|
||||
import (
|
||||
errors2 "github.com/go-openapi/errors"
|
||||
"github.com/jaevor/go-nanoid"
|
||||
"github.com/openziti/zrok/controller/config"
|
||||
"github.com/openziti/zrok/rest_model_zrok"
|
||||
"github.com/sirupsen/logrus"
|
||||
"net/http"
|
||||
@ -10,10 +11,10 @@ import (
|
||||
)
|
||||
|
||||
type zrokAuthenticator struct {
|
||||
cfg *Config
|
||||
cfg *config.Config
|
||||
}
|
||||
|
||||
func newZrokAuthenticator(cfg *Config) *zrokAuthenticator {
|
||||
func newZrokAuthenticator(cfg *config.Config) *zrokAuthenticator {
|
||||
return &zrokAuthenticator{cfg}
|
||||
}
|
||||
|
||||
|
@ -6,13 +6,13 @@ import (
|
||||
"github.com/openziti/edge/rest_util"
|
||||
)
|
||||
|
||||
type ZitiConfig struct {
|
||||
type Config struct {
|
||||
ApiEndpoint string
|
||||
Username string
|
||||
Password string `cf:"+secret"`
|
||||
}
|
||||
|
||||
func Client(cfg *ZitiConfig) (*rest_management_api_client.ZitiEdgeManagement, error) {
|
||||
func Client(cfg *Config) (*rest_management_api_client.ZitiEdgeManagement, error) {
|
||||
caCerts, err := rest_util.GetControllerWellKnownCas(cfg.ApiEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
7
docs/guides/metrics-and-limits/_category_.json
Normal file
@ -0,0 +1,7 @@
|
||||
{
|
||||
"label": "Metrics and Limits",
|
||||
"position": 40,
|
||||
"link": {
|
||||
"type": "generated-index"
|
||||
}
|
||||
}
|
84
docs/guides/metrics-and-limits/configuring-limits.md
Normal file
@ -0,0 +1,84 @@
|
||||
# Configuring Limits
|
||||
|
||||
> If you have not yet configured [metrics](configuring-metrics.md), please work through the [metrics guide](configuring-metrics.md) before configuring limits.
|
||||
|
||||
The limits facility in `zrok` is responsible for controlling the number of resources in use (environments, shares) and also for ensuring that any single account, environment, or share is held below the configured thresholds.
|
||||
|
||||
Take this `zrok` controller configuration stanza as an example:
|
||||
|
||||
```yaml
|
||||
limits:
|
||||
enforcing: true
|
||||
cycle: 1m
|
||||
environments: -1
|
||||
shares: -1
|
||||
bandwidth:
|
||||
per_account:
|
||||
period: 5m
|
||||
warning:
|
||||
rx: -1
|
||||
tx: -1
|
||||
total: 7242880
|
||||
limit:
|
||||
rx: -1
|
||||
tx: -1
|
||||
total: 10485760
|
||||
per_environment:
|
||||
period: 5m
|
||||
warning:
|
||||
rx: -1
|
||||
tx: -1
|
||||
total: -1
|
||||
limit:
|
||||
rx: -1
|
||||
tx: -1
|
||||
total: -1
|
||||
per_share:
|
||||
period: 5m
|
||||
warning:
|
||||
rx: -1
|
||||
tx: -1
|
||||
total: -1
|
||||
limit:
|
||||
rx: -1
|
||||
tx: -1
|
||||
total: -1
|
||||
```
|
||||
|
||||
## The Global Controls
|
||||
|
||||
The `enforcing` boolean will globally enable or disable limits for the controller.
|
||||
|
||||
The `cycle` value controls how frequently the limits system will look for limited resources to re-enable.
|
||||
|
||||
## Resource Limits
|
||||
|
||||
The `environments` and `shares` values control the number of environments and shares allowed per account. Any limit value can be set to `-1`, which means _unlimited_.
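For example, a minimal sketch (the specific numbers are illustrative assumptions, not recommendations) that caps each account at 3 environments and 10 shares might look like:

```yaml
limits:
  enforcing: true
  cycle: 1m
  environments: 3   # each account may register at most 3 environments
  shares: 10        # each account may create at most 10 shares
```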
|
||||
|
||||
## Bandwidth Limits
|
||||
|
||||
The `bandwidth` section is designed to provide a configurable system for controlling the amount of data transfer that can be performed by users of the `zrok` service instance. The bandwidth limits are configurable for each share, environment, and account.
|
||||
|
||||
`per_account`, `per_environment`, and `per_share` are all configured the same way:
|
||||
|
||||
The `period` specifies the time window for the bandwidth limit. See the documentation for [`time.ParseDuration`](https://pkg.go.dev/time#ParseDuration) for details about the format used for these durations. If the `period` is set to 5 minutes, the limits implementation monitors the send and receive traffic for the resource (share, environment, or account) over the last 5 minutes; if the amount of data exceeds either the `warning` or the `limit` threshold, action is taken.
|
||||
|
||||
The `rx` value is the number of bytes _received_ by the resource. The `tx` value is the number of bytes _transmitted_ by the resource. And `total` is the combined `rx`+`tx` value.
|
||||
|
||||
If the traffic quantity is greater than the `warning` threshold, the user will receive an email notification letting them know that their data transfer size is rising and will eventually be limited (the email details the limit threshold).
|
||||
|
||||
If the traffic quantity is greater than the `limit` threshold, the resources will be limited until the traffic in the window (the last 5 minutes in our example) falls back below the `limit` threshold.
|
||||
|
||||
### Limit Actions
|
||||
|
||||
When a resource is limited, the actions taken differ depending on what kind of resource is being limited.
|
||||
|
||||
When a share is limited, the dial service policies for that share are removed. No other action is taken. This means that public frontends will simply return a `404` as if the share is no longer there. Private frontends will also return `404` errors. When the limit is relaxed, the dial policies are put back in place and the share will continue operating normally.
|
||||
|
||||
When an environment is limited, all of the shares in that environment become limited, and the user is not able to create new shares in that environment. When the limit is relaxed, all of the share limits are relaxed and the user is again able to add shares to the environment.
|
||||
|
||||
When an account is limited, all of the environments in that account become limited (limiting all of the shares), and the user is not able to create new environments or shares. When the limit is relaxed, all of the environments and shares will return to normal operation.
|
||||
|
||||
## Unlimited Accounts
|
||||
|
||||
The `accounts` table in the database includes a `limitless` column. When this column is set to `true` the account is not subject to any of the limits in the system.
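As a sketch, the flag can be toggled directly against the controller database (the email address below is a placeholder):

```sql
-- exempt a single account from all limits
update accounts set limitless = true where email = 'someone@example.com';

-- return the account to normal limit enforcement
update accounts set limitless = false where email = 'someone@example.com';
```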
|
116
docs/guides/metrics-and-limits/configuring-metrics.md
Normal file
@ -0,0 +1,116 @@
|
||||
# Configuring Metrics
|
||||
|
||||
A fully configured, production-scale `zrok` service instance looks like this:
|
||||
|
||||

|
||||
|
||||
`zrok` metrics builds on top of the `fabric.usage` event type from OpenZiti. The OpenZiti controller has a number of ways to emit events, and the `zrok` controller has several ways to consume `fabric.usage` events. Smaller installations could be configured in these ways:
|
||||
|
||||

|
||||
|
||||
Environments that horizontally scale the `zrok` control plane with multiple controllers should use an AMQP-based queue to "fan out" the metrics workload across the entire control plane. Simpler installations that use a single `zrok` controller can collect `fabric.usage` events from the OpenZiti controller by "tailing" the events log file, or by collecting them from the OpenZiti controller's websocket implementation.
|
||||
|
||||
## Configuring the OpenZiti Controller
|
||||
|
||||
> This requires a version of OpenZiti with a `fabric` dependency of `v0.22.52` or newer.
|
||||
|
||||
Emitting `fabric.usage` events to a file is currently the most reliable mechanism to capture usage events into `zrok`. We're going to configure the OpenZiti controller to append `fabric.usage` events to a file, by adding this stanza to the OpenZiti controller configuration:
|
||||
|
||||
```yaml
|
||||
events:
|
||||
jsonLogger:
|
||||
subscriptions:
|
||||
- type: fabric.usage
|
||||
version: 3
|
||||
handler:
|
||||
type: file
|
||||
format: json
|
||||
path: /tmp/fabric-usage.json
|
||||
```
|
||||
|
||||
You'll want to adjust the `events/jsonLogger/handler/path` to wherever you would like to send these events for ingestion into `zrok`. There are additional OpenZiti options that control file rotation. Be sure to consult the OpenZiti docs to tune these settings appropriately for your environment.
|
||||
|
||||
By default the OpenZiti events infrastructure reports and batches events in 1-minute buckets. One minute is too large an interval to provide a snappy `zrok` metrics experience, so let's increase the frequency to every 5 seconds. Add this to the `network` stanza of your OpenZiti controller:
|
||||
|
||||
```yaml
|
||||
network:
|
||||
intervalAgeThreshold: 5s
|
||||
metricsReportInterval: 5s
|
||||
```
|
||||
|
||||
And you'll want to add this stanza to the router configuration for every router on your OpenZiti network:
|
||||
|
||||
```yaml
|
||||
metrics:
|
||||
reportInterval: 5s
|
||||
intervalAgeThreshold: 5s
|
||||
```
|
||||
|
||||
Be sure to restart all of the components of your OpenZiti network after making these configuration changes.
|
||||
|
||||
## Configuring the zrok Metrics Bridge
|
||||
|
||||
`zrok` currently uses a "metrics bridge" component (running as a separate process) to consume the `fabric.usage` events from the OpenZiti controller, and publish them onto an AMQP queue. Add a stanza like the following to your `zrok` controller configuration:
|
||||
|
||||
```yaml
|
||||
bridge:
|
||||
source:
|
||||
type: fileSource
|
||||
path: /tmp/fabric-usage.json
|
||||
sink:
|
||||
type: amqpSink
|
||||
url: amqp://guest:guest@localhost:5672
|
||||
queue_name: events
|
||||
```
|
||||
|
||||
This configuration consumes the `fabric.usage` events from the file we previously specified in our OpenZiti controller configuration, and publishes them onto an AMQP queue.
|
||||
|
||||
### RabbitMQ
|
||||
|
||||
For this example, we're going to use RabbitMQ as our AMQP implementation. The stock, default RabbitMQ configuration, launched as a `docker` container will work just fine:
|
||||
|
||||
```
|
||||
$ docker run -it --rm --name rabbitmq -p 5672:5672 -p 15672:15672 rabbitmq:3.11-management
|
||||
```
|
||||
|
||||
Once RabbitMQ is running, you can start the `zrok` metrics bridge by pointing it at your `zrok` controller configuration, like this:
|
||||
|
||||
```
|
||||
$ zrok ctrl metrics bridge <path/to/zrok-controller.yaml>
|
||||
```
|
||||
|
||||
## Configuring zrok Metrics
|
||||
|
||||
Configure the `metrics` section of your `zrok` controller. Here is an example:
|
||||
|
||||
```yaml
|
||||
metrics:
|
||||
agent:
|
||||
source:
|
||||
type: amqpSource
|
||||
url: amqp://guest:guest@localhost:5672
|
||||
queue_name: events
|
||||
influx:
|
||||
url: "http://127.0.0.1:8086"
|
||||
bucket: zrok
|
||||
org: zrok
|
||||
token: "<secret token>"
|
||||
```
|
||||
|
||||
This configures the `zrok` controller to consume usage events from the AMQP queue, and configures the InfluxDB metrics store.
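If you don't already have an InfluxDB v2 instance available, a throwaway container is enough for testing (the image tag and port mapping here are illustrative); complete its initial setup to create the `zrok` org and bucket and an API token, and paste that token into the configuration above:

```
$ docker run -it --rm --name influxdb -p 8086:8086 influxdb:2.7
```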
|
||||
|
||||
## Testing Metrics
|
||||
|
||||
With all of the components configured and running, either use `zrok test loop` or manually create share(s) to generate traffic on the `zrok` instance. If everything is working correctly, you should see log messages from the controller like the following, which indicate that the controller is processing OpenZiti usage events and generating `zrok` metrics:
|
||||
|
||||
```
|
||||
[5339.658] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 736z80mr4syu, circuit: Ad1V-6y48 backend {rx: 4.5 kB, tx: 4.6 kB} frontend {rx: 4.6 kB, tx: 4.5 kB}
|
||||
[5349.652] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 736z80mr4syu, circuit: Ad1V-6y48 backend {rx: 2.5 kB, tx: 2.6 kB} frontend {rx: 2.6 kB, tx: 2.5 kB}
|
||||
[5354.657] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 5a4u7lqxb7pa, circuit: iG1--6H4S backend {rx: 13.2 kB, tx: 13.3 kB} frontend {rx: 13.3 kB, tx: 13.2 kB}
|
||||
```
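To confirm that usage records are landing in InfluxDB, you can also run a query modeled on the one the controller itself uses for spark data (the bucket, measurement, field, and tag names below come from the controller source; adjust the range as needed) in the Influx UI or CLI:

```
from(bucket: "zrok")
  |> range(start: -5m)
  |> filter(fn: (r) => r["_measurement"] == "xfer")
  |> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")
  |> filter(fn: (r) => r["namespace"] == "backend")
```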
|
||||
|
||||
The `zrok` web console should also be showing activity for your share(s) like the following:
|
||||
|
||||

|
||||
|
||||
With metrics configured, you might be interested in [configuring limits](configuring-limits.md).
|
@ -0,0 +1,70 @@
|
||||
<mxfile host="Electron" modified="2023-04-04T16:56:44.671Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/21.1.2 Chrome/106.0.5249.199 Electron/21.4.3 Safari/537.36" etag="hNOxKmEJVuYIWfjZN-Q2" version="21.1.2" type="device">
|
||||
<diagram name="Page-1" id="IMoEC3u-7S6gkD3jGaqt">
|
||||
<mxGraphModel dx="1030" dy="801" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="600" pageHeight="400" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-1" value="Ziti<br>Controller" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="40" y="50" width="120" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-2" value="events.json" style="shape=document;whiteSpace=wrap;html=1;boundedLbl=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="190" y="65" width="80" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-3" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.875;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-1" target="z8BNBxY42kQ6VSPeSeC1-2">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="280" y="280" as="sourcePoint" />
|
||||
<mxPoint x="330" y="230" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-15" value="zrok<br>Metrics Store<br><font style="font-size: 9px;">(InfluxDB)</font>" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;" vertex="1" parent="1">
|
||||
<mxGeometry x="471" y="40" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-17" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;exitX=0;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-15" target="z8BNBxY42kQ6VSPeSeC1-11">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="501" y="284" as="sourcePoint" />
|
||||
<mxPoint x="551" y="234" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-18" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-2" target="z8BNBxY42kQ6VSPeSeC1-11">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="190" y="230" as="sourcePoint" />
|
||||
<mxPoint x="240" y="180" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-19" value="Ziti<br>Controller" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="98" y="270" width="120" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-24" value="zrok<br>Metrics Store<br><font style="font-size: 9px;">(InfluxDB)</font>" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;" vertex="1" parent="1">
|
||||
<mxGeometry x="413" y="260" width="90" height="100" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-25" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;exitX=0;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-24" target="z8BNBxY42kQ6VSPeSeC1-23">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="443" y="504" as="sourcePoint" />
|
||||
<mxPoint x="493" y="454" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-23" value="zrok<br>Controller" style="rounded=1;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="252" y="280" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-29" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.875;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-19" target="z8BNBxY42kQ6VSPeSeC1-23">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="198" y="462" as="sourcePoint" />
|
||||
<mxPoint x="248" y="412" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-30" value="" style="endArrow=none;dashed=1;html=1;dashPattern=1 3;strokeWidth=2;rounded=0;" edge="1" parent="1">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="220" y="310" as="sourcePoint" />
|
||||
<mxPoint x="250" y="230" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-31" value="Events over Websocket" style="text;html=1;strokeColor=none;fillColor=none;align=center;verticalAlign=middle;whiteSpace=wrap;rounded=0;fontSize=9;" vertex="1" parent="1">
|
||||
<mxGeometry x="200" y="210" width="100" height="20" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-11" value="zrok<br>Controller" style="rounded=1;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="310" y="60" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
Binary file not shown (image, 33 KiB).
@ -0,0 +1,67 @@
|
||||
<mxfile host="Electron" modified="2023-04-04T15:26:45.884Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/21.1.2 Chrome/106.0.5249.199 Electron/21.4.3 Safari/537.36" etag="WmZNtDHtF91euXotUkS-" version="21.1.2" type="device">
|
||||
<diagram name="Page-1" id="IMoEC3u-7S6gkD3jGaqt">
|
||||
<mxGraphModel dx="1030" dy="801" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="600" pageHeight="600" math="0" shadow="0">
|
||||
<root>
|
||||
<mxCell id="0" />
|
||||
<mxCell id="1" parent="0" />
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-1" value="Ziti<br>Controller" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="110" y="55" width="120" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-2" value="events.json" style="shape=document;whiteSpace=wrap;html=1;boundedLbl=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="260" y="70" width="80" height="50" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-3" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.875;exitY=0.5;exitDx=0;exitDy=0;exitPerimeter=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-1" target="z8BNBxY42kQ6VSPeSeC1-2">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="350" y="285" as="sourcePoint" />
|
||||
<mxPoint x="400" y="235" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-4" value="zrok<br>Metrics Bridge" style="rounded=1;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="370" y="65" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-5" value="" style="endArrow=classic;html=1;rounded=0;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=0;entryY=0.5;entryDx=0;entryDy=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-2" target="z8BNBxY42kQ6VSPeSeC1-4">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="300" y="255" as="sourcePoint" />
|
||||
<mxPoint x="350" y="205" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-6" value="Event Queue<br><font style="font-size: 9px;">(AMQP/RabbitMQ)</font>" style="ellipse;shape=cloud;whiteSpace=wrap;html=1;" vertex="1" parent="1">
|
||||
<mxGeometry x="240" y="175" width="120" height="80" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-12" value="" style="group" vertex="1" connectable="0" parent="1">
|
||||
<mxGeometry x="230.5" y="295" width="139" height="81" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-9" value="" style="rounded=1;whiteSpace=wrap;html=1;" vertex="1" parent="z8BNBxY42kQ6VSPeSeC1-12">
|
||||
<mxGeometry width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-10" value="" style="rounded=1;whiteSpace=wrap;html=1;" vertex="1" parent="z8BNBxY42kQ6VSPeSeC1-12">
|
||||
<mxGeometry x="10" y="10" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-11" value="zrok<br>Controller(s)" style="rounded=1;whiteSpace=wrap;html=1;" vertex="1" parent="z8BNBxY42kQ6VSPeSeC1-12">
|
||||
<mxGeometry x="19" y="21" width="120" height="60" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-13" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.5;exitY=1;exitDx=0;exitDy=0;entryX=0.88;entryY=0.25;entryDx=0;entryDy=0;entryPerimeter=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-4" target="z8BNBxY42kQ6VSPeSeC1-6">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="430" y="165" as="sourcePoint" />
|
||||
<mxPoint x="470" y="205" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-14" value="" style="endArrow=classic;html=1;rounded=0;exitX=0.55;exitY=0.95;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-6" target="z8BNBxY42kQ6VSPeSeC1-9">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="470" y="355" as="sourcePoint" />
|
||||
<mxPoint x="520" y="305" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-15" value="zrok<br>Metrics Store<br><font style="font-size: 9px;">(InfluxDB)</font>" style="shape=cylinder3;whiteSpace=wrap;html=1;boundedLbl=1;backgroundOutline=1;size=15;" vertex="1" parent="1">
|
||||
<mxGeometry x="250" y="425" width="100" height="120" as="geometry" />
|
||||
</mxCell>
|
||||
<mxCell id="z8BNBxY42kQ6VSPeSeC1-17" value="" style="endArrow=classic;startArrow=classic;html=1;rounded=0;entryX=0.5;entryY=1;entryDx=0;entryDy=0;exitX=0.5;exitY=0;exitDx=0;exitDy=0;exitPerimeter=0;" edge="1" parent="1" source="z8BNBxY42kQ6VSPeSeC1-15" target="z8BNBxY42kQ6VSPeSeC1-11">
|
||||
<mxGeometry width="50" height="50" relative="1" as="geometry">
|
||||
<mxPoint x="450" y="435" as="sourcePoint" />
|
||||
<mxPoint x="500" y="385" as="targetPoint" />
|
||||
</mxGeometry>
|
||||
</mxCell>
|
||||
</root>
|
||||
</mxGraphModel>
|
||||
</diagram>
|
||||
</mxfile>
|
BIN docs/guides/metrics-and-limits/images/metrics-architecture.png (Normal file, 35 KiB, binary file not shown)
BIN docs/guides/metrics-and-limits/images/zrok-console-activity.png (Executable file, 83 KiB, binary file not shown)
694 docs/guides/v0.4_limits.md (Normal file)
@ -0,0 +1,694 @@

# Testing the Limits

Consider the following `zrok controller` configuration stanza, describing the limits we'll be using for this testing scenario:

```yaml
limits:
  environments: -1
  shares: -1
  bandwidth:
    per_account:
      period: 5m
      warning:
        rx: -1
        tx: -1
        total: -1
      limit:
        rx: -1
        tx: -1
        total: -1
    per_environment:
      period: 5m
      warning:
        rx: -1
        tx: -1
        total: -1
      limit:
        rx: -1
        tx: -1
        total: -1
    per_share:
      period: 5m
      warning:
        rx: -1
        tx: -1
        total: 1048576
      limit:
        rx: -1
        tx: -1
        total: 2097152
  enforcing: true
  cycle: 1m
```

Any limit values set to `-1` are "unlimited". In this case, we're only enforcing a transfer limit for shares. This limits configuration will send a warning when a share has transferred more than 1 megabyte in a 5 minute period, and will temporarily deactivate the share when it has transferred more than 2 megabytes in a 5 minute period.
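
The same shape works for account-wide or per-environment budgets. Here is a minimal sketch with illustrative values only (a warning at roughly 100 MB and a hard limit at roughly 200 MB of total transfer per account over a 24 hour period; the byte counts and period here are assumptions for the example, not recommendations):

```yaml
limits:
  bandwidth:
    per_account:
      period: 24h
      warning:
        rx: -1
        tx: -1
        total: 104857600    # ~100 MB of total transfer triggers a warning
      limit:
        rx: -1
        tx: -1
        total: 209715200    # ~200 MB of total transfer trips the hard limit
  enforcing: true
  cycle: 5m
```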

We're going to use the `zrok test loop public` framework to create a number of `public` shares and generate traffic. Here are the parameters we'll be using:

```
$ zrok test loop public -l 7 -i 10000 --min-pacing-ms 100 --max-pacing-ms 1500
```

This configuration will create 7 shares. Each share will perform 10,000 iterations. The delay between iterations will be randomly generated, with a floor of 100ms and a ceiling of 1500ms.

Let's look at the `zrok controller` log for this run.

First, our `zrok test loop public` command will create the 7 shares:

```
|
||||
[ 2.047] INFO zrok/controller.(*shareHandler).Handle: added frontend selection 'public' with ziti identity 'rBayMvm7UI' for share '0evcupz5k410'
|
||||
[ 2.081] INFO zrok/controller.(*shareHandler).Handle: added frontend selection 'public' with ziti identity 'rBayMvm7UI' for share '8k6dnu7x7ag0'
|
||||
[ 2.082] INFO zrok/controller/zrokEdgeSdk.CreateConfig: created config '19cyxfHo32R6fhVsYHZ84g' for environment 'd.wJYlpt9'
|
||||
[ 2.083] INFO zrok/controller.(*shareHandler).Handle: added frontend selection 'public' with ziti identity 'rBayMvm7UI' for share '53z6mz4re7tu'
|
||||
[ 2.086] INFO zrok/controller/zrokEdgeSdk.CreateShareService: created share '0evcupz5k410' (with ziti id '3WHJGqUdxkDtPYLgEL5V3q') for environment 'd.wJYlpt9'
|
||||
[ 2.090] INFO zrok/controller.(*shareHandler).Handle: added frontend selection 'public' with ziti identity 'rBayMvm7UI' for share '7u9szn30ikh0'
|
||||
[ 2.090] INFO zrok/controller.(*shareHandler).Handle: added frontend selection 'public' with ziti identity 'rBayMvm7UI' for share 'dh3f3jj7zhig'
|
||||
[ 2.091] INFO zrok/controller.(*shareHandler).Handle: added frontend selection 'public' with ziti identity 'rBayMvm7UI' for share 'tr7vpyrzvmh0'
|
||||
[ 2.096] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyBind: created bind service policy '4V8FsgCt63ySkG2pFWG5fz' for service '3WHJGqUdxkDtPYLgEL5V3q' for identity 'd.wJYlpt9'
|
||||
[ 2.097] INFO zrok/controller/zrokEdgeSdk.CreateConfig: created config '5nG9jM8VNl0uBFcRRt3AvI' for environment 'd.wJYlpt9'
|
||||
[ 2.098] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '74f2gUotsC7DteqpsWrxp0' for service '3WHJGqUdxkDtPYLgEL5V3q' for identities '[rBayMvm7UI]'
|
||||
[ 2.099] INFO zrok/controller/zrokEdgeSdk.CreateShareService: created share '8k6dnu7x7ag0' (with ziti id '2J0I9dPe2JGnY1GwjmM6n7') for environment 'd.wJYlpt9'
|
||||
[ 2.100] INFO zrok/controller/zrokEdgeSdk.CreateShareServiceEdgeRouterPolicy: created service edge router policy '2AqCUMqNtarmglOfhvnkI' for service '3WHJGqUdxkDtPYLgEL5V3q' for environment 'd.wJYlpt9'
|
||||
[ 2.100] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyBind: created bind service policy '4vT5eEPahgWEVdAuKN91Sd' for service '2J0I9dPe2JGnY1GwjmM6n7' for identity 'd.wJYlpt9'
|
||||
[ 2.104] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '5UHCkXZabFHeWYHmF01Zoc' for service '2J0I9dPe2JGnY1GwjmM6n7' for identities '[rBayMvm7UI]'
|
||||
[ 2.106] INFO zrok/controller.(*shareHandler).Handle: recorded share '0evcupz5k410' with id '503' for 'michael@quigley.com'
|
||||
[ 2.106] INFO zrok/controller/zrokEdgeSdk.CreateConfig: created config '6U3XDGnBjtONN5H6pUze12' for environment 'd.wJYlpt9'
|
||||
[ 2.108] INFO zrok/controller/zrokEdgeSdk.CreateShareServiceEdgeRouterPolicy: created service edge router policy '2RIKOBMOckfbI2xMSLAKxC' for service '2J0I9dPe2JGnY1GwjmM6n7' for environment 'd.wJYlpt9'
|
||||
[ 2.109] INFO zrok/controller/zrokEdgeSdk.CreateShareService: created share '53z6mz4re7tu' (with ziti id '2NiotGOyBHBEbFZwbTFJ2u') for environment 'd.wJYlpt9'
|
||||
[ 2.109] INFO zrok/controller/zrokEdgeSdk.CreateConfig: created config '1FnBhnGNXDe58dwTpbFc1x' for environment 'd.wJYlpt9'
|
||||
[ 2.109] INFO zrok/controller.(*shareHandler).Handle: recorded share '8k6dnu7x7ag0' with id '504' for 'michael@quigley.com'
|
||||
[ 2.112] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyBind: created bind service policy 'RRfDaA5kjCqUBVC9LvN1H' for service '2NiotGOyBHBEbFZwbTFJ2u' for identity 'd.wJYlpt9'
|
||||
[ 2.112] INFO zrok/controller/zrokEdgeSdk.CreateConfig: created config '2gid15nP0GIUVuaFQ15GWV' for environment 'd.wJYlpt9'
|
||||
[ 2.115] INFO zrok/controller/zrokEdgeSdk.CreateShareService: created share '7u9szn30ikh0' (with ziti id '6FzYnK0RFJmT0rDSP1bzVE') for environment 'd.wJYlpt9'
|
||||
[ 2.115] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '1oo3LuBKxduKAs1wsKndtW' for service '2NiotGOyBHBEbFZwbTFJ2u' for identities '[rBayMvm7UI]'
|
||||
[ 2.117] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyBind: created bind service policy '1mabRt9jefSe52CJh6FmhB' for service '6FzYnK0RFJmT0rDSP1bzVE' for identity 'd.wJYlpt9'
|
||||
[ 2.117] INFO zrok/controller/zrokEdgeSdk.CreateShareServiceEdgeRouterPolicy: created service edge router policy '2CM03d1cNpG4rma38BLzCQ' for service '2NiotGOyBHBEbFZwbTFJ2u' for environment 'd.wJYlpt9'
|
||||
[ 2.118] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '3dBtc3v2G70aqqDSqujQOy' for service '6FzYnK0RFJmT0rDSP1bzVE' for identities '[rBayMvm7UI]'
|
||||
[ 2.119] INFO zrok/controller.(*shareHandler).Handle: recorded share '53z6mz4re7tu' with id '505' for 'michael@quigley.com'
|
||||
[ 2.121] INFO zrok/controller/zrokEdgeSdk.CreateShareServiceEdgeRouterPolicy: created service edge router policy '3xAG26zA9yska3LeZQUJ3N' for service '6FzYnK0RFJmT0rDSP1bzVE' for environment 'd.wJYlpt9'
|
||||
[ 2.122] INFO zrok/controller.(*shareHandler).Handle: added frontend selection 'public' with ziti identity 'rBayMvm7UI' for share 's0uzz1p7xjrr'
|
||||
[ 2.124] INFO zrok/controller.(*shareHandler).Handle: recorded share '7u9szn30ikh0' with id '506' for 'michael@quigley.com'
|
||||
[ 2.128] INFO zrok/controller/zrokEdgeSdk.CreateShareService: created share 'tr7vpyrzvmh0' (with ziti id '7jyiTZ0z2ediD5hZbxu7KH') for environment 'd.wJYlpt9'
|
||||
[ 2.130] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyBind: created bind service policy '6RwWEoIsb8gBVKJfZP3ur3' for service '7jyiTZ0z2ediD5hZbxu7KH' for identity 'd.wJYlpt9'
|
||||
[ 2.131] INFO zrok/controller/zrokEdgeSdk.CreateConfig: created config '76iBDASRcxOmGtdwjVHo26' for environment 'd.wJYlpt9'
|
||||
[ 2.132] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '1cURGP202D8n6fzpzWhcgK' for service '7jyiTZ0z2ediD5hZbxu7KH' for identities '[rBayMvm7UI]'
|
||||
[ 2.138] INFO zrok/controller/zrokEdgeSdk.CreateShareService: created share 'dh3f3jj7zhig' (with ziti id 'nyKOLlxUWWbCzD7h9Jhjq') for environment 'd.wJYlpt9'
|
||||
[ 2.139] INFO zrok/controller/zrokEdgeSdk.CreateShareServiceEdgeRouterPolicy: created service edge router policy '2nMZaiChQAPpFnblNn1ljP' for service '7jyiTZ0z2ediD5hZbxu7KH' for environment 'd.wJYlpt9'
|
||||
[ 2.142] INFO zrok/controller.(*shareHandler).Handle: recorded share 'tr7vpyrzvmh0' with id '507' for 'michael@quigley.com'
|
||||
[ 2.143] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyBind: created bind service policy '1xF4ky6cDJm63tzlNTqoLC' for service 'nyKOLlxUWWbCzD7h9Jhjq' for identity 'd.wJYlpt9'
|
||||
[ 2.143] INFO zrok/controller/zrokEdgeSdk.CreateConfig: created config '4AN4sOtdQv99uHmFn3erx4' for environment 'd.wJYlpt9'
|
||||
[ 2.145] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '7GerqbN5lVfcOng91J2J6I' for service 'nyKOLlxUWWbCzD7h9Jhjq' for identities '[rBayMvm7UI]'
|
||||
[ 2.145] INFO zrok/controller/zrokEdgeSdk.CreateShareService: created share 's0uzz1p7xjrr' (with ziti id 'KtK5E46HR93YIBrrwUlIN') for environment 'd.wJYlpt9'
|
||||
[ 2.147] INFO zrok/controller/zrokEdgeSdk.CreateShareServiceEdgeRouterPolicy: created service edge router policy '2ZnnIXSTQ3Zscha1kykqQr' for service 'nyKOLlxUWWbCzD7h9Jhjq' for environment 'd.wJYlpt9'
|
||||
[ 2.149] INFO zrok/controller.(*shareHandler).Handle: recorded share 'dh3f3jj7zhig' with id '508' for 'michael@quigley.com'
|
||||
[ 2.155] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyBind: created bind service policy '6oohOQFEo75yl9vnIbyzdj' for service 'KtK5E46HR93YIBrrwUlIN' for identity 'd.wJYlpt9'
|
||||
[ 2.156] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '7eB3ubrntSHxkeHBCGJcOY' for service 'KtK5E46HR93YIBrrwUlIN' for identities '[rBayMvm7UI]'
|
||||
[ 2.157] INFO zrok/controller/zrokEdgeSdk.CreateShareServiceEdgeRouterPolicy: created service edge router policy '2CGCz8dcquNvZC0ZUwDZ5F' for service 'KtK5E46HR93YIBrrwUlIN' for environment 'd.wJYlpt9'
|
||||
[ 2.159] INFO zrok/controller.(*shareHandler).Handle: recorded share 's0uzz1p7xjrr' with id '509' for 'michael@quigley.com'
|
||||
```

Next, we observe metrics being reported from OpenZiti into the `zrok` metrics infrastructure for each of the 7 shares:

```
|
||||
[ 10.183] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 32.4 kB, tx: 32.6 kB} frontend {rx: 32.6 kB, tx: 32.4 kB}
|
||||
[ 10.192] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 22.5 kB, tx: 22.8 kB} frontend {rx: 22.8 kB, tx: 22.5 kB}
|
||||
[ 10.196] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 15.1 kB, tx: 15.3 kB} frontend {rx: 15.3 kB, tx: 15.1 kB}
|
||||
[ 15.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 53.0 kB, tx: 53.4 kB} frontend {rx: 53.4 kB, tx: 53.0 kB}
|
||||
[ 15.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 50.3 kB, tx: 50.6 kB} frontend {rx: 50.6 kB, tx: 50.3 kB}
|
||||
[ 15.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 46.2 kB, tx: 46.6 kB} frontend {rx: 46.6 kB, tx: 46.2 kB}
|
||||
[ 15.172] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 26.4 kB, tx: 26.8 kB} frontend {rx: 26.8 kB, tx: 26.4 kB}
|
||||
[ 20.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 26.9 kB, tx: 27.1 kB} frontend {rx: 27.1 kB, tx: 26.9 kB}
|
||||
[ 20.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 26.0 kB, tx: 26.2 kB} frontend {rx: 26.2 kB, tx: 26.0 kB}
|
||||
[ 20.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 67.1 kB, tx: 67.6 kB} frontend {rx: 67.6 kB, tx: 67.1 kB}
|
||||
[ 25.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 38.1 kB, tx: 38.4 kB} frontend {rx: 38.4 kB, tx: 38.1 kB}
|
||||
[ 25.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 26.3 kB, tx: 26.7 kB} frontend {rx: 26.7 kB, tx: 26.3 kB}
|
||||
[ 25.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 18.2 kB, tx: 18.4 kB} frontend {rx: 18.4 kB, tx: 18.2 kB}
|
||||
[ 25.171] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 22.6 kB, tx: 23.0 kB} frontend {rx: 23.0 kB, tx: 22.6 kB}
|
||||
[ 30.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 45.1 kB, tx: 45.4 kB} frontend {rx: 45.4 kB, tx: 45.1 kB}
|
||||
[ 30.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 44.0 kB, tx: 44.3 kB} frontend {rx: 44.3 kB, tx: 44.0 kB}
|
||||
[ 30.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 65.1 kB, tx: 65.5 kB} frontend {rx: 65.5 kB, tx: 65.1 kB}
|
||||
[ 35.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 35.9 kB, tx: 36.1 kB} frontend {rx: 36.1 kB, tx: 35.9 kB}
|
||||
[ 35.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 36.4 kB, tx: 36.9 kB} frontend {rx: 36.9 kB, tx: 36.4 kB}
|
||||
[ 35.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 28.9 kB, tx: 29.3 kB} frontend {rx: 29.3 kB, tx: 28.9 kB}
|
||||
[ 35.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 40.9 kB, tx: 41.2 kB} frontend {rx: 41.2 kB, tx: 40.9 kB}
|
||||
[ 40.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 31.0 kB, tx: 31.3 kB} frontend {rx: 31.3 kB, tx: 31.0 kB}
|
||||
[ 40.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 28.5 kB, tx: 28.8 kB} frontend {rx: 28.8 kB, tx: 28.5 kB}
|
||||
[ 40.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 40.4 kB, tx: 40.8 kB} frontend {rx: 40.8 kB, tx: 40.4 kB}
|
||||
[ 45.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 60.1 kB, tx: 60.4 kB} frontend {rx: 60.4 kB, tx: 60.1 kB}
|
||||
[ 45.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 64.8 kB, tx: 65.2 kB} frontend {rx: 65.2 kB, tx: 64.8 kB}
|
||||
[ 45.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 39.2 kB, tx: 39.5 kB} frontend {rx: 39.5 kB, tx: 39.2 kB}
|
||||
[ 45.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 23.9 kB, tx: 24.1 kB} frontend {rx: 24.1 kB, tx: 23.9 kB}
|
||||
[ 50.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 23.0 kB, tx: 23.2 kB} frontend {rx: 23.2 kB, tx: 23.0 kB}
|
||||
[ 50.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 41.4 kB, tx: 41.8 kB} frontend {rx: 41.8 kB, tx: 41.4 kB}
|
||||
[ 50.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 50.8 kB, tx: 51.2 kB} frontend {rx: 51.2 kB, tx: 50.8 kB}
|
||||
[ 55.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 29.2 kB, tx: 29.5 kB} frontend {rx: 29.5 kB, tx: 29.2 kB}
|
||||
[ 55.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 27.8 kB, tx: 28.0 kB} frontend {rx: 28.0 kB, tx: 27.8 kB}
|
||||
[ 55.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 21.7 kB, tx: 21.9 kB} frontend {rx: 21.9 kB, tx: 21.7 kB}
|
||||
[ 55.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 30.0 kB, tx: 30.3 kB} frontend {rx: 30.3 kB, tx: 30.0 kB}
|
||||
[ 60.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 43.4 kB, tx: 43.7 kB} frontend {rx: 43.7 kB, tx: 43.4 kB}
|
||||
[ 60.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 44.7 kB, tx: 44.9 kB} frontend {rx: 44.9 kB, tx: 44.7 kB}
|
||||
[ 60.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 30.6 kB, tx: 30.8 kB} frontend {rx: 30.8 kB, tx: 30.6 kB}
|
||||
[ 65.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 41.9 kB, tx: 42.2 kB} frontend {rx: 42.2 kB, tx: 41.9 kB}
|
||||
[ 65.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 38.1 kB, tx: 38.4 kB} frontend {rx: 38.4 kB, tx: 38.1 kB}
|
||||
[ 65.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 42.8 kB, tx: 43.3 kB} frontend {rx: 43.3 kB, tx: 42.8 kB}
|
||||
[ 65.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 28.9 kB, tx: 29.2 kB} frontend {rx: 29.2 kB, tx: 28.9 kB}
|
||||
[ 70.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 43.6 kB, tx: 43.9 kB} frontend {rx: 43.9 kB, tx: 43.6 kB}
|
||||
[ 70.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 30.3 kB, tx: 30.7 kB} frontend {rx: 30.7 kB, tx: 30.3 kB}
|
||||
[ 70.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 58.9 kB, tx: 59.5 kB} frontend {rx: 59.5 kB, tx: 58.9 kB}
|
||||
[ 75.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 32.5 kB, tx: 32.7 kB} frontend {rx: 32.7 kB, tx: 32.5 kB}
|
||||
[ 75.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 31.7 kB, tx: 32.2 kB} frontend {rx: 32.2 kB, tx: 31.7 kB}
|
||||
[ 75.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 42.2 kB, tx: 42.6 kB} frontend {rx: 42.6 kB, tx: 42.2 kB}
|
||||
[ 75.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 61.7 kB, tx: 62.0 kB} frontend {rx: 62.0 kB, tx: 61.7 kB}
|
||||
[ 80.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 48.3 kB, tx: 48.7 kB} frontend {rx: 48.7 kB, tx: 48.3 kB}
|
||||
[ 80.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 54.5 kB, tx: 55.2 kB} frontend {rx: 55.2 kB, tx: 54.5 kB}
|
||||
[ 80.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 21.3 kB, tx: 21.5 kB} frontend {rx: 21.5 kB, tx: 21.3 kB}
|
||||
[ 85.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 47.7 kB, tx: 48.1 kB} frontend {rx: 48.1 kB, tx: 47.7 kB}
|
||||
[ 85.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 27.0 kB, tx: 27.4 kB} frontend {rx: 27.4 kB, tx: 27.0 kB}
|
||||
[ 85.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 72.9 kB, tx: 73.4 kB} frontend {rx: 73.4 kB, tx: 72.9 kB}
|
||||
[ 85.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 78.6 kB, tx: 79.1 kB} frontend {rx: 79.1 kB, tx: 78.6 kB}
|
||||
[ 90.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 58.7 kB, tx: 59.1 kB} frontend {rx: 59.1 kB, tx: 58.7 kB}
|
||||
[ 90.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 48.9 kB, tx: 49.3 kB} frontend {rx: 49.3 kB, tx: 48.9 kB}
|
||||
[ 90.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 63.4 kB, tx: 63.7 kB} frontend {rx: 63.7 kB, tx: 63.4 kB}
|
||||
[ 95.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 59.0 kB, tx: 59.4 kB} frontend {rx: 59.4 kB, tx: 59.0 kB}
|
||||
[ 95.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 65.9 kB, tx: 66.2 kB} frontend {rx: 66.2 kB, tx: 65.9 kB}
|
||||
[ 95.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 48.9 kB, tx: 49.3 kB} frontend {rx: 49.3 kB, tx: 48.9 kB}
|
||||
[ 95.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 27.5 kB, tx: 27.8 kB} frontend {rx: 27.8 kB, tx: 27.5 kB}
|
||||
[ 100.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 56.3 kB, tx: 56.8 kB} frontend {rx: 56.8 kB, tx: 56.3 kB}
|
||||
[ 100.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 25.8 kB, tx: 26.2 kB} frontend {rx: 26.2 kB, tx: 25.8 kB}
|
||||
[ 100.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 33.9 kB, tx: 34.2 kB} frontend {rx: 34.2 kB, tx: 33.9 kB}
|
||||
[ 105.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 30.7 kB, tx: 31.0 kB} frontend {rx: 31.0 kB, tx: 30.7 kB}
|
||||
[ 105.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 64.6 kB, tx: 64.9 kB} frontend {rx: 64.9 kB, tx: 64.6 kB}
|
||||
[ 105.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 49.0 kB, tx: 49.3 kB} frontend {rx: 49.3 kB, tx: 49.0 kB}
|
||||
[ 105.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 36.2 kB, tx: 36.6 kB} frontend {rx: 36.6 kB, tx: 36.2 kB}
|
||||
```

Our first share receives a bandwidth warning after transferring more than 1 megabyte:

```
|
||||
[ 105.189] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning 'tr7vpyrzvmh0'
|
||||
[ 106.192] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 110.162] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 30.6 kB, tx: 30.9 kB} frontend {rx: 30.9 kB, tx: 30.6 kB}
|
||||
[ 110.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 42.9 kB, tx: 43.3 kB} frontend {rx: 43.3 kB, tx: 42.9 kB}
|
||||
[ 110.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 31.5 kB, tx: 31.7 kB} frontend {rx: 31.7 kB, tx: 31.5 kB}
|
||||
[ 115.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 45.3 kB, tx: 45.7 kB} frontend {rx: 45.7 kB, tx: 45.3 kB}
|
||||
[ 115.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 36.4 kB, tx: 36.8 kB} frontend {rx: 36.8 kB, tx: 36.4 kB}
|
||||
[ 115.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 46.5 kB, tx: 46.9 kB} frontend {rx: 46.9 kB, tx: 46.5 kB}
|
||||
[ 115.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 63.9 kB, tx: 64.4 kB} frontend {rx: 64.4 kB, tx: 63.9 kB}
|
||||
```

More shares start receiving bandwidth warnings:

```
|
||||
[ 115.230] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning 'dh3f3jj7zhig'
|
||||
[ 116.575] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 120.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 58.4 kB, tx: 58.8 kB} frontend {rx: 58.8 kB, tx: 58.4 kB}
|
||||
[ 120.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 30.5 kB, tx: 30.8 kB} frontend {rx: 30.8 kB, tx: 30.5 kB}
|
||||
[ 120.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 29.9 kB, tx: 30.2 kB} frontend {rx: 30.2 kB, tx: 29.9 kB}
|
||||
[ 120.180] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '53z6mz4re7tu'
|
||||
[ 122.733] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 125.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 47.0 kB, tx: 47.3 kB} frontend {rx: 47.3 kB, tx: 47.0 kB}
|
||||
[ 125.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 55.5 kB, tx: 56.0 kB} frontend {rx: 56.0 kB, tx: 55.5 kB}
|
||||
[ 125.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 49.6 kB, tx: 49.9 kB} frontend {rx: 49.9 kB, tx: 49.6 kB}
|
||||
[ 125.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 17.6 kB, tx: 17.8 kB} frontend {rx: 17.8 kB, tx: 17.6 kB}
|
||||
[ 125.211] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning 's0uzz1p7xjrr'
|
||||
[ 126.117] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 130.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 38.8 kB, tx: 39.0 kB} frontend {rx: 39.0 kB, tx: 38.8 kB}
|
||||
[ 130.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 76.6 kB, tx: 76.9 kB} frontend {rx: 76.9 kB, tx: 76.6 kB}
|
||||
[ 130.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 50.1 kB, tx: 50.5 kB} frontend {rx: 50.5 kB, tx: 50.1 kB}
|
||||
[ 130.178] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '0evcupz5k410'
|
||||
[ 130.921] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 135.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 32.8 kB, tx: 33.2 kB} frontend {rx: 33.2 kB, tx: 32.8 kB}
|
||||
[ 135.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 34.7 kB, tx: 35.0 kB} frontend {rx: 35.0 kB, tx: 34.7 kB}
|
||||
[ 135.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 38.9 kB, tx: 39.2 kB} frontend {rx: 39.2 kB, tx: 38.9 kB}
|
||||
[ 135.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 51.4 kB, tx: 51.8 kB} frontend {rx: 51.8 kB, tx: 51.4 kB}
|
||||
[ 140.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 52.8 kB, tx: 53.2 kB} frontend {rx: 53.2 kB, tx: 52.8 kB}
|
||||
[ 140.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 33.1 kB, tx: 33.4 kB} frontend {rx: 33.4 kB, tx: 33.1 kB}
|
||||
[ 140.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 35.6 kB, tx: 36.0 kB} frontend {rx: 36.0 kB, tx: 35.6 kB}
|
||||
[ 145.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 27.5 kB, tx: 27.8 kB} frontend {rx: 27.8 kB, tx: 27.5 kB}
|
||||
[ 145.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 24.6 kB, tx: 25.1 kB} frontend {rx: 25.1 kB, tx: 24.6 kB}
|
||||
[ 145.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 30.1 kB, tx: 30.5 kB} frontend {rx: 30.5 kB, tx: 30.1 kB}
|
||||
[ 145.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 24.7 kB, tx: 25.1 kB} frontend {rx: 25.1 kB, tx: 24.7 kB}
|
||||
[ 150.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 72.0 kB, tx: 72.4 kB} frontend {rx: 72.4 kB, tx: 72.0 kB}
|
||||
[ 150.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 31.8 kB, tx: 32.1 kB} frontend {rx: 32.1 kB, tx: 31.8 kB}
|
||||
[ 150.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 43.7 kB, tx: 43.9 kB} frontend {rx: 43.9 kB, tx: 43.7 kB}
|
||||
[ 155.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 49.4 kB, tx: 49.8 kB} frontend {rx: 49.8 kB, tx: 49.4 kB}
|
||||
[ 155.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 46.4 kB, tx: 46.6 kB} frontend {rx: 46.6 kB, tx: 46.4 kB}
|
||||
[ 155.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 50.7 kB, tx: 51.0 kB} frontend {rx: 51.0 kB, tx: 50.7 kB}
|
||||
[ 155.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 58.5 kB, tx: 58.9 kB} frontend {rx: 58.9 kB, tx: 58.5 kB}
|
||||
[ 160.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 43.0 kB, tx: 43.3 kB} frontend {rx: 43.3 kB, tx: 43.0 kB}
|
||||
[ 160.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 66.0 kB, tx: 66.4 kB} frontend {rx: 66.4 kB, tx: 66.0 kB}
|
||||
[ 160.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 31.5 kB, tx: 31.9 kB} frontend {rx: 31.9 kB, tx: 31.5 kB}
|
||||
[ 165.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 60.0 kB, tx: 60.3 kB} frontend {rx: 60.3 kB, tx: 60.0 kB}
|
||||
[ 165.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 47.3 kB, tx: 47.6 kB} frontend {rx: 47.6 kB, tx: 47.3 kB}
|
||||
[ 165.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 41.1 kB, tx: 41.3 kB} frontend {rx: 41.3 kB, tx: 41.1 kB}
|
||||
[ 165.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 37.2 kB, tx: 37.5 kB} frontend {rx: 37.5 kB, tx: 37.2 kB}
|
||||
[ 165.216] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '8k6dnu7x7ag0'
|
||||
[ 165.930] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 170.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 43.1 kB, tx: 43.5 kB} frontend {rx: 43.5 kB, tx: 43.1 kB}
|
||||
[ 170.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 45.4 kB, tx: 45.8 kB} frontend {rx: 45.8 kB, tx: 45.4 kB}
|
||||
[ 170.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 58.0 kB, tx: 58.3 kB} frontend {rx: 58.3 kB, tx: 58.0 kB}
|
||||
[ 175.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 63.5 kB, tx: 63.9 kB} frontend {rx: 63.9 kB, tx: 63.5 kB}
|
||||
[ 175.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 45.0 kB, tx: 45.3 kB} frontend {rx: 45.3 kB, tx: 45.0 kB}
|
||||
[ 175.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 35.0 kB, tx: 35.2 kB} frontend {rx: 35.2 kB, tx: 35.0 kB}
|
||||
[ 175.171] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 22.8 kB, tx: 23.2 kB} frontend {rx: 23.2 kB, tx: 22.8 kB}
|
||||
[ 180.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 42.3 kB, tx: 42.6 kB} frontend {rx: 42.6 kB, tx: 42.3 kB}
|
||||
[ 180.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 39.6 kB, tx: 40.1 kB} frontend {rx: 40.1 kB, tx: 39.6 kB}
|
||||
[ 180.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 53.1 kB, tx: 53.4 kB} frontend {rx: 53.4 kB, tx: 53.1 kB}
|
||||
[ 185.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 74.1 kB, tx: 74.6 kB} frontend {rx: 74.6 kB, tx: 74.1 kB}
|
||||
[ 185.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 71.0 kB, tx: 71.4 kB} frontend {rx: 71.4 kB, tx: 71.0 kB}
|
||||
[ 185.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 78.8 kB, tx: 79.2 kB} frontend {rx: 79.2 kB, tx: 78.8 kB}
|
||||
[ 185.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 37.8 kB, tx: 38.2 kB} frontend {rx: 38.2 kB, tx: 37.8 kB}
|
||||
[ 185.213] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '7u9szn30ikh0'
|
||||
[ 186.862] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 190.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 43.3 kB, tx: 43.8 kB} frontend {rx: 43.8 kB, tx: 43.3 kB}
|
||||
[ 190.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 39.6 kB, tx: 39.9 kB} frontend {rx: 39.9 kB, tx: 39.6 kB}
|
||||
[ 190.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 38.6 kB, tx: 38.9 kB} frontend {rx: 38.9 kB, tx: 38.6 kB}
|
||||
[ 195.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 44.0 kB, tx: 44.4 kB} frontend {rx: 44.4 kB, tx: 44.0 kB}
|
||||
[ 195.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 45.2 kB, tx: 45.5 kB} frontend {rx: 45.5 kB, tx: 45.2 kB}
|
||||
[ 195.170] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 67.1 kB, tx: 67.5 kB} frontend {rx: 67.5 kB, tx: 67.1 kB}
|
||||
[ 195.172] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 44.4 kB, tx: 44.8 kB} frontend {rx: 44.8 kB, tx: 44.4 kB}
|
||||
[ 200.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 23.7 kB, tx: 23.9 kB} frontend {rx: 23.9 kB, tx: 23.7 kB}
|
||||
[ 200.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 46.7 kB, tx: 47.1 kB} frontend {rx: 47.1 kB, tx: 46.7 kB}
|
||||
[ 200.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 48.7 kB, tx: 49.1 kB} frontend {rx: 49.1 kB, tx: 48.7 kB}
|
||||
[ 205.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 47.8 kB, tx: 48.1 kB} frontend {rx: 48.1 kB, tx: 47.8 kB}
|
||||
[ 205.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 57.2 kB, tx: 57.6 kB} frontend {rx: 57.6 kB, tx: 57.2 kB}
|
||||
[ 205.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 47.7 kB, tx: 47.9 kB} frontend {rx: 47.9 kB, tx: 47.7 kB}
|
||||
[ 205.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 38.9 kB, tx: 39.3 kB} frontend {rx: 39.3 kB, tx: 38.9 kB}
|
||||
[ 210.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 37.5 kB, tx: 37.8 kB} frontend {rx: 37.8 kB, tx: 37.5 kB}
|
||||
[ 210.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 15.3 kB, tx: 15.5 kB} frontend {rx: 15.5 kB, tx: 15.3 kB}
|
||||
[ 210.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 41.3 kB, tx: 41.5 kB} frontend {rx: 41.5 kB, tx: 41.3 kB}
|
||||
[ 215.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 28.0 kB, tx: 28.4 kB} frontend {rx: 28.4 kB, tx: 28.0 kB}
|
||||
[ 215.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 42.5 kB, tx: 42.8 kB} frontend {rx: 42.8 kB, tx: 42.5 kB}
|
||||
[ 215.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 47.6 kB, tx: 48.0 kB} frontend {rx: 48.0 kB, tx: 47.6 kB}
|
||||
[ 215.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 43.4 kB, tx: 43.8 kB} frontend {rx: 43.8 kB, tx: 43.4 kB}
|
||||
[ 220.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 19.4 kB, tx: 19.7 kB} frontend {rx: 19.7 kB, tx: 19.4 kB}
|
||||
[ 220.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 37.1 kB, tx: 37.4 kB} frontend {rx: 37.4 kB, tx: 37.1 kB}
|
||||
[ 220.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 69.1 kB, tx: 69.5 kB} frontend {rx: 69.5 kB, tx: 69.1 kB}
|
||||
```

Our first share crosses the 2 megabyte boundary and the system limits its ability to transfer additional data by removing its dial service policy:

```
|
||||
[ 220.195] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting 'dh3f3jj7zhig'
|
||||
[ 220.211] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '7GerqbN5lVfcOng91J2J6I' for environment 'd.wJYlpt9'
|
||||
[ 220.211] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for 'dh3f3jj7zhig'
|
||||
[ 225.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 45.1 kB, tx: 45.5 kB} frontend {rx: 45.5 kB, tx: 45.1 kB}
|
||||
[ 225.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 26.9 kB, tx: 27.3 kB} frontend {rx: 27.3 kB, tx: 26.9 kB}
|
||||
[ 225.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: fNXXHVSuw backend {rx: 40.9 kB, tx: 41.0 kB} frontend {rx: 41.0 kB, tx: 40.9 kB}
|
||||
[ 225.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 28.8 kB, tx: 29.1 kB} frontend {rx: 29.1 kB, tx: 28.8 kB}
|
||||
[ 230.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 55.4 kB, tx: 55.8 kB} frontend {rx: 55.8 kB, tx: 55.4 kB}
|
||||
[ 230.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 44.8 kB, tx: 45.2 kB} frontend {rx: 45.2 kB, tx: 44.8 kB}
|
||||
[ 230.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 52.2 kB, tx: 52.5 kB} frontend {rx: 52.5 kB, tx: 52.2 kB}
|
||||
[ 235.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 53.2 kB, tx: 53.6 kB} frontend {rx: 53.6 kB, tx: 53.2 kB}
|
||||
[ 235.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 43.1 kB, tx: 43.4 kB} frontend {rx: 43.4 kB, tx: 43.1 kB}
|
||||
[ 235.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: ONzzjVS0w backend {rx: 45.9 kB, tx: 46.2 kB} frontend {rx: 46.2 kB, tx: 45.9 kB}
|
||||
[ 240.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 50.6 kB, tx: 51.0 kB} frontend {rx: 51.0 kB, tx: 50.6 kB}
|
||||
[ 240.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: cNzzH4i0w backend {rx: 49.7 kB, tx: 50.0 kB} frontend {rx: 50.0 kB, tx: 49.7 kB}
|
||||
[ 240.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 37.3 kB, tx: 37.6 kB} frontend {rx: 37.6 kB, tx: 37.3 kB}
|
||||
```
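
If you have administrative access to the underlying OpenZiti controller, you can confirm the enforcement independently: the dial service policy for the limited share should no longer be present. A minimal sketch, assuming the `ziti` CLI is installed and logged in to the controller's management API:

```
$ ziti edge list service-policies
```

The dial policy `7GerqbN5lVfcOng91J2J6I`, deleted for `dh3f3jj7zhig` in the log above, should be absent from the listing until the limits agent relaxes the share.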

More shares become limited and are prevented from transferring data. Notice the metrics output in the logs thinning out; as more shares become limited, we're naturally seeing less data transfer occurring on the OpenZiti network:

```
|
||||
[ 240.188] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '0evcupz5k410'
|
||||
[ 240.203] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '74f2gUotsC7DteqpsWrxp0' for environment 'd.wJYlpt9'
|
||||
[ 240.203] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '0evcupz5k410'
|
||||
[ 245.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 28.2 kB, tx: 28.5 kB} frontend {rx: 28.5 kB, tx: 28.2 kB}
|
||||
[ 245.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: dev-h4iuwD backend {rx: 47.3 kB, tx: 47.9 kB} frontend {rx: 47.9 kB, tx: 47.3 kB}
|
||||
[ 245.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 48.2 kB, tx: 48.5 kB} frontend {rx: 48.5 kB, tx: 48.2 kB}
|
||||
[ 245.194] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '53z6mz4re7tu'
|
||||
[ 245.196] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '1oo3LuBKxduKAs1wsKndtW' for environment 'd.wJYlpt9'
|
||||
[ 245.197] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '53z6mz4re7tu'
|
||||
[ 250.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: dev-h4iuwD backend {rx: 33.2 kB, tx: 33.5 kB} frontend {rx: 33.5 kB, tx: 33.2 kB}
|
||||
[ 250.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 48.2 kB, tx: 48.4 kB} frontend {rx: 48.4 kB, tx: 48.2 kB}
|
||||
[ 250.191] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting 's0uzz1p7xjrr'
|
||||
[ 250.194] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '7eB3ubrntSHxkeHBCGJcOY' for environment 'd.wJYlpt9'
|
||||
[ 250.194] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for 's0uzz1p7xjrr'
|
||||
[ 255.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 35.8 kB, tx: 36.0 kB} frontend {rx: 36.0 kB, tx: 35.8 kB}
|
||||
[ 255.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: RZzXHVSuw backend {rx: 50.4 kB, tx: 50.6 kB} frontend {rx: 50.6 kB, tx: 50.4 kB}
|
||||
[ 255.179] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting 'tr7vpyrzvmh0'
|
||||
[ 255.182] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '1cURGP202D8n6fzpzWhcgK' for environment 'd.wJYlpt9'
|
||||
[ 255.182] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for 'tr7vpyrzvmh0'
|
||||
[ 260.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 37.3 kB, tx: 47.7 kB} frontend {rx: 47.7 kB, tx: 37.3 kB}
|
||||
[ 260.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: MZXXjVi0w backend {rx: 69.2 kB, tx: 69.7 kB} frontend {rx: 69.7 kB, tx: 69.2 kB}
|
||||
[ 265.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 59.7 kB, tx: 60.1 kB} frontend {rx: 60.1 kB, tx: 59.7 kB}
|
||||
[ 270.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 46.9 kB, tx: 47.2 kB} frontend {rx: 47.2 kB, tx: 46.9 kB}
|
||||
[ 275.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 32.9 kB, tx: 33.2 kB} frontend {rx: 33.2 kB, tx: 32.9 kB}
|
||||
[ 280.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 43.2 kB, tx: 43.7 kB} frontend {rx: 43.7 kB, tx: 43.2 kB}
|
||||
[ 285.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 50.0 kB, tx: 50.4 kB} frontend {rx: 50.4 kB, tx: 50.0 kB}
|
||||
[ 290.162] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: SNXXjViuwU backend {rx: 51.8 kB, tx: 52.3 kB} frontend {rx: 52.3 kB, tx: 51.8 kB}
|
||||
```
By this point, we're seeing very little traffic on the OpenZiti network:
```
|
||||
[ 290.176] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '8k6dnu7x7ag0'
|
||||
[ 290.190] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '5UHCkXZabFHeWYHmF01Zoc' for environment 'd.wJYlpt9'
|
||||
[ 290.191] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '8k6dnu7x7ag0'
|
||||
[ 295.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: iNzXj4S0r backend {rx: 44.0 kB, tx: 44.4 kB} frontend {rx: 44.4 kB, tx: 44.0 kB}
|
||||
[ 295.178] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '7u9szn30ikh0'
|
||||
[ 295.181] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '3dBtc3v2G70aqqDSqujQOy' for environment 'd.wJYlpt9'
|
||||
[ 295.181] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '7u9szn30ikh0'
|
||||
```
Notice the timestamps on the log messages. There have been no metrics messages for 60 seconds.
The limits agent runs a periodic process that looks for limited resources it can re-enable. When every limited share is still over its limit, it produces messages like these:
```
|
||||
[ 355.183] INFO zrok/controller/limits.(*Agent).relax: relaxing
|
||||
[ 355.188] INFO zrok/controller/limits.(*Agent).relax: share 'dh3f3jj7zhig' still over limit
|
||||
[ 355.192] INFO zrok/controller/limits.(*Agent).relax: share '0evcupz5k410' still over limit
|
||||
[ 355.196] INFO zrok/controller/limits.(*Agent).relax: share '53z6mz4re7tu' still over limit
|
||||
[ 355.199] INFO zrok/controller/limits.(*Agent).relax: share 's0uzz1p7xjrr' still over limit
|
||||
[ 355.203] INFO zrok/controller/limits.(*Agent).relax: share 'tr7vpyrzvmh0' still over limit
|
||||
[ 355.207] INFO zrok/controller/limits.(*Agent).relax: share '8k6dnu7x7ag0' still over limit
|
||||
[ 355.220] INFO zrok/controller/limits.(*Agent).relax: share '7u9szn30ikh0' still over limit
|
||||
[ 415.223] INFO zrok/controller/limits.(*Agent).relax: relaxing
|
||||
[ 415.228] INFO zrok/controller/limits.(*Agent).relax: share 'dh3f3jj7zhig' still over limit
|
||||
[ 415.232] INFO zrok/controller/limits.(*Agent).relax: share '0evcupz5k410' still over limit
|
||||
[ 415.236] INFO zrok/controller/limits.(*Agent).relax: share '53z6mz4re7tu' still over limit
|
||||
[ 415.240] INFO zrok/controller/limits.(*Agent).relax: share 's0uzz1p7xjrr' still over limit
|
||||
[ 415.245] INFO zrok/controller/limits.(*Agent).relax: share 'tr7vpyrzvmh0' still over limit
|
||||
[ 415.250] INFO zrok/controller/limits.(*Agent).relax: share '8k6dnu7x7ag0' still over limit
|
||||
[ 415.253] INFO zrok/controller/limits.(*Agent).relax: share '7u9szn30ikh0' still over limit
|
||||
```
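Conceptually, each `relax` pass walks the set of limited shares, re-checks each one's recent transfer totals, and only restores the dial service policy for shares that have fallen back under the limit. The sketch below is a minimal illustration of that loop, not the actual `zrok` limits agent; the share token, the 60-second tick, the 10,485,760-byte ceiling, and the `usage` stand-in for the metrics query are all invented for the example.

```
// Minimal sketch of a relax cycle, not the zrok implementation. On every tick
// it revisits each limited share and re-enables it only once its recent
// transfer total has dropped back under the configured ceiling.
package main

import (
	"log"
	"time"
)

type relaxAgent struct {
	limited    map[string]bool                   // share tokens currently limited
	usage      func(share string) (rx, tx int64) // stand-in for the metrics query
	limitTotal int64                             // rx+tx ceiling over the look-back window
}

func (a *relaxAgent) relax() {
	log.Println("relaxing")
	for share := range a.limited {
		rx, tx := a.usage(share)
		if rx+tx > a.limitTotal {
			log.Printf("share '%v' still over limit", share)
			continue
		}
		// this is the point where the dial service policy would be re-created
		log.Printf("relaxing '%v'", share)
		delete(a.limited, share)
	}
}

func main() {
	agent := &relaxAgent{
		limited:    map[string]bool{"8k6dnu7x7ag0": true},
		usage:      func(string) (int64, int64) { return 3_000_000, 3_100_000 },
		limitTotal: 10_485_760,
	}
	for range time.Tick(60 * time.Second) {
		agent.relax()
	}
}
```

In the real agent the cadence and ceilings appear to be governed by the controller's `limits` configuration (the `cycle` and `bandwidth` settings shown in the `etc/ctrl.yml` portion of this diff).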
Enough time has finally passed that the agent is able to remove the restrictions on some of the shares:
```
|
||||
[ 475.255] INFO zrok/controller/limits.(*Agent).relax: relaxing
|
||||
[ 475.260] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing 'dh3f3jj7zhig'
|
||||
[ 475.274] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '3LQG2ptwUxIuWtRzTLAqAc' for service 'nyKOLlxUWWbCzD7h9Jhjq' for identities '[rBayMvm7UI]'
|
||||
[ 475.274] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for 'dh3f3jj7zhig'
|
||||
[ 475.279] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing '0evcupz5k410'
|
||||
[ 475.281] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '4BPqQhFsGGmoBsqFDIWlWA' for service '3WHJGqUdxkDtPYLgEL5V3q' for identities '[rBayMvm7UI]'
|
||||
[ 475.281] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for '0evcupz5k410'
|
||||
[ 475.285] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing '53z6mz4re7tu'
|
||||
[ 475.287] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '64Kz6F7CluxH1drfyMkzDx' for service '2NiotGOyBHBEbFZwbTFJ2u' for identities '[rBayMvm7UI]'
|
||||
[ 475.287] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for '53z6mz4re7tu'
|
||||
[ 475.292] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing 's0uzz1p7xjrr'
|
||||
[ 475.295] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '6MZ8i9sqvom96P70P24FJQ' for service 'KtK5E46HR93YIBrrwUlIN' for identities '[rBayMvm7UI]'
|
||||
[ 475.295] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for 's0uzz1p7xjrr'
|
||||
[ 475.299] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing 'tr7vpyrzvmh0'
|
||||
[ 475.301] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '1kfuMP2APitf3qC2tsOC1b' for service '7jyiTZ0z2ediD5hZbxu7KH' for identities '[rBayMvm7UI]'
|
||||
[ 475.301] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for 'tr7vpyrzvmh0'
|
||||
[ 475.305] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing '8k6dnu7x7ag0'
|
||||
[ 475.308] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '12jWOvjIIuvYRW9vXfkRKw' for service '2J0I9dPe2JGnY1GwjmM6n7' for identities '[rBayMvm7UI]'
|
||||
[ 475.308] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for '8k6dnu7x7ag0'
|
||||
[ 475.313] INFO zrok/controller/limits.(*Agent).relax: share '7u9szn30ikh0' still over limit
|
||||
```
And notice that we're now starting to see traffic on those shares again:
```
|
||||
[ 485.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 59.7 kB, tx: 60.0 kB} frontend {rx: 60.0 kB, tx: 59.7 kB}
|
||||
[ 485.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 50.1 kB, tx: 50.4 kB} frontend {rx: 50.4 kB, tx: 50.1 kB}
|
||||
[ 485.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 80.3 kB, tx: 80.7 kB} frontend {rx: 80.7 kB, tx: 80.3 kB}
|
||||
[ 485.200] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '8k6dnu7x7ag0'
|
||||
[ 486.095] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 490.162] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 40.6 kB, tx: 40.9 kB} frontend {rx: 40.9 kB, tx: 40.6 kB}
|
||||
[ 490.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 44.5 kB, tx: 45.0 kB} frontend {rx: 45.0 kB, tx: 44.5 kB}
|
||||
[ 490.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 60.7 kB, tx: 61.1 kB} frontend {rx: 61.1 kB, tx: 60.7 kB}
|
||||
[ 495.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 45.3 kB, tx: 45.6 kB} frontend {rx: 45.6 kB, tx: 45.3 kB}
|
||||
[ 495.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 42.0 kB, tx: 42.4 kB} frontend {rx: 42.4 kB, tx: 42.0 kB}
|
||||
[ 495.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 33.3 kB, tx: 33.8 kB} frontend {rx: 33.8 kB, tx: 33.3 kB}
|
||||
[ 500.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 28.2 kB, tx: 28.5 kB} frontend {rx: 28.5 kB, tx: 28.2 kB}
|
||||
[ 500.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 40.0 kB, tx: 40.3 kB} frontend {rx: 40.3 kB, tx: 40.0 kB}
|
||||
[ 500.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 53.6 kB, tx: 54.0 kB} frontend {rx: 54.0 kB, tx: 53.6 kB}
|
||||
[ 505.201] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 38.4 kB, tx: 38.6 kB} frontend {rx: 38.6 kB, tx: 38.4 kB}
|
||||
[ 505.208] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 33.8 kB, tx: 34.2 kB} frontend {rx: 34.2 kB, tx: 33.8 kB}
|
||||
[ 505.210] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 39.7 kB, tx: 40.0 kB} frontend {rx: 40.0 kB, tx: 39.7 kB}
|
||||
[ 510.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 74.0 kB, tx: 74.5 kB} frontend {rx: 74.5 kB, tx: 74.0 kB}
|
||||
[ 510.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 51.5 kB, tx: 51.8 kB} frontend {rx: 51.8 kB, tx: 51.5 kB}
|
||||
[ 510.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 33.5 kB, tx: 33.9 kB} frontend {rx: 33.9 kB, tx: 33.5 kB}
|
||||
[ 515.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 62.7 kB, tx: 63.0 kB} frontend {rx: 63.0 kB, tx: 62.7 kB}
|
||||
[ 515.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 32.5 kB, tx: 32.9 kB} frontend {rx: 32.9 kB, tx: 32.5 kB}
|
||||
[ 515.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 47.4 kB, tx: 47.7 kB} frontend {rx: 47.7 kB, tx: 47.4 kB}
|
||||
[ 520.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 48.2 kB, tx: 48.5 kB} frontend {rx: 48.5 kB, tx: 48.2 kB}
|
||||
[ 520.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 45.8 kB, tx: 46.1 kB} frontend {rx: 46.1 kB, tx: 45.8 kB}
|
||||
[ 520.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 34.2 kB, tx: 34.4 kB} frontend {rx: 34.4 kB, tx: 34.2 kB}
|
||||
[ 525.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 35.0 kB, tx: 35.4 kB} frontend {rx: 35.4 kB, tx: 35.0 kB}
|
||||
[ 525.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 40.1 kB, tx: 40.4 kB} frontend {rx: 40.4 kB, tx: 40.1 kB}
|
||||
[ 525.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 24.7 kB, tx: 25.0 kB} frontend {rx: 25.0 kB, tx: 24.7 kB}
|
||||
[ 530.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 57.3 kB, tx: 57.9 kB} frontend {rx: 57.9 kB, tx: 57.3 kB}
|
||||
[ 530.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 51.4 kB, tx: 51.7 kB} frontend {rx: 51.7 kB, tx: 51.4 kB}
|
||||
[ 530.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 40.6 kB, tx: 41.0 kB} frontend {rx: 41.0 kB, tx: 40.6 kB}
|
||||
[ 535.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 41.5 kB, tx: 41.9 kB} frontend {rx: 41.9 kB, tx: 41.5 kB}
|
||||
[ 535.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 61.4 kB, tx: 61.9 kB} frontend {rx: 61.9 kB, tx: 61.4 kB}
|
||||
[ 535.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 33.3 kB, tx: 33.6 kB} frontend {rx: 33.6 kB, tx: 33.3 kB}
|
||||
[ 540.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 37.2 kB, tx: 37.5 kB} frontend {rx: 37.5 kB, tx: 37.2 kB}
|
||||
[ 540.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 53.8 kB, tx: 54.3 kB} frontend {rx: 54.3 kB, tx: 53.8 kB}
|
||||
[ 540.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 75.8 kB, tx: 76.4 kB} frontend {rx: 76.4 kB, tx: 75.8 kB}
|
||||
[ 545.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 69.9 kB, tx: 70.2 kB} frontend {rx: 70.2 kB, tx: 69.9 kB}
|
||||
[ 545.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 24.9 kB, tx: 25.2 kB} frontend {rx: 25.2 kB, tx: 24.9 kB}
|
||||
[ 545.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 39.4 kB, tx: 39.6 kB} frontend {rx: 39.6 kB, tx: 39.4 kB}
|
||||
[ 550.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 64.0 kB, tx: 64.3 kB} frontend {rx: 64.3 kB, tx: 64.0 kB}
|
||||
[ 550.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 39.2 kB, tx: 39.6 kB} frontend {rx: 39.6 kB, tx: 39.2 kB}
|
||||
[ 550.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 47.6 kB, tx: 47.9 kB} frontend {rx: 47.9 kB, tx: 47.6 kB}
|
||||
[ 555.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 54.2 kB, tx: 54.8 kB} frontend {rx: 54.8 kB, tx: 54.2 kB}
|
||||
[ 555.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 44.5 kB, tx: 44.8 kB} frontend {rx: 44.8 kB, tx: 44.5 kB}
|
||||
[ 555.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 66.3 kB, tx: 66.7 kB} frontend {rx: 66.7 kB, tx: 66.3 kB}
|
||||
[ 560.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 18.9 kB, tx: 19.2 kB} frontend {rx: 19.2 kB, tx: 18.9 kB}
|
||||
[ 560.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 51.5 kB, tx: 51.8 kB} frontend {rx: 51.8 kB, tx: 51.5 kB}
|
||||
[ 560.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 36.3 kB, tx: 36.7 kB} frontend {rx: 36.7 kB, tx: 36.3 kB}
|
||||
[ 565.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 70.7 kB, tx: 71.0 kB} frontend {rx: 71.0 kB, tx: 70.7 kB}
|
||||
[ 565.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 58.7 kB, tx: 59.1 kB} frontend {rx: 59.1 kB, tx: 58.7 kB}
|
||||
[ 565.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 33.8 kB, tx: 34.0 kB} frontend {rx: 34.0 kB, tx: 33.8 kB}
|
||||
[ 570.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 35.4 kB, tx: 35.7 kB} frontend {rx: 35.7 kB, tx: 35.4 kB}
|
||||
[ 570.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 47.2 kB, tx: 47.6 kB} frontend {rx: 47.6 kB, tx: 47.2 kB}
|
||||
[ 570.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 75.5 kB, tx: 75.8 kB} frontend {rx: 75.8 kB, tx: 75.5 kB}
|
||||
[ 575.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 34.3 kB, tx: 34.6 kB} frontend {rx: 34.6 kB, tx: 34.3 kB}
|
||||
[ 575.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 36.2 kB, tx: 36.6 kB} frontend {rx: 36.6 kB, tx: 36.2 kB}
|
||||
[ 575.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 53.9 kB, tx: 54.2 kB} frontend {rx: 54.2 kB, tx: 53.9 kB}
|
||||
[ 575.178] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '53z6mz4re7tu'
|
||||
[ 575.953] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 580.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 50.3 kB, tx: 50.7 kB} frontend {rx: 50.7 kB, tx: 50.3 kB}
|
||||
[ 580.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 55.7 kB, tx: 56.1 kB} frontend {rx: 56.1 kB, tx: 55.7 kB}
|
||||
[ 580.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 41.0 kB, tx: 41.3 kB} frontend {rx: 41.3 kB, tx: 41.0 kB}
|
||||
[ 585.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 32.3 kB, tx: 32.6 kB} frontend {rx: 32.6 kB, tx: 32.3 kB}
|
||||
[ 585.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 18.5 kB, tx: 18.8 kB} frontend {rx: 18.8 kB, tx: 18.5 kB}
|
||||
[ 585.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 43.2 kB, tx: 43.6 kB} frontend {rx: 43.6 kB, tx: 43.2 kB}
|
||||
[ 590.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 32.8 kB, tx: 33.0 kB} frontend {rx: 33.0 kB, tx: 32.8 kB}
|
||||
[ 590.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 63.4 kB, tx: 63.7 kB} frontend {rx: 63.7 kB, tx: 63.4 kB}
|
||||
[ 590.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 18.1 kB, tx: 18.3 kB} frontend {rx: 18.3 kB, tx: 18.1 kB}
|
||||
[ 590.208] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning 'tr7vpyrzvmh0'
|
||||
[ 591.168] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 595.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 34.1 kB, tx: 34.5 kB} frontend {rx: 34.5 kB, tx: 34.1 kB}
|
||||
[ 595.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 46.0 kB, tx: 46.3 kB} frontend {rx: 46.3 kB, tx: 46.0 kB}
|
||||
[ 595.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 49.1 kB, tx: 49.4 kB} frontend {rx: 49.4 kB, tx: 49.1 kB}
|
||||
[ 600.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 34.0 kB, tx: 34.3 kB} frontend {rx: 34.3 kB, tx: 34.0 kB}
|
||||
[ 600.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 46.6 kB, tx: 47.1 kB} frontend {rx: 47.1 kB, tx: 46.6 kB}
|
||||
[ 600.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 23.2 kB, tx: 23.5 kB} frontend {rx: 23.5 kB, tx: 23.2 kB}
|
||||
[ 600.189] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning 's0uzz1p7xjrr'
|
||||
[ 600.949] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 605.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 61.5 kB, tx: 61.8 kB} frontend {rx: 61.8 kB, tx: 61.5 kB}
|
||||
[ 605.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 38.3 kB, tx: 38.7 kB} frontend {rx: 38.7 kB, tx: 38.3 kB}
|
||||
[ 605.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 35.1 kB, tx: 35.5 kB} frontend {rx: 35.5 kB, tx: 35.1 kB}
|
||||
[ 610.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 37.7 kB, tx: 38.1 kB} frontend {rx: 38.1 kB, tx: 37.7 kB}
|
||||
[ 610.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 50.4 kB, tx: 50.7 kB} frontend {rx: 50.7 kB, tx: 50.4 kB}
|
||||
[ 610.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 27.9 kB, tx: 28.2 kB} frontend {rx: 28.2 kB, tx: 27.9 kB}
|
||||
[ 615.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 25.7 kB, tx: 26.0 kB} frontend {rx: 26.0 kB, tx: 25.7 kB}
|
||||
[ 615.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 32.2 kB, tx: 32.5 kB} frontend {rx: 32.5 kB, tx: 32.2 kB}
|
||||
[ 615.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 47.2 kB, tx: 47.6 kB} frontend {rx: 47.6 kB, tx: 47.2 kB}
|
||||
[ 620.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 35.7 kB, tx: 36.2 kB} frontend {rx: 36.2 kB, tx: 35.7 kB}
|
||||
[ 620.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 60.6 kB, tx: 60.9 kB} frontend {rx: 60.9 kB, tx: 60.6 kB}
|
||||
[ 620.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 51.8 kB, tx: 52.3 kB} frontend {rx: 52.3 kB, tx: 51.8 kB}
|
||||
[ 620.178] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning 'dh3f3jj7zhig'
|
||||
[ 620.929] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 625.162] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 41.6 kB, tx: 42.0 kB} frontend {rx: 42.0 kB, tx: 41.6 kB}
|
||||
[ 625.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 46.4 kB, tx: 46.7 kB} frontend {rx: 46.7 kB, tx: 46.4 kB}
|
||||
[ 625.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 48.4 kB, tx: 48.7 kB} frontend {rx: 48.7 kB, tx: 48.4 kB}
|
||||
[ 630.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 29.3 kB, tx: 29.5 kB} frontend {rx: 29.5 kB, tx: 29.3 kB}
|
||||
[ 630.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 49.7 kB, tx: 50.2 kB} frontend {rx: 50.2 kB, tx: 49.7 kB}
|
||||
[ 630.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 47.7 kB, tx: 48.0 kB} frontend {rx: 48.0 kB, tx: 47.7 kB}
|
||||
[ 635.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 69.2 kB, tx: 69.6 kB} frontend {rx: 69.6 kB, tx: 69.2 kB}
|
||||
[ 635.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 45.8 kB, tx: 46.2 kB} frontend {rx: 46.2 kB, tx: 45.8 kB}
|
||||
[ 635.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 38.6 kB, tx: 39.1 kB} frontend {rx: 39.1 kB, tx: 38.6 kB}
|
||||
[ 640.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 41.7 kB, tx: 42.0 kB} frontend {rx: 42.0 kB, tx: 41.7 kB}
|
||||
[ 640.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 27.5 kB, tx: 28.0 kB} frontend {rx: 28.0 kB, tx: 27.5 kB}
|
||||
[ 640.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 28.4 kB, tx: 28.7 kB} frontend {rx: 28.7 kB, tx: 28.4 kB}
|
||||
[ 645.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 39.8 kB, tx: 40.0 kB} frontend {rx: 40.0 kB, tx: 39.8 kB}
|
||||
[ 645.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 52.2 kB, tx: 52.5 kB} frontend {rx: 52.5 kB, tx: 52.2 kB}
|
||||
[ 645.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 39.3 kB, tx: 39.6 kB} frontend {rx: 39.6 kB, tx: 39.3 kB}
|
||||
[ 645.300] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '0evcupz5k410'
|
||||
[ 647.031] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 650.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 36.3 kB, tx: 36.7 kB} frontend {rx: 36.7 kB, tx: 36.3 kB}
|
||||
[ 650.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 50.7 kB, tx: 51.0 kB} frontend {rx: 51.0 kB, tx: 50.7 kB}
|
||||
[ 650.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 48.5 kB, tx: 48.8 kB} frontend {rx: 48.8 kB, tx: 48.5 kB}
|
||||
[ 655.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 24.6 kB, tx: 24.9 kB} frontend {rx: 24.9 kB, tx: 24.6 kB}
|
||||
[ 655.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 45.6 kB, tx: 46.0 kB} frontend {rx: 46.0 kB, tx: 45.6 kB}
|
||||
[ 655.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 51.8 kB, tx: 52.1 kB} frontend {rx: 52.1 kB, tx: 51.8 kB}
|
||||
[ 655.284] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '53z6mz4re7tu'
|
||||
[ 655.299] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '64Kz6F7CluxH1drfyMkzDx' for environment 'd.wJYlpt9'
|
||||
[ 655.299] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '53z6mz4re7tu'
|
||||
[ 660.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 70.9 kB, tx: 71.4 kB} frontend {rx: 71.4 kB, tx: 70.9 kB}
|
||||
[ 660.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: CjNBpViur backend {rx: 49.0 kB, tx: 49.5 kB} frontend {rx: 49.5 kB, tx: 49.0 kB}
|
||||
[ 660.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 36.2 kB, tx: 36.5 kB} frontend {rx: 36.5 kB, tx: 36.2 kB}
|
||||
[ 665.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 48.3 kB, tx: 48.7 kB} frontend {rx: 48.7 kB, tx: 48.3 kB}
|
||||
[ 665.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 42.1 kB, tx: 42.4 kB} frontend {rx: 42.4 kB, tx: 42.1 kB}
|
||||
[ 665.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 47.4 kB, tx: 47.7 kB} frontend {rx: 47.7 kB, tx: 47.4 kB}
|
||||
[ 670.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 40.2 kB, tx: 40.6 kB} frontend {rx: 40.6 kB, tx: 40.2 kB}
|
||||
[ 670.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 62.1 kB, tx: 62.4 kB} frontend {rx: 62.4 kB, tx: 62.1 kB}
|
||||
[ 675.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 13.8 kB, tx: 14.1 kB} frontend {rx: 14.1 kB, tx: 13.8 kB}
|
||||
[ 675.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 36.6 kB, tx: 36.8 kB} frontend {rx: 36.8 kB, tx: 36.6 kB}
|
||||
[ 675.168] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 35.3 kB, tx: 35.6 kB} frontend {rx: 35.6 kB, tx: 35.3 kB}
|
||||
[ 680.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 55.3 kB, tx: 55.8 kB} frontend {rx: 55.8 kB, tx: 55.3 kB}
|
||||
[ 680.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 46.6 kB, tx: 46.9 kB} frontend {rx: 46.9 kB, tx: 46.6 kB}
|
||||
[ 685.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 56.2 kB, tx: 56.5 kB} frontend {rx: 56.5 kB, tx: 56.2 kB}
|
||||
[ 685.172] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 42.4 kB, tx: 42.8 kB} frontend {rx: 42.8 kB, tx: 42.4 kB}
|
||||
[ 685.175] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 46.8 kB, tx: 47.1 kB} frontend {rx: 47.1 kB, tx: 46.8 kB}
|
||||
[ 690.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 72.4 kB, tx: 72.8 kB} frontend {rx: 72.8 kB, tx: 72.4 kB}
|
||||
[ 690.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 58.5 kB, tx: 58.7 kB} frontend {rx: 58.7 kB, tx: 58.5 kB}
|
||||
[ 695.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 21.1 kB, tx: 21.4 kB} frontend {rx: 21.4 kB, tx: 21.1 kB}
|
||||
[ 695.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 41.5 kB, tx: 41.8 kB} frontend {rx: 41.8 kB, tx: 41.5 kB}
|
||||
[ 695.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 49.5 kB, tx: 49.8 kB} frontend {rx: 49.8 kB, tx: 49.5 kB}
|
||||
[ 700.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 28.6 kB, tx: 28.9 kB} frontend {rx: 28.9 kB, tx: 28.6 kB}
|
||||
[ 700.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 58.6 kB, tx: 59.0 kB} frontend {rx: 59.0 kB, tx: 58.6 kB}
|
||||
[ 700.193] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting 's0uzz1p7xjrr'
|
||||
[ 700.208] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '6MZ8i9sqvom96P70P24FJQ' for environment 'd.wJYlpt9'
|
||||
[ 700.208] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for 's0uzz1p7xjrr'
|
||||
[ 705.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 40.1 kB, tx: 40.6 kB} frontend {rx: 40.6 kB, tx: 40.1 kB}
|
||||
[ 705.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 55.7 kB, tx: 56.1 kB} frontend {rx: 56.1 kB, tx: 55.7 kB}
|
||||
[ 705.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: OJyBpVS0w backend {rx: 46.0 kB, tx: 46.2 kB} frontend {rx: 46.2 kB, tx: 46.0 kB}
|
||||
[ 710.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 20.4 kB, tx: 20.6 kB} frontend {rx: 20.6 kB, tx: 20.4 kB}
|
||||
[ 710.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 74.0 kB, tx: 74.4 kB} frontend {rx: 74.4 kB, tx: 74.0 kB}
|
||||
[ 710.178] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting 'tr7vpyrzvmh0'
|
||||
[ 710.192] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '1kfuMP2APitf3qC2tsOC1b' for environment 'd.wJYlpt9'
|
||||
[ 710.192] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for 'tr7vpyrzvmh0'
|
||||
[ 715.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 59.7 kB, tx: 60.2 kB} frontend {rx: 60.2 kB, tx: 59.7 kB}
|
||||
[ 715.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: LdZLhVSuw backend {rx: 67.5 kB, tx: 67.8 kB} frontend {rx: 67.8 kB, tx: 67.5 kB}
|
||||
[ 720.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 43.7 kB, tx: 44.1 kB} frontend {rx: 44.1 kB, tx: 43.7 kB}
|
||||
[ 725.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 36.4 kB, tx: 36.6 kB} frontend {rx: 36.6 kB, tx: 36.4 kB}
|
||||
[ 725.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 72.2 kB, tx: 72.7 kB} frontend {rx: 72.7 kB, tx: 72.2 kB}
|
||||
[ 730.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 41.0 kB, tx: 41.4 kB} frontend {rx: 41.4 kB, tx: 41.0 kB}
|
||||
[ 735.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 31.3 kB, tx: 31.6 kB} frontend {rx: 31.6 kB, tx: 31.3 kB}
|
||||
[ 735.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 39.1 kB, tx: 39.4 kB} frontend {rx: 39.4 kB, tx: 39.1 kB}
|
||||
[ 740.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: dh3f3jj7zhig, circuit: 5TNBhVS0r backend {rx: 30.7 kB, tx: 31.0 kB} frontend {rx: 31.0 kB, tx: 30.7 kB}
|
||||
[ 740.177] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting 'dh3f3jj7zhig'
|
||||
[ 740.192] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '3LQG2ptwUxIuWtRzTLAqAc' for environment 'd.wJYlpt9'
|
||||
[ 740.192] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for 'dh3f3jj7zhig'
|
||||
[ 745.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 38.2 kB, tx: 38.5 kB} frontend {rx: 38.5 kB, tx: 38.2 kB}
|
||||
[ 745.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: qwlLhVS0w backend {rx: 42.3 kB, tx: 42.7 kB} frontend {rx: 42.7 kB, tx: 42.3 kB}
|
||||
[ 745.192] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '8k6dnu7x7ag0'
|
||||
[ 745.195] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '12jWOvjIIuvYRW9vXfkRKw' for environment 'd.wJYlpt9'
|
||||
[ 745.195] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '8k6dnu7x7ag0'
|
||||
[ 750.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 36.4 kB, tx: 36.7 kB} frontend {rx: 36.7 kB, tx: 36.4 kB}
|
||||
[ 760.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: fMNBpVi0w backend {rx: 57.5 kB, tx: 58.0 kB} frontend {rx: 58.0 kB, tx: 57.5 kB}
|
||||
[ 760.178] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '0evcupz5k410'
|
||||
[ 760.194] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '4BPqQhFsGGmoBsqFDIWlWA' for environment 'd.wJYlpt9'
|
||||
[ 760.194] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '0evcupz5k410'
|
||||
[ 820.195] INFO zrok/controller/limits.(*Agent).relax: relaxing
|
||||
[ 820.200] ERROR zrok/controller/limits.(*Agent).checkShareLimit: expected 2 results; got '0' (from(bucket: "zrok")|> range(start: -5m0s)|> filter(fn: (r) => r["_measurement"] == "xfer")|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")|> filter(fn: (r) => r["namespace"] == "backend")|> filter(fn: (r) => r["share"] == "7u9szn30ikh0")|> sum())
|
||||
[ 820.201] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing '7u9szn30ikh0'
|
||||
[ 820.215] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '4yz1WSGg04BeARMuVkmxf7' for service '6FzYnK0RFJmT0rDSP1bzVE' for identities '[rBayMvm7UI]'
|
||||
[ 820.215] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for '7u9szn30ikh0'
|
||||
[ 820.219] INFO zrok/controller/limits.(*Agent).relax: share '53z6mz4re7tu' still over limit
|
||||
[ 820.223] INFO zrok/controller/limits.(*Agent).relax: share 's0uzz1p7xjrr' still over limit
|
||||
[ 820.227] INFO zrok/controller/limits.(*Agent).relax: share 'tr7vpyrzvmh0' still over limit
|
||||
[ 820.231] INFO zrok/controller/limits.(*Agent).relax: share 'dh3f3jj7zhig' still over limit
|
||||
[ 820.236] INFO zrok/controller/limits.(*Agent).relax: share '8k6dnu7x7ag0' still over limit
|
||||
[ 820.240] INFO zrok/controller/limits.(*Agent).relax: share '0evcupz5k410' still over limit
|
||||
[ 830.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 61.6 kB, tx: 61.9 kB} frontend {rx: 61.9 kB, tx: 61.6 kB}
|
||||
[ 840.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 61.4 kB, tx: 61.7 kB} frontend {rx: 61.7 kB, tx: 61.4 kB}
|
||||
[ 850.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 30.4 kB, tx: 30.7 kB} frontend {rx: 30.7 kB, tx: 30.4 kB}
|
||||
[ 860.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 52.6 kB, tx: 53.0 kB} frontend {rx: 53.0 kB, tx: 52.6 kB}
|
||||
[ 870.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 42.2 kB, tx: 42.6 kB} frontend {rx: 42.6 kB, tx: 42.2 kB}
|
||||
[ 880.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 27.1 kB, tx: 27.4 kB} frontend {rx: 27.4 kB, tx: 27.1 kB}
|
||||
[ 890.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 49.9 kB, tx: 50.3 kB} frontend {rx: 50.3 kB, tx: 49.9 kB}
|
||||
[ 900.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 51.5 kB, tx: 51.8 kB} frontend {rx: 51.8 kB, tx: 51.5 kB}
|
||||
[ 910.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 44.0 kB, tx: 44.5 kB} frontend {rx: 44.5 kB, tx: 44.0 kB}
|
||||
[ 920.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 43.5 kB, tx: 43.8 kB} frontend {rx: 43.8 kB, tx: 43.5 kB}
|
||||
[ 930.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 61.3 kB, tx: 61.7 kB} frontend {rx: 61.7 kB, tx: 61.3 kB}
|
||||
[ 930.177] INFO zrok/controller/limits.(*shareWarningAction).HandleShare: warning '7u9szn30ikh0'
|
||||
[ 931.057] INFO zrok/controller/limits.sendLimitWarningEmail: limit warning email sent to 'michael@quigley.com'
|
||||
[ 940.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 30.2 kB, tx: 30.5 kB} frontend {rx: 30.5 kB, tx: 30.2 kB}
|
||||
[ 950.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 56.2 kB, tx: 56.6 kB} frontend {rx: 56.6 kB, tx: 56.2 kB}
|
||||
[ 960.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 73.1 kB, tx: 73.6 kB} frontend {rx: 73.6 kB, tx: 73.1 kB}
|
||||
[ 970.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 35.1 kB, tx: 35.4 kB} frontend {rx: 35.4 kB, tx: 35.1 kB}
|
||||
[ 980.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 63.6 kB, tx: 64.0 kB} frontend {rx: 64.0 kB, tx: 63.6 kB}
|
||||
[ 990.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 46.6 kB, tx: 47.0 kB} frontend {rx: 47.0 kB, tx: 46.6 kB}
|
||||
[1000.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 36.8 kB, tx: 37.3 kB} frontend {rx: 37.3 kB, tx: 36.8 kB}
|
||||
[1010.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 24.5 kB, tx: 24.9 kB} frontend {rx: 24.9 kB, tx: 24.5 kB}
|
||||
[1020.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 47.3 kB, tx: 47.7 kB} frontend {rx: 47.7 kB, tx: 47.3 kB}
|
||||
[1030.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 29.6 kB, tx: 29.9 kB} frontend {rx: 29.9 kB, tx: 29.6 kB}
|
||||
[1040.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 48.7 kB, tx: 49.1 kB} frontend {rx: 49.1 kB, tx: 48.7 kB}
|
||||
[1050.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 7u9szn30ikh0, circuit: ESgSh4i0r backend {rx: 41.8 kB, tx: 42.0 kB} frontend {rx: 42.0 kB, tx: 41.8 kB}
|
||||
[1050.284] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: limiting '7u9szn30ikh0'
|
||||
[1050.300] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '4yz1WSGg04BeARMuVkmxf7' for environment 'd.wJYlpt9'
|
||||
[1050.300] INFO zrok/controller/limits.(*shareLimitAction).HandleShare: removed dial service policy for '7u9szn30ikh0'
|
||||
[1110.301] INFO zrok/controller/limits.(*Agent).relax: relaxing
|
||||
[1110.307] ERROR zrok/controller/limits.(*Agent).checkShareLimit: expected 2 results; got '0' (from(bucket: "zrok")|> range(start: -5m0s)|> filter(fn: (r) => r["_measurement"] == "xfer")|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")|> filter(fn: (r) => r["namespace"] == "backend")|> filter(fn: (r) => r["share"] == "53z6mz4re7tu")|> sum())
|
||||
[1110.307] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing '53z6mz4re7tu'
|
||||
[1110.321] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy 'WxOiC60VDWvHHlbtcaJ6D' for service '2NiotGOyBHBEbFZwbTFJ2u' for identities '[rBayMvm7UI]'
|
||||
[1110.321] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for '53z6mz4re7tu'
|
||||
[1110.325] ERROR zrok/controller/limits.(*Agent).checkShareLimit: expected 2 results; got '0' (from(bucket: "zrok")|> range(start: -5m0s)|> filter(fn: (r) => r["_measurement"] == "xfer")|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")|> filter(fn: (r) => r["namespace"] == "backend")|> filter(fn: (r) => r["share"] == "s0uzz1p7xjrr")|> sum())
|
||||
[1110.325] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing 's0uzz1p7xjrr'
|
||||
[1110.327] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '2ubWYvKo2EOnrn1U4MQ4Cu' for service 'KtK5E46HR93YIBrrwUlIN' for identities '[rBayMvm7UI]'
|
||||
[1110.327] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for 's0uzz1p7xjrr'
|
||||
[1110.331] ERROR zrok/controller/limits.(*Agent).checkShareLimit: expected 2 results; got '0' (from(bucket: "zrok")|> range(start: -5m0s)|> filter(fn: (r) => r["_measurement"] == "xfer")|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")|> filter(fn: (r) => r["namespace"] == "backend")|> filter(fn: (r) => r["share"] == "tr7vpyrzvmh0")|> sum())
|
||||
[1110.331] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing 'tr7vpyrzvmh0'
|
||||
[1110.343] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '1Q2DMHZ9AFsBA8D2SNzC4l' for service '7jyiTZ0z2ediD5hZbxu7KH' for identities '[rBayMvm7UI]'
|
||||
[1110.343] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for 'tr7vpyrzvmh0'
|
||||
[1110.348] ERROR zrok/controller/limits.(*Agent).checkShareLimit: expected 2 results; got '0' (from(bucket: "zrok")|> range(start: -5m0s)|> filter(fn: (r) => r["_measurement"] == "xfer")|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")|> filter(fn: (r) => r["namespace"] == "backend")|> filter(fn: (r) => r["share"] == "dh3f3jj7zhig")|> sum())
|
||||
[1110.349] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing 'dh3f3jj7zhig'
|
||||
[1110.351] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy 'BrG9wKvUsajfhPkVfz44g' for service 'nyKOLlxUWWbCzD7h9Jhjq' for identities '[rBayMvm7UI]'
|
||||
[1110.351] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for 'dh3f3jj7zhig'
|
||||
[1110.356] ERROR zrok/controller/limits.(*Agent).checkShareLimit: expected 2 results; got '0' (from(bucket: "zrok")|> range(start: -5m0s)|> filter(fn: (r) => r["_measurement"] == "xfer")|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")|> filter(fn: (r) => r["namespace"] == "backend")|> filter(fn: (r) => r["share"] == "8k6dnu7x7ag0")|> sum())
|
||||
[1110.356] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing '8k6dnu7x7ag0'
|
||||
[1110.364] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy '1kbYWDgPbtk0JYjIPsRGOC' for service '2J0I9dPe2JGnY1GwjmM6n7' for identities '[rBayMvm7UI]'
|
||||
[1110.364] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for '8k6dnu7x7ag0'
|
||||
[1110.372] ERROR zrok/controller/limits.(*Agent).checkShareLimit: expected 2 results; got '0' (from(bucket: "zrok")|> range(start: -5m0s)|> filter(fn: (r) => r["_measurement"] == "xfer")|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")|> filter(fn: (r) => r["namespace"] == "backend")|> filter(fn: (r) => r["share"] == "0evcupz5k410")|> sum())
|
||||
[1110.372] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: relaxing '0evcupz5k410'
|
||||
[1110.374] INFO zrok/controller/zrokEdgeSdk.CreateServicePolicyDial: created dial service policy 'E30643mY9M6vU6bQSQHa9' for service '3WHJGqUdxkDtPYLgEL5V3q' for identities '[rBayMvm7UI]'
|
||||
[1110.374] INFO zrok/controller/limits.(*shareRelaxAction).HandleShare: added dial service policy for '0evcupz5k410'
|
||||
[1110.378] INFO zrok/controller/limits.(*Agent).relax: share '7u9szn30ikh0' still over limit
|
||||
[1115.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: gaSGp4i0r backend {rx: 3.4 kB, tx: 3.4 kB} frontend {rx: 3.4 kB, tx: 3.4 kB}
|
||||
[1120.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: gaSGp4i0r backend {rx: 26.5 kB, tx: 26.7 kB} frontend {rx: 26.7 kB, tx: 26.5 kB}
|
||||
[1120.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: nESTh4iur backend {rx: 65.6 kB, tx: 66.1 kB} frontend {rx: 66.1 kB, tx: 65.6 kB}
|
||||
[1120.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: GGiTp4S0w backend {rx: 53.5 kB, tx: 54.0 kB} frontend {rx: 54.0 kB, tx: 53.5 kB}
|
||||
[1125.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: gaSGp4i0r backend {rx: 43.7 kB, tx: 44.1 kB} frontend {rx: 44.1 kB, tx: 43.7 kB}
|
||||
[1125.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: fSiGh4iur backend {rx: 17.8 kB, tx: 18.0 kB} frontend {rx: 18.0 kB, tx: 17.8 kB}
|
||||
[1125.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 0evcupz5k410, circuit: k4SGhVSuw backend {rx: 51.9 kB, tx: 52.3 kB} frontend {rx: 52.3 kB, tx: 51.9 kB}
|
||||
[1130.163] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: gaSGp4i0r backend {rx: 50.9 kB, tx: 51.2 kB} frontend {rx: 51.2 kB, tx: 50.9 kB}
|
||||
[1130.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: GGiTp4S0w backend {rx: 48.6 kB, tx: 49.0 kB} frontend {rx: 49.0 kB, tx: 48.6 kB}
|
||||
[1130.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: fSiGh4iur backend {rx: 37.8 kB, tx: 38.0 kB} frontend {rx: 38.0 kB, tx: 37.8 kB}
|
||||
[1135.164] INFO zrok/controller/metrics.(*influxWriter).Handle: share: tr7vpyrzvmh0, circuit: gaSGp4i0r backend {rx: 57.6 kB, tx: 58.1 kB} frontend {rx: 58.1 kB, tx: 57.6 kB}
|
||||
[1135.166] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: fSiGh4iur backend {rx: 43.6 kB, tx: 44.1 kB} frontend {rx: 44.1 kB, tx: 43.6 kB}
|
||||
[1135.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: GGiTp4S0w backend {rx: 51.0 kB, tx: 51.4 kB} frontend {rx: 51.4 kB, tx: 51.0 kB}
|
||||
[1140.165] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 8k6dnu7x7ag0, circuit: nESTh4iur backend {rx: 28.4 kB, tx: 28.6 kB} frontend {rx: 28.6 kB, tx: 28.4 kB}
|
||||
[1140.167] INFO zrok/controller/metrics.(*influxWriter).Handle: share: 53z6mz4re7tu, circuit: fSiGh4iur backend {rx: 31.6 kB, tx: 32.0 kB} frontend {rx: 32.0 kB, tx: 31.6 kB}
|
||||
[1140.169] INFO zrok/controller/metrics.(*influxWriter).Handle: share: s0uzz1p7xjrr, circuit: GGiTp4S0w backend {rx: 23.8 kB, tx: 24.2 kB} frontend {rx: 24.2 kB, tx: 23.8 kB}
|
||||
[1141.514] INFO zrok/controller/zrokEdgeSdk.DeleteServiceEdgeRouterPolicy: deleted service edge router policy '2RIKOBMOckfbI2xMSLAKxC' for environment 'd.wJYlpt9'
|
||||
[1141.517] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '1kbYWDgPbtk0JYjIPsRGOC' for environment 'd.wJYlpt9'
|
||||
[1141.519] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '4vT5eEPahgWEVdAuKN91Sd' for environment 'd.wJYlpt9'
|
||||
[1141.521] INFO zrok/controller/zrokEdgeSdk.DeleteConfig: deleted config '5nG9jM8VNl0uBFcRRt3AvI' for 'd.wJYlpt9'
|
||||
[1141.522] INFO zrok/controller/zrokEdgeSdk.DeleteService: deleted service '2J0I9dPe2JGnY1GwjmM6n7' for environment 'd.wJYlpt9'
|
||||
[1141.599] INFO zrok/controller/zrokEdgeSdk.DeleteServiceEdgeRouterPolicy: deleted service edge router policy '2CM03d1cNpG4rma38BLzCQ' for environment 'd.wJYlpt9'
|
||||
[1141.602] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy 'WxOiC60VDWvHHlbtcaJ6D' for environment 'd.wJYlpt9'
|
||||
[1141.635] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy 'RRfDaA5kjCqUBVC9LvN1H' for environment 'd.wJYlpt9'
|
||||
[1141.639] INFO zrok/controller/zrokEdgeSdk.DeleteConfig: deleted config '6U3XDGnBjtONN5H6pUze12' for 'd.wJYlpt9'
|
||||
[1141.645] INFO zrok/controller/zrokEdgeSdk.DeleteService: deleted service '2NiotGOyBHBEbFZwbTFJ2u' for environment 'd.wJYlpt9'
|
||||
[1141.701] INFO zrok/controller/zrokEdgeSdk.DeleteServiceEdgeRouterPolicy: deleted service edge router policy '2ZnnIXSTQ3Zscha1kykqQr' for environment 'd.wJYlpt9'
|
||||
[1141.704] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy 'BrG9wKvUsajfhPkVfz44g' for environment 'd.wJYlpt9'
|
||||
[1141.706] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '1xF4ky6cDJm63tzlNTqoLC' for environment 'd.wJYlpt9'
|
||||
[1141.707] INFO zrok/controller/zrokEdgeSdk.DeleteConfig: deleted config '76iBDASRcxOmGtdwjVHo26' for 'd.wJYlpt9'
|
||||
[1141.708] INFO zrok/controller/zrokEdgeSdk.DeleteService: deleted service 'nyKOLlxUWWbCzD7h9Jhjq' for environment 'd.wJYlpt9'
|
||||
[1141.926] INFO zrok/controller/zrokEdgeSdk.DeleteServiceEdgeRouterPolicy: deleted service edge router policy '3xAG26zA9yska3LeZQUJ3N' for environment 'd.wJYlpt9'
|
||||
[1141.927] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: did not find a service policy
|
||||
[1141.929] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '1mabRt9jefSe52CJh6FmhB' for environment 'd.wJYlpt9'
|
||||
[1141.931] INFO zrok/controller/zrokEdgeSdk.DeleteConfig: deleted config '2gid15nP0GIUVuaFQ15GWV' for 'd.wJYlpt9'
|
||||
[1141.932] INFO zrok/controller/zrokEdgeSdk.DeleteService: deleted service '6FzYnK0RFJmT0rDSP1bzVE' for environment 'd.wJYlpt9'
|
||||
[1142.053] INFO zrok/controller/zrokEdgeSdk.DeleteServiceEdgeRouterPolicy: deleted service edge router policy '2nMZaiChQAPpFnblNn1ljP' for environment 'd.wJYlpt9'
|
||||
[1142.056] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '1Q2DMHZ9AFsBA8D2SNzC4l' for environment 'd.wJYlpt9'
|
||||
[1142.058] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '6RwWEoIsb8gBVKJfZP3ur3' for environment 'd.wJYlpt9'
|
||||
[1142.064] INFO zrok/controller/zrokEdgeSdk.DeleteConfig: deleted config '1FnBhnGNXDe58dwTpbFc1x' for 'd.wJYlpt9'
|
||||
[1142.066] INFO zrok/controller/zrokEdgeSdk.DeleteService: deleted service '7jyiTZ0z2ediD5hZbxu7KH' for environment 'd.wJYlpt9'
|
||||
[1142.320] INFO zrok/controller/zrokEdgeSdk.DeleteServiceEdgeRouterPolicy: deleted service edge router policy '2AqCUMqNtarmglOfhvnkI' for environment 'd.wJYlpt9'
|
||||
[1142.324] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy 'E30643mY9M6vU6bQSQHa9' for environment 'd.wJYlpt9'
|
||||
[1142.326] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '4V8FsgCt63ySkG2pFWG5fz' for environment 'd.wJYlpt9'
|
||||
[1142.329] INFO zrok/controller/zrokEdgeSdk.DeleteConfig: deleted config '19cyxfHo32R6fhVsYHZ84g' for 'd.wJYlpt9'
|
||||
[1142.330] INFO zrok/controller/zrokEdgeSdk.DeleteService: deleted service '3WHJGqUdxkDtPYLgEL5V3q' for environment 'd.wJYlpt9'
|
||||
[1142.701] INFO zrok/controller/zrokEdgeSdk.DeleteServiceEdgeRouterPolicy: deleted service edge router policy '2CGCz8dcquNvZC0ZUwDZ5F' for environment 'd.wJYlpt9'
|
||||
[1142.704] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '2ubWYvKo2EOnrn1U4MQ4Cu' for environment 'd.wJYlpt9'
|
||||
[1142.708] INFO zrok/controller/zrokEdgeSdk.DeleteServicePolicy: deleted service policy '6oohOQFEo75yl9vnIbyzdj' for environment 'd.wJYlpt9'
|
||||
[1142.709] INFO zrok/controller/zrokEdgeSdk.DeleteConfig: deleted config '4AN4sOtdQv99uHmFn3erx4' for 'd.wJYlpt9'
|
||||
[1142.710] INFO zrok/controller/zrokEdgeSdk.DeleteService: deleted service 'KtK5E46HR93YIBrrwUlIN' for environment 'd.wJYlpt9'
|
||||
```
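The `checkShareLimit` errors above also expose the raw Flux query the limits agent runs against InfluxDB: it sums the `rx` and `tx` fields of the `xfer` measurement in the `backend` namespace for a single share over the trailing five minutes, and it expects two summed series back (one for `rx`, one for `tx`). A limited share that produced no backend traffic in that window returns nothing, which is what the "expected 2 results; got '0'" messages reflect. As a rough illustration only (not the `zrok` code), the same query could be issued with the InfluxDB Go client; the URL, token, and organization below are placeholders taken from the example configuration, and only the Flux text itself comes from the log output:

```
// Rough illustration of the limit-check query seen in the errors above; the
// connection details are placeholders, not zrok's own wiring.
package main

import (
	"context"
	"fmt"
	"log"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
)

func main() {
	client := influxdb2.NewClient("http://127.0.0.1:8086", "<INFLUX TOKEN>")
	defer client.Close()

	share := "7u9szn30ikh0"
	flux := fmt.Sprintf(`from(bucket: "zrok")
		|> range(start: -5m0s)
		|> filter(fn: (r) => r["_measurement"] == "xfer")
		|> filter(fn: (r) => r["_field"] == "rx" or r["_field"] == "tx")
		|> filter(fn: (r) => r["namespace"] == "backend")
		|> filter(fn: (r) => r["share"] == "%v")
		|> sum()`, share)

	result, err := client.QueryAPI("zrok").Query(context.Background(), flux)
	if err != nil {
		log.Fatal(err)
	}
	// two records are expected: one summed value for "rx" and one for "tx"
	for result.Next() {
		fmt.Printf("%v: %v\n", result.Record().Field(), result.Record().Value())
	}
	if result.Err() != nil {
		log.Fatal(result.Err())
	}
}
```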
@ -1,31 +0,0 @@
`v0.4` includes a new metrics infrastructure based on OpenZiti usage, which provides `zrok` with telemetry used to power end-user intelligence about shares, and also to power usage-based limits.

# Configuration

This requires a version of OpenZiti with a `fabric` dependency of `v0.22.52` or newer.

## controller configuration:

```
network:
  intervalAgeThreshold: 5s
  metricsReportInterval: 5s

events:
  jsonLogger:
    subscriptions:
      - type: fabric.usage
        version: 3
    handler:
      type: file
      format: json
      path: /tmp/fabric-usage.log
```

## router configuration:

```
metrics:
  reportInterval: 5s
  intervalAgeThreshold: 5s
```
90
etc/ctrl.yml
@ -9,7 +9,7 @@
# configuration, the software will expect this field to be incremented. This protects you against invalid configuration
# versions.
#
v: 2
v: 3

admin:
  # The `secrets` array contains a list of strings that represent valid `ZROK_ADMIN_TOKEN` values to be used for
@ -23,6 +23,20 @@ admin:
  #
  tou_link: '<a href="https://google.com" target="_">Terms and Conditions</a>'

# The `bridge` section configures the `zrok controller metrics bridge`, specifying the source and sink where OpenZiti
# `fabric.usage` events are consumed and then sent into `zrok`. For production environments, we recommend that you use
# the `fileSource`, tailing the events from a JSON file written to by the OpenZiti controller. The `amqpSink` will then
# forward the events to an AMQP queue for consumption by multiple `zrok` controllers.
#
bridge:
  source:
    type: fileSource
    path: /tmp/fabric-usage.log
  sink:
    type: amqpSink
    url: amqp://guest:guest@localhost:5672
    queue_name: events
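
To make the `amqpSink` side of the bridge concrete, here is a hedged sketch of publishing one JSON event onto the `events` queue using `github.com/rabbitmq/amqp091-go` (the dependency added to `go.mod` in this change). The function name, arguments, and placeholder payload are illustrative; this is not the bridge's actual implementation.

```
package main

import (
	"context"
	"time"

	amqp "github.com/rabbitmq/amqp091-go"
)

// Publish a single JSON-encoded fabric.usage event onto the queue,
// mirroring what the `amqpSink` configured above does.
func publishEvent(url, queueName string, body []byte) error {
	conn, err := amqp.Dial(url)
	if err != nil {
		return err
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		return err
	}
	defer ch.Close()

	// durable queue so events survive a broker restart
	if _, err := ch.QueueDeclare(queueName, true, false, false, false, nil); err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	return ch.PublishWithContext(ctx, "", queueName, false, false, amqp.Publishing{
		ContentType: "application/json",
		Body:        body,
	})
}

func main() {
	event := []byte(`{"example": "usage event"}`) // placeholder payload
	if err := publishEvent("amqp://guest:guest@localhost:5672", "events", event); err != nil {
		panic(err)
	}
}
```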

# The `endpoint` section determines where the HTTP listener that serves the API and web console will be bound.
#
endpoint:
@ -38,20 +52,46 @@ email:
  password: ""
  from: ziggy@zrok.io

# InfluxDB configuration. InfluxDB is used to support sparkline displays in the web console.
# Service instance limits configuration.
#
influx:
  url: http://127.0.0.1:8086
  bucket: zrok
  org: zrok
  token: ""

# Instance-wide, per-user limits. `-1` represents unlimited. Each user can have the `limitless` flag set on their
# record in the `accounts` table in the database, allowing that user to ignore the instance-wide limits.
# See `docs/guides/metrics-and-limits/configuring-limits.md` for details.
#
limits:
  environments: -1
  shares: -1
  environments: -1
  shares: -1
  bandwidth:
    per_account:
      period: 5m
      warning:
        rx: -1
        tx: -1
        total: 7242880
      limit:
        rx: -1
        tx: -1
        total: 10485760
    per_environment:
      period: 5m
      warning:
        rx: -1
        tx: -1
        total: -1
      limit:
        rx: -1
        tx: -1
        total: -1
    per_share:
      period: 5m
      warning:
        rx: -1
        tx: -1
        total: -1
      limit:
        rx: -1
        tx: -1
        total: -1
  enforcing: false
  cycle: 5m
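
A minimal sketch of how thresholds like these can be evaluated, assuming measured `rx`/`tx` byte totals for the configured `period` and treating `-1` as unlimited. The `Threshold` type and the printed outcomes are illustrative only; see the limits guide for what zrok actually does when a warning or limit threshold is crossed.

```
package main

import "fmt"

// Bandwidth thresholds as configured above; -1 means unlimited.
type Threshold struct {
	Rx, Tx, Total int64
}

// exceeded reports whether any configured (non -1) threshold has been crossed.
func (t Threshold) exceeded(rx, tx int64) bool {
	if t.Rx != -1 && rx > t.Rx {
		return true
	}
	if t.Tx != -1 && tx > t.Tx {
		return true
	}
	if t.Total != -1 && rx+tx > t.Total {
		return true
	}
	return false
}

func main() {
	// per_account thresholds from the example configuration above
	warning := Threshold{Rx: -1, Tx: -1, Total: 7242880}
	limit := Threshold{Rx: -1, Tx: -1, Total: 10485760}

	rx, tx := int64(6_000_000), int64(3_000_000) // roughly 9 MB transferred during the period

	switch {
	case limit.exceeded(rx, tx):
		fmt.Println("limit threshold exceeded")
	case warning.exceeded(rx, tx):
		fmt.Println("warning threshold exceeded")
	default:
		fmt.Println("within limits")
	}
}
```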

# Background maintenance job configuration. The `registration` job purges registration requests created through the
# `zrok invite` tool. The `reset_password` job purges password reset requests.
@ -66,17 +106,35 @@ maintenance:
    check_frequency: 15m
    batch_limit: 500
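
As a small illustration of the `check_frequency` / `batch_limit` semantics described above, a purge loop might look roughly like the following; `purgeBatch` is a hypothetical stand-in for the real cleanup query, not zrok's maintenance code.

```
package main

import (
	"fmt"
	"time"
)

// purgeBatch stands in for the real cleanup query; it would delete up to
// `limit` expired requests and report how many it removed. (Hypothetical.)
func purgeBatch(limit int) int {
	// ... DELETE expired rows, bounded by `limit` ...
	return 0
}

// Run a batch-limited purge on a fixed check frequency, mirroring the
// `check_frequency` and `batch_limit` settings above.
func main() {
	checkFrequency := 15 * time.Minute
	batchLimit := 500

	ticker := time.NewTicker(checkFrequency)
	defer ticker.Stop()

	for range ticker.C {
		deleted := purgeBatch(batchLimit)
		fmt.Printf("purged %d expired requests\n", deleted)
	}
}
```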

# The name of the service used to report metrics from the frontends (`zrok access public`) to the zrok controller
# fleet.
# Metrics configuration.
#
metrics:
  service_name: metrics
  agent:
    # The `source` controls where the `zrok controller` looks to consume OpenZiti `fabric.usage` events. It works in
    # concert with the `bridge` section above to consume events from an AMQP queue. It can also be configured to use a
    # `fileSource` (see the `bridge` section above) or a `websocketSource`.
    #
    source:
      type: amqpSource
      url: amqp://guest:guest@localhost:5672
      queue_name: events
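
The counterpart to the bridge's `amqpSink` is consuming from the same queue. A hedged sketch using `github.com/rabbitmq/amqp091-go`, with an illustrative consumer tag; this is not the metrics agent's actual implementation:

```
package main

import (
	"fmt"

	amqp "github.com/rabbitmq/amqp091-go"
)

// Consume JSON fabric.usage events from the "events" queue, the counterpart
// of the `amqpSink` used by the metrics bridge above.
func main() {
	conn, err := amqp.Dial("amqp://guest:guest@localhost:5672")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		panic(err)
	}
	defer ch.Close()

	deliveries, err := ch.Consume("events", "example-metrics-agent", false, false, false, false, nil)
	if err != nil {
		panic(err)
	}

	for d := range deliveries {
		// a real agent would decode the usage event and record it as metrics here
		fmt.Printf("received %d bytes\n", len(d.Body))
		_ = d.Ack(false)
	}
}
```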
  #
  # The `influx` section configures access to the InfluxDB instance used to store `zrok` metrics.
  #
  influx:
    url: "http://127.0.0.1:8086"
    bucket: zrok
    org: zrok
    token: "<INFLUX TOKEN>"
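
For reference, writing a sample point into the bucket configured above with the InfluxDB v2 Go client (`github.com/influxdata/influxdb-client-go/v2`) looks roughly like this. The measurement, tag, and field names are assumptions for illustration, not zrok's actual schema.

```
package main

import (
	"context"
	"time"

	influxdb2 "github.com/influxdata/influxdb-client-go/v2"
)

// Write one rx/tx sample into the configured bucket. Measurement, tag, and
// field names here are illustrative assumptions.
func main() {
	client := influxdb2.NewClient("http://127.0.0.1:8086", "<INFLUX TOKEN>")
	defer client.Close()

	writeAPI := client.WriteAPIBlocking("zrok", "zrok") // org, bucket

	point := influxdb2.NewPoint(
		"xfer",                                        // measurement (assumed)
		map[string]string{"share": "exampleShareTok"}, // tags (assumed)
		map[string]interface{}{"rx": int64(1024), "tx": int64(4096)},
		time.Now(),
	)

	if err := writeAPI.WritePoint(context.Background(), point); err != nil {
		panic(err)
	}
}
```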

# Configure the generated URL for the registration email. The registration token will be appended to this URL.
#
registration:
  registration_url_template: https://zrok.server.com/register
  token_strategy: store
  #
  # Set `token_strategy` to `store` to require an invite token.
  #
  #token_strategy: store

# Configure the generated URL for password resets. The reset token will be appended to this URL.
#
@ -1,20 +0,0 @@
# file source
#
source:
  type: file
  path: /tmp/fabric-usage.log

# websocket source
#
#source:
#  type: websocket
#  websocket_endpoint: wss://127.0.0.1:1280/fabric/v1/ws-api
#  api_endpoint: https://127.0.0.1:1280
#  username: admin
#  password: ""

influx:
  url: "http://127.0.0.1:8086"
  bucket: zrok
  org: zrok
  token: ""
2 go.mod
@ -32,6 +32,7 @@ require (
	github.com/openziti/identity v1.0.37
	github.com/openziti/sdk-golang v0.18.61
	github.com/pkg/errors v0.9.1
	github.com/rabbitmq/amqp091-go v1.7.0
	github.com/rubenv/sql-migrate v1.1.2
	github.com/shirou/gopsutil/v3 v3.23.2
	github.com/sirupsen/logrus v1.9.0
@ -41,7 +42,6 @@ require (
	golang.org/x/crypto v0.6.0
	golang.org/x/net v0.8.0
	golang.org/x/time v0.3.0
	gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
	nhooyr.io/websocket v1.8.7
)
7 go.sum
@ -525,6 +525,8 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om
github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg=
github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rabbitmq/amqp091-go v1.7.0 h1:V5CF5qPem5OGSnEo8BoSbsDGwejg6VUJsKEdneaoTUo=
github.com/rabbitmq/amqp091-go v1.7.0/go.mod h1:wfClAtY0C7bOHxd3GjmF26jEHn+rR/0B3+YV+Vn9/NI=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@ -638,6 +640,8 @@ go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZp
go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -913,6 +917,7 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1031,8 +1036,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
1186 ui/package-lock.json (generated)
File diff suppressed because it is too large
@ -16,12 +16,12 @@
    "react-bootstrap": "^2.7.0",
    "react-data-table-component": "^7.5.2",
    "react-dom": "^18.2.0",
    "react-flow-renderer": "^10.3.12",
    "react-force-graph": "^1.41.20",
    "react-router-dom": "^6.4.0",
    "react-sizeme": "^3.0.2",
    "react-sparklines": "^1.7.0",
    "styled-components": "^5.3.5"
    "styled-components": "^5.3.5",
    "svgo": "^3.0.2"
  },
  "devDependencies": {
    "react-scripts": "^5.0.1"