Merge branch 'main' of github.com:openziti/zrok into add-changelog-reminder

This commit is contained in:
Kenneth Bingham 2024-06-26 18:17:59 -04:00
commit df307a5e7a
No known key found for this signature in database
GPG Key ID: 31709281860130B6
82 changed files with 2479 additions and 2007 deletions

View File

@ -1,11 +1,54 @@
# CHANGELOG
## v0.4.34
FIX: Fix for mixing limited and unlimited (-1) resource counts in the limits system (https://github.com/openziti/zrok/issues/680)
CHANGE: add changelog reminder bot
## v0.4.33
FIX: Fix for log message in `Agent.CanAccessShare` (`"account '#%d' over frontends per share limit '%d'"`), which was not returning the correct limit value.
FIX: Properly set `permission_mode` in `frontends` when creating a private frontend using `zrok access private` (https://github.com/openziti/zrok/issues/677)
CHANGE: Updated `react-bootstrap` to version `2.10.2` (web console).
CHANGE: Updated `@mui/material` to version `5.15.18` (web console).
CHANGE: Updated `react` and `react-dom` to version `18.3.1` (web console).
CHANGE: Updated `recharts` to version `2.12.7` (web console).
CHANGE: Updated `react-router-dom` to version `6.23.1` (web console).
CHANGE: Updated `axios` to version `1.7.2` for (node SDK).
CHANGE: Updated `@openziti/ziti-sdk-nodejs` to version `0.17.0` (node SDK).
## v0.4.32
FEATURE: New permission mode support for public frontends. Open permission mode frontends are available to all users in the service instance. Closed permission mode frontends reference the new `frontend_grants` table that can be used to control which accounts are allowed to create shares using that frontend. `zrok admin create frontend` now supports `--closed` flag to create closed permission mode frontends (https://github.com/openziti/zrok/issues/539)
FEATURE: New config `defaultFrontend` that specifies the default frontend to be used for an environment. Provides the default `--frontend` for `zrok share public` and `zrok reserve public` (https://github.com/openziti/zrok/issues/663)
FEATURE: Resource count limits now include `share_frontends` to limit the number of frontends that are allowed to make connections to a share (https://github.com/openziti/zrok/issues/650)
CHANGE: The frontend selection flag used by `zrok share public` and `zrok reserve public` has been changed from `--frontends` to `--frontend`
FIX: use controller config spec v4 in the Docker instance
## v0.4.31
FIX: Correct the syntax for the Docker and Linux zrok-share "frontdoor" service that broke OAuth email address pattern matching FEATURE: New "limits classes" limits implementation (https://github.com/openziti/zrok/issues/606). This new feature allows for extensive limits customization on a per-user basis, with fallback to the global defaults in the controller configuration.
CHANGE: log messages that said "backend proxy endpoint" were clarified to say "backend target" CHANGE: The controller configuration version has been updated to version `4` (`v: 4`) to support the new limits global configuration changes (https://github.com/openziti/zrok/issues/606).
CHANGE: add changelog reminder bot
CHANGE: A new `ZROK_CTRL_CONFIG_VERSION` environment variable now exists to temporarily force the controller to assume a specific controller configuration version, regardless of what version exists in the file. This allows two different config versions to potentially be co-mingled in the same controller configuration file. Use with care (https://github.com/openziti/zrok/issues/648)
CHANGE: Log messages that said `backend proxy endpoint` were clarified to say `backend target`.
FIX: Correct the syntax for the Docker and Linux zrok-share "frontdoor" service that broke OAuth email address pattern matching.
## v0.4.30

View File

@ -4,6 +4,7 @@ import (
"github.com/openziti/zrok/environment" "github.com/openziti/zrok/environment"
"github.com/openziti/zrok/rest_client_zrok/admin" "github.com/openziti/zrok/rest_client_zrok/admin"
"github.com/openziti/zrok/rest_model_zrok" "github.com/openziti/zrok/rest_model_zrok"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/tui" "github.com/openziti/zrok/tui"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@ -15,7 +16,8 @@ func init() {
} }
type adminCreateFrontendCommand struct { type adminCreateFrontendCommand struct {
cmd *cobra.Command cmd *cobra.Command
closed bool
} }
func newAdminCreateFrontendCommand() *adminCreateFrontendCommand { func newAdminCreateFrontendCommand() *adminCreateFrontendCommand {
@ -25,6 +27,7 @@ func newAdminCreateFrontendCommand() *adminCreateFrontendCommand {
Args: cobra.ExactArgs(3), Args: cobra.ExactArgs(3),
} }
command := &adminCreateFrontendCommand{cmd: cmd} command := &adminCreateFrontendCommand{cmd: cmd}
cmd.Flags().BoolVar(&command.closed, "closed", false, "Enabled closed permission mode")
cmd.Run = command.run cmd.Run = command.run
return command return command
} }
@ -44,11 +47,16 @@ func (cmd *adminCreateFrontendCommand) run(_ *cobra.Command, args []string) {
panic(err) panic(err)
} }
permissionMode := sdk.OpenPermissionMode
if cmd.closed {
permissionMode = sdk.ClosedPermissionMode
}
req := admin.NewCreateFrontendParams() req := admin.NewCreateFrontendParams()
req.Body = &rest_model_zrok.CreateFrontendRequest{ req.Body = &rest_model_zrok.CreateFrontendRequest{
ZID: zId, ZID: zId,
PublicName: publicName, PublicName: publicName,
URLTemplate: urlTemplate, URLTemplate: urlTemplate,
PermissionMode: string(permissionMode),
} }
resp, err := zrok.Admin.CreateFrontend(req, mustGetAdminAuth()) resp, err := zrok.Admin.CreateFrontend(req, mustGetAdminAuth())

View File

@ -40,6 +40,12 @@ func (cmd *configGetCommand) run(_ *cobra.Command, args []string) {
} else { } else {
fmt.Println("apiEndpoint = <unset>") fmt.Println("apiEndpoint = <unset>")
} }
case "defaultFrontend":
if env.Config() != nil && env.Config().DefaultFrontend != "" {
fmt.Printf("defaultFrontend = %v\n", env.Config().DefaultFrontend)
} else {
fmt.Println("defaultFrontend = <unset>")
}
default: default:
fmt.Printf("unknown config name '%v'\n", configName) fmt.Printf("unknown config name '%v'\n", configName)
} }

View File

@ -63,6 +63,20 @@ func (cmd *configSetCommand) run(_ *cobra.Command, args []string) {
fmt.Printf("\n[%v]: because you have a %v-d environment, you won't see your config change until you run %v first!\n\n", tui.WarningLabel, tui.Code.Render("zrok enable"), tui.Code.Render("zrok disable")) fmt.Printf("\n[%v]: because you have a %v-d environment, you won't see your config change until you run %v first!\n\n", tui.WarningLabel, tui.Code.Render("zrok enable"), tui.Code.Render("zrok disable"))
} }
case "defaultFrontend":
if env.Config() == nil {
if err := env.SetConfig(&env_core.Config{DefaultFrontend: value}); err != nil {
tui.Error("unable to save config", err)
}
} else {
cfg := env.Config()
cfg.DefaultFrontend = value
if err := env.SetConfig(cfg); err != nil {
tui.Error("unable to save config", err)
}
}
fmt.Println("zrok configuration updated")
default: default:
fmt.Printf("unknown config name '%v'\n", configName) fmt.Printf("unknown config name '%v'\n", configName)
os.Exit(1) os.Exit(1)

View File

@ -3,7 +3,6 @@ package main
import ( import (
"fmt" "fmt"
"github.com/openziti/zrok/environment" "github.com/openziti/zrok/environment"
"github.com/openziti/zrok/environment/env_core"
"github.com/openziti/zrok/tui" "github.com/openziti/zrok/tui"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"os" "os"
@ -36,18 +35,25 @@ func (cmd *configUnsetCommand) run(_ *cobra.Command, args []string) {
panic(err) panic(err)
} }
switch configName { if env.Config() != nil {
case "apiEndpoint": cfg := env.Config()
if err := env.SetConfig(&env_core.Config{}); err != nil { switch configName {
case "apiEndpoint":
cfg.ApiEndpoint = ""
if env.IsEnabled() {
fmt.Printf("\n[%v]: because you have a %v-d environment, you won't see your config change until you run %v first!\n\n", tui.WarningLabel, tui.Code.Render("zrok enable"), tui.Code.Render("zrok disable"))
}
case "defaultFrontend":
cfg.DefaultFrontend = ""
default:
fmt.Printf("unknown config name '%v'\n", configName)
os.Exit(1)
}
if err := env.SetConfig(cfg); err != nil {
tui.Error("unable to save config", err) tui.Error("unable to save config", err)
} }
fmt.Println("zrok configuration updated") fmt.Println("zrok configuration updated")
if env.IsEnabled() {
fmt.Printf("\n[%v]: because you have a %v-d environment, you won't see your config change until you run %v first!\n\n", tui.WarningLabel, tui.Code.Render("zrok enable"), tui.Code.Render("zrok disable"))
}
default:
fmt.Printf("unknown config name '%v'\n", configName)
os.Exit(1)
} }
} }

View File

@ -40,8 +40,13 @@ func newReserveCommand() *reserveCommand {
Args: cobra.RangeArgs(1, 2), Args: cobra.RangeArgs(1, 2),
} }
command := &reserveCommand{cmd: cmd} command := &reserveCommand{cmd: cmd}
defaultFrontends := []string{"public"}
if root, err := environment.LoadRoot(); err == nil {
defaultFrontend, _ := root.DefaultFrontend()
defaultFrontends = []string{defaultFrontend}
}
cmd.Flags().StringVarP(&command.uniqueName, "unique-name", "n", "", "A unique name for the reserved share (defaults to generated identifier)") cmd.Flags().StringVarP(&command.uniqueName, "unique-name", "n", "", "A unique name for the reserved share (defaults to generated identifier)")
cmd.Flags().StringArrayVar(&command.frontendSelection, "frontends", []string{"public"}, "Selected frontends to use for the share") cmd.Flags().StringArrayVar(&command.frontendSelection, "frontend", defaultFrontends, "Selected frontends to use for the share")
cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode (public|private: proxy, web, caddy, drive) (private: tcpTunnel, udpTunnel, socks, vpn)") cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode (public|private: proxy, web, caddy, drive) (private: tcpTunnel, udpTunnel, socks, vpn)")
cmd.Flags().BoolVarP(&command.jsonOutput, "json-output", "j", false, "Emit JSON describing the created reserved share") cmd.Flags().BoolVarP(&command.jsonOutput, "json-output", "j", false, "Emit JSON describing the created reserved share")
cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (<username:password>,...)") cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (<username:password>,...)")

View File

@ -45,7 +45,12 @@ func newSharePublicCommand() *sharePublicCommand {
Args: cobra.ExactArgs(1), Args: cobra.ExactArgs(1),
} }
command := &sharePublicCommand{cmd: cmd} command := &sharePublicCommand{cmd: cmd}
cmd.Flags().StringArrayVar(&command.frontendSelection, "frontends", []string{"public"}, "Selected frontends to use for the share") defaultFrontends := []string{"public"}
if root, err := environment.LoadRoot(); err == nil {
defaultFrontend, _ := root.DefaultFrontend()
defaultFrontends = []string{defaultFrontend}
}
cmd.Flags().StringArrayVar(&command.frontendSelection, "frontend", defaultFrontends, "Selected frontends to use for the share")
cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode {proxy, web, caddy, drive}") cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode {proxy, web, caddy, drive}")
cmd.Flags().BoolVar(&command.headless, "headless", false, "Disable TUI and run headless") cmd.Flags().BoolVar(&command.headless, "headless", false, "Disable TUI and run headless")
cmd.Flags().BoolVar(&command.insecure, "insecure", false, "Enable insecure TLS certificate validation for <target>") cmd.Flags().BoolVar(&command.insecure, "insecure", false, "Enable insecure TLS certificate validation for <target>")

View File

@ -48,8 +48,10 @@ func (cmd *statusCommand) run(_ *cobra.Command, _ []string) {
t.SetOutputMirror(os.Stdout) t.SetOutputMirror(os.Stdout)
t.SetStyle(table.StyleColoredDark) t.SetStyle(table.StyleColoredDark)
t.AppendHeader(table.Row{"Config", "Value", "Source"}) t.AppendHeader(table.Row{"Config", "Value", "Source"})
apiEndpoint, from := env.ApiEndpoint() apiEndpoint, apiEndpointFrom := env.ApiEndpoint()
t.AppendRow(table.Row{"apiEndpoint", apiEndpoint, from}) t.AppendRow(table.Row{"apiEndpoint", apiEndpoint, apiEndpointFrom})
defaultFrontend, defaultFrontendFrom := env.DefaultFrontend()
t.AppendRow(table.Row{"defaultFrontend", defaultFrontend, defaultFrontendFrom})
t.Render() t.Render()
_, _ = fmt.Fprintf(os.Stderr, "\n") _, _ = fmt.Fprintf(os.Stderr, "\n")

View File

@ -81,7 +81,7 @@ func (h *accessHandler) Handle(params share.AccessParams, principal *rest_model_
return share.NewAccessInternalServerError() return share.NewAccessInternalServerError()
} }
if _, err := str.CreateFrontend(envId, &store.Frontend{PrivateShareId: &shr.Id, Token: feToken, ZId: envZId}, trx); err != nil { if _, err := str.CreateFrontend(envId, &store.Frontend{PrivateShareId: &shr.Id, Token: feToken, ZId: envZId, PermissionMode: store.ClosedPermissionMode}, trx); err != nil {
logrus.Errorf("error creating frontend record for user '%v': %v", principal.Email, err) logrus.Errorf("error creating frontend record for user '%v': %v", principal.Email, err)
return share.NewAccessInternalServerError() return share.NewAccessInternalServerError()
} }

View File

@ -1,6 +1,8 @@
package config package config
import ( import (
"os"
"strconv"
"time" "time"
"github.com/openziti/zrok/controller/emailUi" "github.com/openziti/zrok/controller/emailUi"
@ -14,7 +16,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
const ConfigVersion = 3 const ConfigVersion = 4
type Config struct { type Config struct {
V int V int
@ -119,8 +121,22 @@ func LoadConfig(path string) (*Config, error) {
if err := cf.BindYaml(cfg, path, env.GetCfOptions()); err != nil { if err := cf.BindYaml(cfg, path, env.GetCfOptions()); err != nil {
return nil, errors.Wrapf(err, "error loading controller config '%v'", path) return nil, errors.Wrapf(err, "error loading controller config '%v'", path)
} }
if cfg.V != ConfigVersion { if !envVersionOk() && cfg.V != ConfigVersion {
return nil, errors.Errorf("expecting configuration version '%v', your configuration is version '%v'; please see zrok.io for changelog and configuration documentation", ConfigVersion, cfg.V) return nil, errors.Errorf("expecting configuration version '%v', your configuration is version '%v'; please see zrok.io for changelog and configuration documentation", ConfigVersion, cfg.V)
} }
return cfg, nil return cfg, nil
} }
// envVersionOk reports whether the ZROK_CTRL_CONFIG_VERSION environment
// variable is set and parses to an integer equal to the compiled-in
// ConfigVersion. It is used to allow an operator to temporarily force the
// controller to accept a config file whose `v:` field does not match.
// An unset variable, a non-integer value, or a non-matching version all
// return false.
func envVersionOk() bool {
	vStr := os.Getenv("ZROK_CTRL_CONFIG_VERSION")
	if vStr != "" {
		envV, err := strconv.Atoi(vStr)
		if err != nil {
			// non-integer override value; treat as not set
			return false
		}
		if envV == ConfigVersion {
			return true
		}
	}
	return false
}

View File

@ -2,7 +2,6 @@ package controller
import ( import (
"errors" "errors"
"github.com/go-openapi/runtime/middleware" "github.com/go-openapi/runtime/middleware"
"github.com/lib/pq" "github.com/lib/pq"
"github.com/mattn/go-sqlite3" "github.com/mattn/go-sqlite3"
@ -57,11 +56,12 @@ func (h *createFrontendHandler) Handle(params admin.CreateFrontendParams, princi
} }
fe := &store.Frontend{ fe := &store.Frontend{
Token: feToken, Token: feToken,
ZId: params.Body.ZID, ZId: params.Body.ZID,
PublicName: &params.Body.PublicName, PublicName: &params.Body.PublicName,
UrlTemplate: &params.Body.URLTemplate, UrlTemplate: &params.Body.URLTemplate,
Reserved: true, Reserved: true,
PermissionMode: store.PermissionMode(params.Body.PermissionMode),
} }
if _, err := str.CreateGlobalFrontend(fe, tx); err != nil { if _, err := str.CreateGlobalFrontend(fe, tx); err != nil {
perr := &pq.Error{} perr := &pq.Error{}

View File

@ -1,48 +0,0 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/controller/zrokEdgeSdk"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// accountLimitAction applies a bandwidth-limit enforcement to an account
// (removed in this commit in favor of the new "limit classes" implementation).
type accountLimitAction struct {
	str  *store.Store        // data store used to enumerate environments/shares
	zCfg *zrokEdgeSdk.Config // ziti edge API configuration
}
// newAccountLimitAction constructs an accountLimitAction over the given store
// and ziti edge configuration.
func newAccountLimitAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *accountLimitAction {
	return &accountLimitAction{str, zCfg}
}
// HandleAccount enforces a limit on the given account by deleting the dial
// service policies for every share in every environment belonging to the
// account, effectively disabling access to those shares. The rx/tx byte
// counts and bandwidth configuration parameters are ignored by this action.
// All store reads occur inside the supplied transaction trx.
func (a *accountLimitAction) HandleAccount(acct *store.Account, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
	logrus.Infof("limiting '%v'", acct.Email)

	envs, err := a.str.FindEnvironmentsForAccount(acct.Id, trx)
	if err != nil {
		return errors.Wrapf(err, "error finding environments for account '%v'", acct.Email)
	}

	// single edge client shared across all policy deletions below
	edge, err := zrokEdgeSdk.Client(a.zCfg)
	if err != nil {
		return err
	}

	for _, env := range envs {
		shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
		if err != nil {
			return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
		}
		for _, shr := range shrs {
			// removing the dial policy prevents frontends from connecting to the share
			if err := zrokEdgeSdk.DeleteServicePoliciesDial(env.ZId, shr.Token, edge); err != nil {
				return errors.Wrapf(err, "error deleting dial service policy for '%v'", shr.Token)
			}
			logrus.Infof("removed dial service policy for share '%v' of environment '%v'", shr.Token, env.ZId)
		}
	}

	return nil
}

View File

@ -1,55 +0,0 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/controller/zrokEdgeSdk"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// accountRelaxAction reverses a previously-applied account limit
// (removed in this commit in favor of the new "limit classes" implementation).
type accountRelaxAction struct {
	str  *store.Store        // data store used to enumerate environments/shares
	zCfg *zrokEdgeSdk.Config // ziti edge API configuration
}
// newAccountRelaxAction constructs an accountRelaxAction over the given store
// and ziti edge configuration.
func newAccountRelaxAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *accountRelaxAction {
	return &accountRelaxAction{str, zCfg}
}
// HandleAccount relaxes (un-limits) the given account by restoring access for
// every share in every environment belonging to the account. Public and
// private shares are restored via the relaxPublicShare / relaxPrivateShare
// helpers respectively. The rx/tx byte counts and bandwidth configuration
// parameters are ignored by this action. All store reads occur inside the
// supplied transaction trx.
func (a *accountRelaxAction) HandleAccount(acct *store.Account, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
	logrus.Infof("relaxing '%v'", acct.Email)

	envs, err := a.str.FindEnvironmentsForAccount(acct.Id, trx)
	if err != nil {
		return errors.Wrapf(err, "error finding environments for account '%v'", acct.Email)
	}

	// single edge client shared across all share restorations below
	edge, err := zrokEdgeSdk.Client(a.zCfg)
	if err != nil {
		return err
	}

	for _, env := range envs {
		shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
		if err != nil {
			return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
		}
		for _, shr := range shrs {
			// dispatch on the share mode; shares in other modes are left untouched
			switch shr.ShareMode {
			case string(sdk.PublicShareMode):
				if err := relaxPublicShare(a.str, edge, shr, trx); err != nil {
					return errors.Wrap(err, "error relaxing public share")
				}
			case string(sdk.PrivateShareMode):
				if err := relaxPrivateShare(a.str, edge, shr, trx); err != nil {
					return errors.Wrap(err, "error relaxing private share")
				}
			}
		}
	}

	return nil
}

View File

@ -1,12 +1,12 @@
package limits package limits
import ( import (
"fmt"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/emailUi" "github.com/openziti/zrok/controller/emailUi"
"github.com/openziti/zrok/controller/metrics" "github.com/openziti/zrok/controller/metrics"
"github.com/openziti/zrok/controller/store" "github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/controller/zrokEdgeSdk" "github.com/openziti/zrok/controller/zrokEdgeSdk"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/util" "github.com/openziti/zrok/util"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -15,42 +15,30 @@ import (
) )
type Agent struct { type Agent struct {
cfg *Config cfg *Config
ifx *influxReader ifx *influxReader
zCfg *zrokEdgeSdk.Config zCfg *zrokEdgeSdk.Config
str *store.Store str *store.Store
queue chan *metrics.Usage queue chan *metrics.Usage
acctWarningActions []AccountAction warningActions []AccountAction
acctLimitActions []AccountAction limitActions []AccountAction
acctRelaxActions []AccountAction relaxActions []AccountAction
envWarningActions []EnvironmentAction close chan struct{}
envLimitActions []EnvironmentAction join chan struct{}
envRelaxActions []EnvironmentAction
shrWarningActions []ShareAction
shrLimitActions []ShareAction
shrRelaxActions []ShareAction
close chan struct{}
join chan struct{}
} }
func NewAgent(cfg *Config, ifxCfg *metrics.InfluxConfig, zCfg *zrokEdgeSdk.Config, emailCfg *emailUi.Config, str *store.Store) (*Agent, error) { func NewAgent(cfg *Config, ifxCfg *metrics.InfluxConfig, zCfg *zrokEdgeSdk.Config, emailCfg *emailUi.Config, str *store.Store) (*Agent, error) {
a := &Agent{ a := &Agent{
cfg: cfg, cfg: cfg,
ifx: newInfluxReader(ifxCfg), ifx: newInfluxReader(ifxCfg),
zCfg: zCfg, zCfg: zCfg,
str: str, str: str,
queue: make(chan *metrics.Usage, 1024), queue: make(chan *metrics.Usage, 1024),
acctWarningActions: []AccountAction{newAccountWarningAction(emailCfg, str)}, warningActions: []AccountAction{newWarningAction(emailCfg, str)},
acctLimitActions: []AccountAction{newAccountLimitAction(str, zCfg)}, limitActions: []AccountAction{newLimitAction(str, zCfg)},
acctRelaxActions: []AccountAction{newAccountRelaxAction(str, zCfg)}, relaxActions: []AccountAction{newRelaxAction(str, zCfg)},
envWarningActions: []EnvironmentAction{newEnvironmentWarningAction(emailCfg, str)}, close: make(chan struct{}),
envLimitActions: []EnvironmentAction{newEnvironmentLimitAction(str, zCfg)}, join: make(chan struct{}),
envRelaxActions: []EnvironmentAction{newEnvironmentRelaxAction(str, zCfg)},
shrWarningActions: []ShareAction{newShareWarningAction(emailCfg, str)},
shrLimitActions: []ShareAction{newShareLimitAction(str, zCfg)},
shrRelaxActions: []ShareAction{newShareRelaxAction(str, zCfg)},
close: make(chan struct{}),
join: make(chan struct{}),
} }
return a, nil return a, nil
} }
@ -66,19 +54,16 @@ func (a *Agent) Stop() {
func (a *Agent) CanCreateEnvironment(acctId int, trx *sqlx.Tx) (bool, error) { func (a *Agent) CanCreateEnvironment(acctId int, trx *sqlx.Tx) (bool, error) {
if a.cfg.Enforcing { if a.cfg.Enforcing {
if empty, err := a.str.IsAccountLimitJournalEmpty(acctId, trx); err == nil && !empty { if err := a.str.LimitCheckLock(acctId, trx); err != nil {
alj, err := a.str.FindLatestAccountLimitJournal(acctId, trx)
if err != nil {
return false, err
}
if alj.Action == store.LimitAction {
return false, nil
}
} else if err != nil {
return false, err return false, err
} }
if a.cfg.Environments > Unlimited { ul, err := a.getUserLimits(acctId, trx)
if err != nil {
return false, err
}
if ul.resource.GetEnvironments() > store.Unlimited {
envs, err := a.str.FindEnvironmentsForAccount(acctId, trx) envs, err := a.str.FindEnvironmentsForAccount(acctId, trx)
if err != nil { if err != nil {
return false, err return false, err
@ -88,51 +73,79 @@ func (a *Agent) CanCreateEnvironment(acctId int, trx *sqlx.Tx) (bool, error) {
} }
} }
} }
return true, nil return true, nil
} }
func (a *Agent) CanCreateShare(acctId, envId int, trx *sqlx.Tx) (bool, error) { func (a *Agent) CanCreateShare(acctId, envId int, reserved, uniqueName bool, _ sdk.ShareMode, backendMode sdk.BackendMode, trx *sqlx.Tx) (bool, error) {
if a.cfg.Enforcing { if a.cfg.Enforcing {
if empty, err := a.str.IsAccountLimitJournalEmpty(acctId, trx); err == nil && !empty { if err := a.str.LimitCheckLock(acctId, trx); err != nil {
alj, err := a.str.FindLatestAccountLimitJournal(acctId, trx)
if err != nil {
return false, err
}
if alj.Action == store.LimitAction {
return false, nil
}
} else if err != nil {
return false, err return false, err
} }
if empty, err := a.str.IsEnvironmentLimitJournalEmpty(envId, trx); err == nil && !empty { ul, err := a.getUserLimits(acctId, trx)
elj, err := a.str.FindLatestEnvironmentLimitJournal(envId, trx) if err != nil {
if err != nil {
return false, err
}
if elj.Action == store.LimitAction {
return false, nil
}
} else if err != nil {
return false, err return false, err
} }
if a.cfg.Shares > Unlimited { if scopedBwc, found := ul.scopes[backendMode]; found {
latestScopedJe, err := a.isBandwidthClassLimitedForAccount(acctId, scopedBwc, trx)
if err != nil {
return false, err
}
if latestScopedJe != nil {
return false, nil
}
} else {
for _, bwc := range ul.bandwidth {
latestJe, err := a.isBandwidthClassLimitedForAccount(acctId, bwc, trx)
if err != nil {
return false, err
}
if latestJe != nil {
return false, nil
}
}
}
rc := ul.resource
if scopeRc, found := ul.scopes[backendMode]; found {
rc = scopeRc
}
if rc.GetShares() > store.Unlimited || (reserved && rc.GetReservedShares() > store.Unlimited) || (reserved && uniqueName && rc.GetUniqueNames() > store.Unlimited) {
envs, err := a.str.FindEnvironmentsForAccount(acctId, trx) envs, err := a.str.FindEnvironmentsForAccount(acctId, trx)
if err != nil { if err != nil {
return false, err return false, err
} }
total := 0 total := 0
reserveds := 0
uniqueNames := 0
for i := range envs { for i := range envs {
shrs, err := a.str.FindSharesForEnvironment(envs[i].Id, trx) shrs, err := a.str.FindSharesForEnvironment(envs[i].Id, trx)
if err != nil { if err != nil {
return false, errors.Wrapf(err, "unable to find shares for environment '%v'", envs[i].ZId) return false, errors.Wrapf(err, "unable to find shares for environment '%v'", envs[i].ZId)
} }
total += len(shrs) total += len(shrs)
if total+1 > a.cfg.Shares { for _, shr := range shrs {
if shr.Reserved {
reserveds++
}
if shr.UniqueName {
uniqueNames++
}
}
if rc.GetShares() > store.Unlimited && total+1 > rc.GetShares() {
logrus.Debugf("account '#%d', environment '%d' over shares limit '%d'", acctId, envId, a.cfg.Shares)
return false, nil
}
if reserved && rc.GetReservedShares() > store.Unlimited && reserveds+1 > rc.GetReservedShares() {
logrus.Debugf("account '#%d', environment '%d' over reserved shares limit '%d'", acctId, envId, a.cfg.ReservedShares)
return false, nil
}
if reserved && uniqueName && rc.GetUniqueNames() > store.Unlimited && uniqueNames+1 > rc.GetUniqueNames() {
logrus.Debugf("account '#%d', environment '%d' over unique names limit '%d'", acctId, envId, a.cfg.UniqueNames)
return false, nil return false, nil
} }
logrus.Infof("total = %d", total)
} }
} }
} }
@ -145,50 +158,56 @@ func (a *Agent) CanAccessShare(shrId int, trx *sqlx.Tx) (bool, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
if empty, err := a.str.IsShareLimitJournalEmpty(shr.Id, trx); err == nil && !empty {
slj, err := a.str.FindLatestShareLimitJournal(shr.Id, trx)
if err != nil {
return false, err
}
if slj.Action == store.LimitAction {
return false, nil
}
} else if err != nil {
return false, err
}
env, err := a.str.GetEnvironment(shr.EnvironmentId, trx) env, err := a.str.GetEnvironment(shr.EnvironmentId, trx)
if err != nil { if err != nil {
return false, err return false, err
} }
if empty, err := a.str.IsEnvironmentLimitJournalEmpty(env.Id, trx); err == nil && !empty {
elj, err := a.str.FindLatestEnvironmentLimitJournal(env.Id, trx)
if err != nil {
return false, err
}
if elj.Action == store.LimitAction {
return false, nil
}
} else if err != nil {
return false, err
}
if env.AccountId != nil { if env.AccountId != nil {
acct, err := a.str.GetAccount(*env.AccountId, trx) if err := a.str.LimitCheckLock(*env.AccountId, trx); err != nil {
return false, err
}
ul, err := a.getUserLimits(*env.AccountId, trx)
if err != nil { if err != nil {
return false, err return false, err
} }
if empty, err := a.str.IsAccountLimitJournalEmpty(acct.Id, trx); err == nil && !empty {
alj, err := a.str.FindLatestAccountLimitJournal(acct.Id, trx) if scopedBwc, found := ul.scopes[sdk.BackendMode(shr.BackendMode)]; found {
latestScopedJe, err := a.isBandwidthClassLimitedForAccount(*env.AccountId, scopedBwc, trx)
if err != nil { if err != nil {
return false, err return false, err
} }
if alj.Action == store.LimitAction { if latestScopedJe != nil {
return false, nil return false, nil
} }
} else if err != nil { } else {
return false, err for _, bwc := range ul.bandwidth {
latestJe, err := a.isBandwidthClassLimitedForAccount(*env.AccountId, bwc, trx)
if err != nil {
return false, err
}
if latestJe != nil {
return false, nil
}
}
} }
rc := ul.resource
if scopeRc, found := ul.scopes[sdk.BackendMode(shr.BackendMode)]; found {
rc = scopeRc
}
if rc.GetShareFrontends() > store.Unlimited {
fes, err := a.str.FindFrontendsForPrivateShare(shr.Id, trx)
if err != nil {
return false, err
}
if len(fes)+1 > rc.GetShareFrontends() {
logrus.Infof("account '#%d' over frontends per share limit '%d'", *env.AccountId, rc.GetShareFrontends())
return false, nil
}
}
} else {
return false, nil
} }
} }
return true, nil return true, nil
@ -251,245 +270,65 @@ func (a *Agent) enforce(u *metrics.Usage) error {
return nil return nil
} }
if enforce, warning, rxBytes, txBytes, err := a.checkAccountLimit(u.AccountId); err == nil { shr, err := a.str.FindShareWithTokenEvenIfDeleted(u.ShareToken, trx)
if enforce { if err != nil {
enforced := false return err
var enforcedAt time.Time }
if empty, err := a.str.IsAccountLimitJournalEmpty(int(u.AccountId), trx); err == nil && !empty {
if latest, err := a.str.FindLatestAccountLimitJournal(int(u.AccountId), trx); err == nil {
enforced = latest.Action == store.LimitAction
enforcedAt = latest.UpdatedAt
}
}
if !enforced { ul, err := a.getUserLimits(int(u.AccountId), trx)
_, err := a.str.CreateAccountLimitJournal(&store.AccountLimitJournal{ if err != nil {
AccountId: int(u.AccountId), return err
RxBytes: rxBytes, }
TxBytes: txBytes,
Action: store.LimitAction,
}, trx)
if err != nil {
return err
}
acct, err := a.str.GetAccount(int(u.AccountId), trx)
if err != nil {
return err
}
// run account limit actions
for _, action := range a.acctLimitActions {
if err := action.HandleAccount(acct, rxBytes, txBytes, a.cfg.Bandwidth.PerAccount, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
}
}
if err := trx.Commit(); err != nil {
return err
}
} else {
logrus.Debugf("already enforced limit for account '#%d' at %v", u.AccountId, enforcedAt)
}
} else if warning { exceededBwc, rxBytes, txBytes, err := a.anyBandwidthLimitExceeded(acct, u, ul.toBandwidthArray(sdk.BackendMode(shr.BackendMode)))
warned := false if err != nil {
var warnedAt time.Time return errors.Wrap(err, "error checking limit classes")
if empty, err := a.str.IsAccountLimitJournalEmpty(int(u.AccountId), trx); err == nil && !empty { }
if latest, err := a.str.FindLatestAccountLimitJournal(int(u.AccountId), trx); err == nil {
warned = latest.Action == store.WarningAction || latest.Action == store.LimitAction
warnedAt = latest.UpdatedAt
}
}
if !warned { if exceededBwc != nil {
_, err := a.str.CreateAccountLimitJournal(&store.AccountLimitJournal{ latestJe, err := a.isBandwidthClassLimitedForAccount(int(u.AccountId), exceededBwc, trx)
AccountId: int(u.AccountId), if err != nil {
RxBytes: rxBytes, return err
TxBytes: txBytes, }
Action: store.WarningAction, if latestJe == nil {
}, trx) je := &store.BandwidthLimitJournalEntry{
if err != nil { AccountId: int(u.AccountId),
return err RxBytes: rxBytes,
} TxBytes: txBytes,
acct, err := a.str.GetAccount(int(u.AccountId), trx) Action: exceededBwc.GetLimitAction(),
if err != nil { }
return err if !exceededBwc.IsGlobal() {
} lcId := exceededBwc.GetLimitClassId()
// run account warning actions je.LimitClassId = &lcId
for _, action := range a.acctWarningActions { }
if err := action.HandleAccount(acct, rxBytes, txBytes, a.cfg.Bandwidth.PerAccount, trx); err != nil { if _, err := a.str.CreateBandwidthLimitJournalEntry(je, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String()) return err
} }
} acct, err := a.str.GetAccount(int(u.AccountId), trx)
if err := trx.Commit(); err != nil { if err != nil {
return err return err
} }
} else { switch exceededBwc.GetLimitAction() {
logrus.Debugf("already warned account '#%d' at %v", u.AccountId, warnedAt) case store.LimitLimitAction:
} for _, limitAction := range a.limitActions {
if err := limitAction.HandleAccount(acct, rxBytes, txBytes, exceededBwc, ul, trx); err != nil {
} else { return errors.Wrapf(err, "%v", reflect.TypeOf(limitAction).String())
if enforce, warning, rxBytes, txBytes, err := a.checkEnvironmentLimit(u.EnvironmentId); err == nil { }
if enforce { }
enforced := false
var enforcedAt time.Time case store.WarningLimitAction:
if empty, err := a.str.IsEnvironmentLimitJournalEmpty(int(u.EnvironmentId), trx); err == nil && !empty { for _, warningAction := range a.warningActions {
if latest, err := a.str.FindLatestEnvironmentLimitJournal(int(u.EnvironmentId), trx); err == nil { if err := warningAction.HandleAccount(acct, rxBytes, txBytes, exceededBwc, ul, trx); err != nil {
enforced = latest.Action == store.LimitAction return errors.Wrapf(err, "%v", reflect.TypeOf(warningAction).String())
enforcedAt = latest.UpdatedAt }
} }
} }
if err := trx.Commit(); err != nil {
if !enforced { return err
_, err := a.str.CreateEnvironmentLimitJournal(&store.EnvironmentLimitJournal{ }
EnvironmentId: int(u.EnvironmentId), } else {
RxBytes: rxBytes, logrus.Debugf("limit '%v' already applied for '%v' (at: %v)", exceededBwc, acct.Email, latestJe.CreatedAt)
TxBytes: txBytes,
Action: store.LimitAction,
}, trx)
if err != nil {
return err
}
env, err := a.str.GetEnvironment(int(u.EnvironmentId), trx)
if err != nil {
return err
}
// run environment limit actions
for _, action := range a.envLimitActions {
if err := action.HandleEnvironment(env, rxBytes, txBytes, a.cfg.Bandwidth.PerEnvironment, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
}
}
if err := trx.Commit(); err != nil {
return err
}
} else {
logrus.Debugf("already enforced limit for environment '#%d' at %v", u.EnvironmentId, enforcedAt)
}
} else if warning {
warned := false
var warnedAt time.Time
if empty, err := a.str.IsEnvironmentLimitJournalEmpty(int(u.EnvironmentId), trx); err == nil && !empty {
if latest, err := a.str.FindLatestEnvironmentLimitJournal(int(u.EnvironmentId), trx); err == nil {
warned = latest.Action == store.WarningAction || latest.Action == store.LimitAction
warnedAt = latest.UpdatedAt
}
}
if !warned {
_, err := a.str.CreateEnvironmentLimitJournal(&store.EnvironmentLimitJournal{
EnvironmentId: int(u.EnvironmentId),
RxBytes: rxBytes,
TxBytes: txBytes,
Action: store.WarningAction,
}, trx)
if err != nil {
return err
}
env, err := a.str.GetEnvironment(int(u.EnvironmentId), trx)
if err != nil {
return err
}
// run environment warning actions
for _, action := range a.envWarningActions {
if err := action.HandleEnvironment(env, rxBytes, txBytes, a.cfg.Bandwidth.PerEnvironment, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
}
}
if err := trx.Commit(); err != nil {
return err
}
} else {
logrus.Debugf("already warned environment '#%d' at %v", u.EnvironmentId, warnedAt)
}
} else {
if enforce, warning, rxBytes, txBytes, err := a.checkShareLimit(u.ShareToken); err == nil {
if enforce {
shr, err := a.str.FindShareWithToken(u.ShareToken, trx)
if err != nil {
return err
}
enforced := false
var enforcedAt time.Time
if empty, err := a.str.IsShareLimitJournalEmpty(shr.Id, trx); err == nil && !empty {
if latest, err := a.str.FindLatestShareLimitJournal(shr.Id, trx); err == nil {
enforced = latest.Action == store.LimitAction
enforcedAt = latest.UpdatedAt
}
}
if !enforced {
_, err := a.str.CreateShareLimitJournal(&store.ShareLimitJournal{
ShareId: shr.Id,
RxBytes: rxBytes,
TxBytes: txBytes,
Action: store.LimitAction,
}, trx)
if err != nil {
return err
}
// run share limit actions
for _, action := range a.shrLimitActions {
if err := action.HandleShare(shr, rxBytes, txBytes, a.cfg.Bandwidth.PerShare, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
}
}
if err := trx.Commit(); err != nil {
return err
}
} else {
logrus.Debugf("already enforced limit for share '%v' at %v", shr.Token, enforcedAt)
}
} else if warning {
shr, err := a.str.FindShareWithToken(u.ShareToken, trx)
if err != nil {
return err
}
warned := false
var warnedAt time.Time
if empty, err := a.str.IsShareLimitJournalEmpty(shr.Id, trx); err == nil && !empty {
if latest, err := a.str.FindLatestShareLimitJournal(shr.Id, trx); err == nil {
warned = latest.Action == store.WarningAction || latest.Action == store.LimitAction
warnedAt = latest.UpdatedAt
}
}
if !warned {
_, err := a.str.CreateShareLimitJournal(&store.ShareLimitJournal{
ShareId: shr.Id,
RxBytes: rxBytes,
TxBytes: txBytes,
Action: store.WarningAction,
}, trx)
if err != nil {
return err
}
// run share warning actions
for _, action := range a.shrWarningActions {
if err := action.HandleShare(shr, rxBytes, txBytes, a.cfg.Bandwidth.PerShare, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
}
}
if err := trx.Commit(); err != nil {
return err
}
} else {
logrus.Debugf("already warned share '%v' at %v", shr.Token, warnedAt)
}
}
} else {
logrus.Error(err)
}
}
} else {
logrus.Error(err)
}
} }
} else {
logrus.Error(err)
} }
return nil return nil
@ -506,108 +345,82 @@ func (a *Agent) relax() error {
commit := false commit := false
if sljs, err := a.str.FindAllLatestShareLimitJournal(trx); err == nil { if bwjes, err := a.str.FindAllBandwidthLimitJournal(trx); err == nil {
for _, slj := range sljs { accounts := make(map[int]*store.Account)
if shr, err := a.str.GetShare(slj.ShareId, trx); err == nil { uls := make(map[int]*userLimits)
if slj.Action == store.WarningAction || slj.Action == store.LimitAction { accountPeriods := make(map[int]map[int]*periodBwValues)
if enforce, warning, rxBytes, txBytes, err := a.checkShareLimit(shr.Token); err == nil {
if !enforce && !warning {
if slj.Action == store.LimitAction {
// run relax actions for share
for _, action := range a.shrRelaxActions {
if err := action.HandleShare(shr, rxBytes, txBytes, a.cfg.Bandwidth.PerShare, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
}
}
} else {
logrus.Infof("relaxing warning for '%v'", shr.Token)
}
if err := a.str.DeleteShareLimitJournalForShare(shr.Id, trx); err == nil {
commit = true
} else {
logrus.Errorf("error deleting share_limit_journal for '%v'", shr.Token)
}
} else {
logrus.Infof("share '%v' still over limit", shr.Token)
}
} else {
logrus.Errorf("error checking share limit for '%v': %v", shr.Token, err)
}
}
} else {
logrus.Errorf("error getting share for '#%d': %v", slj.ShareId, err)
}
}
} else {
return err
}
if eljs, err := a.str.FindAllLatestEnvironmentLimitJournal(trx); err == nil { for _, bwje := range bwjes {
for _, elj := range eljs { if _, found := accounts[bwje.AccountId]; !found {
if env, err := a.str.GetEnvironment(elj.EnvironmentId, trx); err == nil { if acct, err := a.str.GetAccount(bwje.AccountId, trx); err == nil {
if elj.Action == store.WarningAction || elj.Action == store.LimitAction { accounts[bwje.AccountId] = acct
if enforce, warning, rxBytes, txBytes, err := a.checkEnvironmentLimit(int64(elj.EnvironmentId)); err == nil { ul, err := a.getUserLimits(acct.Id, trx)
if !enforce && !warning { if err != nil {
if elj.Action == store.LimitAction { return errors.Wrapf(err, "error getting user limits for '%v'", acct.Email)
// run relax actions for environment
for _, action := range a.envRelaxActions {
if err := action.HandleEnvironment(env, rxBytes, txBytes, a.cfg.Bandwidth.PerEnvironment, trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
}
}
} else {
logrus.Infof("relaxing warning for '%v'", env.ZId)
}
if err := a.str.DeleteEnvironmentLimitJournalForEnvironment(env.Id, trx); err == nil {
commit = true
} else {
logrus.Errorf("error deleteing environment_limit_journal for '%v': %v", env.ZId, err)
}
} else {
logrus.Infof("environment '%v' still over limit", env.ZId)
}
} else {
logrus.Errorf("error checking environment limit for '%v': %v", env.ZId, err)
} }
uls[bwje.AccountId] = ul
accountPeriods[bwje.AccountId] = make(map[int]*periodBwValues)
} else {
return err
} }
} else {
logrus.Errorf("error getting environment for '#%d': %v", elj.EnvironmentId, err)
} }
}
} else {
return err
}
if aljs, err := a.str.FindAllLatestAccountLimitJournal(trx); err == nil { var bwc store.BandwidthClass
for _, alj := range aljs { if bwje.LimitClassId == nil {
if acct, err := a.str.GetAccount(alj.AccountId, trx); err == nil { globalBwcs := newConfigBandwidthClasses(a.cfg.Bandwidth)
if alj.Action == store.WarningAction || alj.Action == store.LimitAction { if bwje.Action == store.WarningLimitAction {
if enforce, warning, rxBytes, txBytes, err := a.checkAccountLimit(int64(alj.AccountId)); err == nil { bwc = globalBwcs[0]
if !enforce && !warning { } else {
if alj.Action == store.LimitAction { bwc = globalBwcs[1]
// run relax actions for account }
for _, action := range a.acctRelaxActions { } else {
if err := action.HandleAccount(acct, rxBytes, txBytes, a.cfg.Bandwidth.PerAccount, trx); err != nil { lc, err := a.str.GetLimitClass(*bwje.LimitClassId, trx)
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String()) if err != nil {
} return err
} }
} else { bwc = lc
logrus.Infof("relaxing warning for '%v'", acct.Email) }
}
if err := a.str.DeleteAccountLimitJournalForAccount(acct.Id, trx); err == nil { if periods, accountFound := accountPeriods[bwje.AccountId]; accountFound {
commit = true if _, periodFound := periods[bwc.GetPeriodMinutes()]; !periodFound {
} else { rx, tx, err := a.ifx.totalRxTxForAccount(int64(bwje.AccountId), time.Duration(bwc.GetPeriodMinutes())*time.Minute)
logrus.Errorf("error deleting account_limit_journal for '%v': %v", acct.Email, err) if err != nil {
} return err
} else { }
logrus.Infof("account '%v' still over limit", acct.Email) periods[bwc.GetPeriodMinutes()] = &periodBwValues{rx: rx, tx: tx}
accountPeriods[bwje.AccountId] = periods
}
} else {
return errors.New("accountPeriods corrupted")
}
used := accountPeriods[bwje.AccountId][bwc.GetPeriodMinutes()]
if !a.transferBytesExceeded(used.rx, used.tx, bwc) {
if bwc.GetLimitAction() == store.LimitLimitAction {
logrus.Infof("relaxing limit '%v' for '%v'", bwc.String(), accounts[bwje.AccountId].Email)
for _, action := range a.relaxActions {
if err := action.HandleAccount(accounts[bwje.AccountId], used.rx, used.tx, bwc, uls[bwje.AccountId], trx); err != nil {
return errors.Wrapf(err, "%v", reflect.TypeOf(action).String())
} }
}
} else {
logrus.Infof("relaxing warning '%v' for '%v'", bwc.String(), accounts[bwje.AccountId].Email)
}
if bwc.IsGlobal() {
if err := a.str.DeleteBandwidthLimitJournalEntryForGlobal(bwje.AccountId, trx); err == nil {
commit = true
} else { } else {
logrus.Errorf("error checking account limit for '%v': %v", acct.Email, err) logrus.Errorf("error deleting global bandwidth limit journal entry for '%v': %v", accounts[bwje.AccountId].Email, err)
}
} else {
if err := a.str.DeleteBandwidthLimitJournalEntryForLimitClass(bwje.AccountId, *bwje.LimitClassId, trx); err == nil {
commit = true
} else {
logrus.Errorf("error deleting bandwidth limit journal entry for '%v': %v", accounts[bwje.AccountId].Email, err)
} }
} }
} else { } else {
logrus.Errorf("error getting account for '#%d': %v", alj.AccountId, err) logrus.Infof("'%v' still over limit: '%v' with rx: %v, tx: %v, total: %v", accounts[bwje.AccountId].Email, bwc, util.BytesToSize(used.rx), util.BytesToSize(used.tx), util.BytesToSize(used.rx+used.tx))
} }
} }
} else { } else {
@ -623,110 +436,84 @@ func (a *Agent) relax() error {
return nil return nil
} }
func (a *Agent) checkAccountLimit(acctId int64) (enforce, warning bool, rxBytes, txBytes int64, err error) { func (a *Agent) isBandwidthClassLimitedForAccount(acctId int, bwc store.BandwidthClass, trx *sqlx.Tx) (*store.BandwidthLimitJournalEntry, error) {
period := 24 * time.Hour if bwc.IsGlobal() {
limit := DefaultBandwidthPerPeriod() if empty, err := a.str.IsBandwidthLimitJournalEmptyForGlobal(acctId, trx); err == nil && !empty {
if a.cfg.Bandwidth != nil && a.cfg.Bandwidth.PerAccount != nil { je, err := a.str.FindLatestBandwidthLimitJournalForGlobal(acctId, trx)
limit = a.cfg.Bandwidth.PerAccount if err != nil {
return nil, err
}
if je.Action == store.LimitLimitAction {
logrus.Debugf("account '#%d' over bandwidth for global bandwidth class '%v'", acctId, bwc)
return je, nil
}
} else if err != nil {
return nil, err
}
} else {
if empty, err := a.str.IsBandwidthLimitJournalEmptyForLimitClass(acctId, bwc.GetLimitClassId(), trx); err == nil && !empty {
je, err := a.str.FindLatestBandwidthLimitJournalForLimitClass(acctId, bwc.GetLimitClassId(), trx)
if err != nil {
return nil, err
}
if je.Action == store.LimitLimitAction {
logrus.Debugf("account '#%d' over bandwidth for limit class '%v'", acctId, bwc)
return je, nil
}
} else if err != nil {
return nil, err
}
} }
if limit.Period > 0 { return nil, nil
period = limit.Period
}
rx, tx, err := a.ifx.totalRxTxForAccount(acctId, period)
if err != nil {
logrus.Error(err)
}
enforce, warning = a.checkLimit(limit, rx, tx)
return enforce, warning, rx, tx, nil
} }
func (a *Agent) checkEnvironmentLimit(envId int64) (enforce, warning bool, rxBytes, txBytes int64, err error) { func (a *Agent) anyBandwidthLimitExceeded(acct *store.Account, u *metrics.Usage, bwcs []store.BandwidthClass) (store.BandwidthClass, int64, int64, error) {
period := 24 * time.Hour periodBw := make(map[int]periodBwValues)
limit := DefaultBandwidthPerPeriod()
if a.cfg.Bandwidth != nil && a.cfg.Bandwidth.PerEnvironment != nil { var selectedLc store.BandwidthClass
limit = a.cfg.Bandwidth.PerEnvironment var rxBytes int64
} var txBytes int64
if limit.Period > 0 {
period = limit.Period for _, bwc := range bwcs {
} if _, found := periodBw[bwc.GetPeriodMinutes()]; !found {
rx, tx, err := a.ifx.totalRxTxForEnvironment(envId, period) rx, tx, err := a.ifx.totalRxTxForAccount(u.AccountId, time.Minute*time.Duration(bwc.GetPeriodMinutes()))
if err != nil { if err != nil {
logrus.Error(err) return nil, 0, 0, errors.Wrapf(err, "error getting rx/tx for account '%v'", acct.Email)
}
periodBw[bwc.GetPeriodMinutes()] = periodBwValues{rx: rx, tx: tx}
}
period := periodBw[bwc.GetPeriodMinutes()]
if a.transferBytesExceeded(period.rx, period.tx, bwc) {
selectedLc = bwc
rxBytes = period.rx
txBytes = period.tx
} else {
logrus.Debugf("'%v' limit ok '%v' with rx: %v, tx: %v, total: %v", acct.Email, bwc, util.BytesToSize(period.rx), util.BytesToSize(period.tx), util.BytesToSize(period.rx+period.tx))
}
} }
enforce, warning = a.checkLimit(limit, rx, tx) if selectedLc != nil {
return enforce, warning, rx, tx, nil logrus.Infof("'%v' exceeded limit '%v' with rx: %v, tx: %v, total: %v", acct.Email, selectedLc, util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes))
}
return selectedLc, rxBytes, txBytes, nil
} }
func (a *Agent) checkShareLimit(shrToken string) (enforce, warning bool, rxBytes, txBytes int64, err error) { func (a *Agent) transferBytesExceeded(rx, tx int64, bwc store.BandwidthClass) bool {
period := 24 * time.Hour if bwc.GetTxBytes() != store.Unlimited && tx >= bwc.GetTxBytes() {
limit := DefaultBandwidthPerPeriod() return true
if a.cfg.Bandwidth != nil && a.cfg.Bandwidth.PerShare != nil {
limit = a.cfg.Bandwidth.PerShare
} }
if limit.Period > 0 { if bwc.GetRxBytes() != store.Unlimited && rx >= bwc.GetRxBytes() {
period = limit.Period return true
} }
rx, tx, err := a.ifx.totalRxTxForShare(shrToken, period) if bwc.GetTotalBytes() != store.Unlimited && tx+rx >= bwc.GetTotalBytes() {
if err != nil { return true
logrus.Error(err)
} }
return false
enforce, warning = a.checkLimit(limit, rx, tx)
if enforce || warning {
logrus.Debugf("'%v': %v", shrToken, describeLimit(limit, rx, tx))
}
return enforce, warning, rx, tx, nil
} }
func (a *Agent) checkLimit(cfg *BandwidthPerPeriod, rx, tx int64) (enforce, warning bool) { type periodBwValues struct {
if cfg.Limit.Rx != Unlimited && rx > cfg.Limit.Rx { rx int64
return true, false tx int64
}
if cfg.Limit.Tx != Unlimited && tx > cfg.Limit.Tx {
return true, false
}
if cfg.Limit.Total != Unlimited && rx+tx > cfg.Limit.Total {
return true, false
}
if cfg.Warning.Rx != Unlimited && rx > cfg.Warning.Rx {
return false, true
}
if cfg.Warning.Tx != Unlimited && tx > cfg.Warning.Tx {
return false, true
}
if cfg.Warning.Total != Unlimited && rx+tx > cfg.Warning.Total {
return false, true
}
return false, false
}
func describeLimit(cfg *BandwidthPerPeriod, rx, tx int64) string {
out := ""
if cfg.Limit.Rx != Unlimited && rx > cfg.Limit.Rx {
out += fmt.Sprintf("['%v' over rx limit '%v']", util.BytesToSize(rx), util.BytesToSize(cfg.Limit.Rx))
}
if cfg.Limit.Tx != Unlimited && tx > cfg.Limit.Tx {
out += fmt.Sprintf("['%v' over tx limit '%v']", util.BytesToSize(tx), util.BytesToSize(cfg.Limit.Tx))
}
if cfg.Limit.Total != Unlimited && rx+tx > cfg.Limit.Total {
out += fmt.Sprintf("['%v' over total limit '%v']", util.BytesToSize(rx+tx), util.BytesToSize(cfg.Limit.Total))
}
if cfg.Warning.Rx != Unlimited && rx > cfg.Warning.Rx {
out += fmt.Sprintf("['%v' over rx warning '%v']", util.BytesToSize(rx), util.BytesToSize(cfg.Warning.Rx))
}
if cfg.Warning.Tx != Unlimited && tx > cfg.Warning.Tx {
out += fmt.Sprintf("['%v' over tx warning '%v']", util.BytesToSize(tx), util.BytesToSize(cfg.Warning.Tx))
}
if cfg.Warning.Total != Unlimited && rx+tx > cfg.Warning.Total {
out += fmt.Sprintf("['%v' over total warning '%v']", util.BytesToSize(rx+tx), util.BytesToSize(cfg.Warning.Total))
}
return out
} }

View File

@ -0,0 +1,84 @@
package limits
import (
"fmt"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/util"
)
// configBandwidthClass adapts one tier (warning or limit) of the static,
// config-file-driven bandwidth settings to the store.BandwidthClass
// interface consumed by the limits agent. Config-derived classes are always
// global (instance-wide) and unscoped.
type configBandwidthClass struct {
	periodInMinutes int               // accounting window, in minutes
	bw              *Bandwidth        // rx/tx/total byte thresholds for this tier
	limitAction     store.LimitAction // action associated with this tier (warning or limit)
}

// newConfigBandwidthClasses expands a BandwidthPerPeriod configuration into
// two bandwidth classes: the warning tier first ([0]), then the limit tier
// ([1]). Both share the same configured period.
func newConfigBandwidthClasses(cfg *BandwidthPerPeriod) []store.BandwidthClass {
	return []store.BandwidthClass{
		&configBandwidthClass{
			periodInMinutes: int(cfg.Period.Minutes()),
			bw:              cfg.Warning,
			limitAction:     store.WarningLimitAction,
		},
		&configBandwidthClass{
			periodInMinutes: int(cfg.Period.Minutes()),
			bw:              cfg.Limit,
			limitAction:     store.LimitLimitAction,
		},
	}
}

// IsGlobal reports whether this class applies instance-wide; config-derived
// classes always do.
func (bc *configBandwidthClass) IsGlobal() bool {
	return true
}

// IsScoped reports whether this class is restricted to a particular
// share/backend mode; config-derived classes never are.
func (bc *configBandwidthClass) IsScoped() bool {
	return false
}

// GetLimitClassId returns -1 as a sentinel: config-derived classes are not
// backed by a limit class record.
func (bc *configBandwidthClass) GetLimitClassId() int {
	return -1
}

// GetShareMode returns the empty share mode; config-derived classes do not
// target a specific share mode.
func (bc *configBandwidthClass) GetShareMode() sdk.ShareMode {
	return ""
}

// GetBackendMode returns the empty backend mode; config-derived classes do
// not target a specific backend mode.
func (bc *configBandwidthClass) GetBackendMode() sdk.BackendMode {
	return ""
}

// GetPeriodMinutes returns the accounting window for this class, in minutes.
func (bc *configBandwidthClass) GetPeriodMinutes() int {
	return bc.periodInMinutes
}

// GetRxBytes returns the received-bytes threshold for this tier.
func (bc *configBandwidthClass) GetRxBytes() int64 {
	return bc.bw.Rx
}

// GetTxBytes returns the transmitted-bytes threshold for this tier.
func (bc *configBandwidthClass) GetTxBytes() int64 {
	return bc.bw.Tx
}

// GetTotalBytes returns the combined rx+tx byte threshold for this tier.
func (bc *configBandwidthClass) GetTotalBytes() int64 {
	return bc.bw.Total
}

// GetLimitAction returns the action (warning or limit) associated with this tier.
func (bc *configBandwidthClass) GetLimitAction() store.LimitAction {
	return bc.limitAction
}

// String renders a debug description, including only those thresholds that
// are actually set (greater than store.Unlimited).
func (bc *configBandwidthClass) String() string {
	out := fmt.Sprintf("ConfigClass<periodMinutes: %d", bc.periodInMinutes)
	if bc.bw.Rx > store.Unlimited {
		out += fmt.Sprintf(", rxBytes: %v", util.BytesToSize(bc.bw.Rx))
	}
	if bc.bw.Tx > store.Unlimited {
		out += fmt.Sprintf(", txBytes: %v", util.BytesToSize(bc.bw.Tx))
	}
	if bc.bw.Total > store.Unlimited {
		out += fmt.Sprintf(", totalBytes: %v", util.BytesToSize(bc.bw.Total))
	}
	out += fmt.Sprintf(", limitAction: %s>", bc.limitAction)
	return out
}

View File

@ -1,21 +1,19 @@
package limits package limits
import "time" import (
"github.com/openziti/zrok/controller/store"
const Unlimited = -1 "time"
)
type Config struct { type Config struct {
Environments int Environments int
Shares int Shares int
Bandwidth *BandwidthConfig ReservedShares int
Cycle time.Duration UniqueNames int
Enforcing bool ShareFrontends int
} Bandwidth *BandwidthPerPeriod
Cycle time.Duration
type BandwidthConfig struct { Enforcing bool
PerAccount *BandwidthPerPeriod
PerEnvironment *BandwidthPerPeriod
PerShare *BandwidthPerPeriod
} }
type BandwidthPerPeriod struct { type BandwidthPerPeriod struct {
@ -34,28 +32,27 @@ func DefaultBandwidthPerPeriod() *BandwidthPerPeriod {
return &BandwidthPerPeriod{ return &BandwidthPerPeriod{
Period: 24 * time.Hour, Period: 24 * time.Hour,
Warning: &Bandwidth{ Warning: &Bandwidth{
Rx: Unlimited, Rx: store.Unlimited,
Tx: Unlimited, Tx: store.Unlimited,
Total: Unlimited, Total: store.Unlimited,
}, },
Limit: &Bandwidth{ Limit: &Bandwidth{
Rx: Unlimited, Rx: store.Unlimited,
Tx: Unlimited, Tx: store.Unlimited,
Total: Unlimited, Total: store.Unlimited,
}, },
} }
} }
func DefaultConfig() *Config { func DefaultConfig() *Config {
return &Config{ return &Config{
Environments: Unlimited, Environments: store.Unlimited,
Shares: Unlimited, Shares: store.Unlimited,
Bandwidth: &BandwidthConfig{ ReservedShares: store.Unlimited,
PerAccount: DefaultBandwidthPerPeriod(), UniqueNames: store.Unlimited,
PerEnvironment: DefaultBandwidthPerPeriod(), ShareFrontends: store.Unlimited,
PerShare: DefaultBandwidthPerPeriod(), Bandwidth: DefaultBandwidthPerPeriod(),
}, Enforcing: false,
Enforcing: false, Cycle: 15 * time.Minute,
Cycle: 15 * time.Minute,
} }
} }

View File

@ -1,41 +0,0 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/controller/zrokEdgeSdk"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// environmentLimitAction removes dial access for every share in an
// environment once that environment has exceeded its bandwidth limit.
type environmentLimitAction struct {
	str  *store.Store
	zCfg *zrokEdgeSdk.Config
}

// newEnvironmentLimitAction wires an environmentLimitAction to the given
// store and ziti edge configuration.
func newEnvironmentLimitAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *environmentLimitAction {
	return &environmentLimitAction{str: str, zCfg: zCfg}
}

// HandleEnvironment deletes the dial service policy for each share in the
// limited environment, cutting off frontend access. The byte counts and
// bandwidth configuration parameters are unused by this action.
func (a *environmentLimitAction) HandleEnvironment(env *store.Environment, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
	logrus.Infof("limiting '%v'", env.ZId)

	shares, err := a.str.FindSharesForEnvironment(env.Id, trx)
	if err != nil {
		return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
	}

	edge, err := zrokEdgeSdk.Client(a.zCfg)
	if err != nil {
		return err
	}

	for i := range shares {
		share := shares[i]
		if err := zrokEdgeSdk.DeleteServicePoliciesDial(env.ZId, share.Token, edge); err != nil {
			return errors.Wrapf(err, "error deleting dial service policy for '%v'", share.Token)
		}
		logrus.Infof("removed dial service policy for share '%v' of environment '%v'", share.Token, env.ZId)
	}

	return nil
}

View File

@ -1,50 +0,0 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/controller/zrokEdgeSdk"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// environmentRelaxAction restores dial access for an environment's shares
// once the environment has dropped back under its bandwidth limit.
type environmentRelaxAction struct {
	str  *store.Store
	zCfg *zrokEdgeSdk.Config
}

// newEnvironmentRelaxAction wires an environmentRelaxAction to the given
// store and ziti edge configuration.
func newEnvironmentRelaxAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *environmentRelaxAction {
	return &environmentRelaxAction{str, zCfg}
}

// HandleEnvironment re-enables each non-deleted share in the environment,
// dispatching to the public or private relax helper according to the share's
// mode. Shares with any other share mode fall through the switch and are
// skipped silently; rxBytes/txBytes/limit are unused by this action.
func (a *environmentRelaxAction) HandleEnvironment(env *store.Environment, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
	logrus.Infof("relaxing '%v'", env.ZId)
	shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
	if err != nil {
		return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
	}
	edge, err := zrokEdgeSdk.Client(a.zCfg)
	if err != nil {
		return err
	}
	for _, shr := range shrs {
		// skip soft-deleted shares
		if !shr.Deleted {
			switch shr.ShareMode {
			case string(sdk.PublicShareMode):
				if err := relaxPublicShare(a.str, edge, shr, trx); err != nil {
					return err
				}
			case string(sdk.PrivateShareMode):
				if err := relaxPrivateShare(a.str, edge, shr, trx); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

View File

@ -1,58 +0,0 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/emailUi"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// environmentWarningAction emails an account when one of its environments
// crosses the bandwidth warning threshold.
type environmentWarningAction struct {
	str *store.Store
	cfg *emailUi.Config
}

// newEnvironmentWarningAction wires an environmentWarningAction to the given
// email configuration and store. Note the parameter order (cfg, str) is the
// reverse of the struct field order; the positional literal assigns them
// correctly.
func newEnvironmentWarningAction(cfg *emailUi.Config, str *store.Store) *environmentWarningAction {
	return &environmentWarningAction{str, cfg}
}

// HandleEnvironment sends a transfer-limit warning email to the account that
// owns the environment. When no email configuration is present it only logs
// a warning; environments without an associated account are skipped silently.
func (a *environmentWarningAction) HandleEnvironment(env *store.Environment, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
	logrus.Infof("warning '%v'", env.ZId)
	if a.cfg != nil {
		if env.AccountId != nil {
			acct, err := a.str.GetAccount(*env.AccountId, trx)
			if err != nil {
				return err
			}
			// format each configured threshold for the email body; dimensions
			// left at Unlimited render as "unlimited bytes"
			rxLimit := "unlimited bytes"
			if limit.Limit.Rx != Unlimited {
				rxLimit = util.BytesToSize(limit.Limit.Rx)
			}
			txLimit := "unlimited bytes"
			if limit.Limit.Tx != Unlimited {
				txLimit = util.BytesToSize(limit.Limit.Tx)
			}
			totalLimit := "unlimited bytes"
			if limit.Limit.Total != Unlimited {
				totalLimit = util.BytesToSize(limit.Limit.Total)
			}
			// assemble the multi-paragraph warning message
			detail := newDetailMessage()
			detail = detail.append("Your environment '%v' has received %v and sent %v (for a total of %v), which has triggered a transfer limit warning.", env.Description, util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes))
			detail = detail.append("This zrok instance only allows a share to receive %v, send %v, totalling not more than %v for each %v.", rxLimit, txLimit, totalLimit, limit.Period)
			detail = detail.append("If you exceed the transfer limit, access to your shares will be temporarily disabled (until the last %v falls below the transfer limit).", limit.Period)
			if err := sendLimitWarningEmail(a.cfg, acct.Email, detail); err != nil {
				return errors.Wrapf(err, "error sending limit warning email to '%v'", acct.Email)
			}
		}
	} else {
		logrus.Warnf("skipping warning email for environment limit; no email configuration specified")
	}
	return nil
}

View File

@ -0,0 +1,52 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/controller/zrokEdgeSdk"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// limitAction enforces a bandwidth limit against an entire account by
// removing the dial service policies for the account's shares.
type limitAction struct {
	str  *store.Store
	zCfg *zrokEdgeSdk.Config
}

// newLimitAction wires a limitAction to the given store and ziti edge
// configuration.
func newLimitAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *limitAction {
	return &limitAction{str, zCfg}
}

// HandleAccount walks every environment and share belonging to the account
// and deletes each share's dial service policy, disabling frontend access.
// Shares whose backend mode appears in ul.ignoreBackends(bwc) are left
// untouched. The rx/tx byte counts are unused by this action.
func (a *limitAction) HandleAccount(acct *store.Account, _, _ int64, bwc store.BandwidthClass, ul *userLimits, trx *sqlx.Tx) error {
	envs, err := a.str.FindEnvironmentsForAccount(acct.Id, trx)
	if err != nil {
		return errors.Wrapf(err, "error finding environments for account '%v'", acct.Email)
	}
	edge, err := zrokEdgeSdk.Client(a.zCfg)
	if err != nil {
		return err
	}
	// backend modes exempted from this bandwidth class
	ignoreBackends := ul.ignoreBackends(bwc)
	for _, env := range envs {
		shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
		if err != nil {
			return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
		}
		for _, shr := range shrs {
			if _, ignore := ignoreBackends[sdk.BackendMode(shr.BackendMode)]; !ignore {
				if err := zrokEdgeSdk.DeleteServicePoliciesDial(env.ZId, shr.Token, edge); err != nil {
					return errors.Wrapf(err, "error deleting dial service policy for '%v'", shr.Token)
				}
				logrus.Infof("removed dial service policy for share '%v' of environment '%v'", shr.Token, env.ZId)
			} else {
				logrus.Debugf("ignoring share '%v' for '%v' with backend mode '%v'", shr.Token, acct.Email, shr.BackendMode)
			}
		}
	}
	return nil
}

View File

@ -6,13 +6,5 @@ import (
) )
type AccountAction interface { type AccountAction interface {
HandleAccount(a *store.Account, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error HandleAccount(a *store.Account, rxBytes, txBytes int64, bwc store.BandwidthClass, ul *userLimits, trx *sqlx.Tx) error
}
type EnvironmentAction interface {
HandleEnvironment(e *store.Environment, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error
}
type ShareAction interface {
HandleShare(s *store.Share, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error
} }

View File

@ -10,32 +10,64 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
type shareRelaxAction struct { type relaxAction struct {
str *store.Store str *store.Store
zCfg *zrokEdgeSdk.Config zCfg *zrokEdgeSdk.Config
} }
func newShareRelaxAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *shareRelaxAction { func newRelaxAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *relaxAction {
return &shareRelaxAction{str, zCfg} return &relaxAction{str, zCfg}
} }
func (a *shareRelaxAction) HandleShare(shr *store.Share, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error { func (a *relaxAction) HandleAccount(acct *store.Account, _, _ int64, bwc store.BandwidthClass, _ *userLimits, trx *sqlx.Tx) error {
logrus.Infof("relaxing '%v'", shr.Token) logrus.Debugf("relaxing '%v'", acct.Email)
if !shr.Deleted { envs, err := a.str.FindEnvironmentsForAccount(acct.Id, trx)
edge, err := zrokEdgeSdk.Client(a.zCfg) if err != nil {
if err != nil { return errors.Wrapf(err, "error finding environments for account '%v'", acct.Email)
return err }
}
switch shr.ShareMode { jes, err := a.str.FindAllLatestBandwidthLimitJournalForAccount(acct.Id, trx)
case string(sdk.PublicShareMode): if err != nil {
if err := relaxPublicShare(a.str, edge, shr, trx); err != nil { return errors.Wrapf(err, "error finding latest bandwidth limit journal entries for account '%v'", acct.Email)
}
limitedBackends := make(map[sdk.BackendMode]bool)
for _, je := range jes {
if je.LimitClassId != nil {
lc, err := a.str.GetLimitClass(*je.LimitClassId, trx)
if err != nil {
return err return err
} }
case string(sdk.PrivateShareMode): if lc.BackendMode != nil && lc.LimitAction == store.LimitLimitAction {
if err := relaxPrivateShare(a.str, edge, shr, trx); err != nil { limitedBackends[*lc.BackendMode] = true
return err }
}
}
edge, err := zrokEdgeSdk.Client(a.zCfg)
if err != nil {
return err
}
for _, env := range envs {
shrs, err := a.str.FindSharesForEnvironment(env.Id, trx)
if err != nil {
return errors.Wrapf(err, "error finding shares for environment '%v'", env.ZId)
}
for _, shr := range shrs {
_, stayLimited := limitedBackends[sdk.BackendMode(shr.BackendMode)]
if (!bwc.IsScoped() && !stayLimited) || bwc.GetBackendMode() == sdk.BackendMode(shr.BackendMode) {
switch shr.ShareMode {
case string(sdk.PublicShareMode):
if err := relaxPublicShare(a.str, edge, shr, trx); err != nil {
logrus.Errorf("error relaxing public share '%v' for account '%v' (ignoring): %v", shr.Token, acct.Email, err)
}
case string(sdk.PrivateShareMode):
if err := relaxPrivateShare(a.str, edge, shr, trx); err != nil {
logrus.Errorf("error relaxing private share '%v' for account '%v' (ignoring): %v", shr.Token, acct.Email, err)
}
}
} }
} }
} }

View File

@ -0,0 +1,46 @@
package limits
import (
"fmt"
"github.com/openziti/zrok/controller/store"
)
// configResourceCountClass adapts the static controller configuration to the
// store.ResourceCountClass interface, supplying the global (instance-wide)
// resource count limits.
type configResourceCountClass struct {
	cfg *Config
}

// newConfigResourceCountClass wraps the limits configuration as a global
// resource count class.
func newConfigResourceCountClass(cfg *Config) store.ResourceCountClass {
	return &configResourceCountClass{cfg}
}

// IsGlobal reports whether this class applies instance-wide; config-derived
// classes always do.
func (rcc *configResourceCountClass) IsGlobal() bool {
	return true
}

// GetLimitClassId returns -1 as a sentinel: config-derived classes are not
// backed by a limit class record.
func (rcc *configResourceCountClass) GetLimitClassId() int {
	return -1
}

// GetEnvironments returns the configured maximum number of environments.
func (rcc *configResourceCountClass) GetEnvironments() int {
	return rcc.cfg.Environments
}

// GetShares returns the configured maximum number of shares.
func (rcc *configResourceCountClass) GetShares() int {
	return rcc.cfg.Shares
}

// GetReservedShares returns the configured maximum number of reserved shares.
func (rcc *configResourceCountClass) GetReservedShares() int {
	return rcc.cfg.ReservedShares
}

// GetUniqueNames returns the configured maximum number of unique names.
func (rcc *configResourceCountClass) GetUniqueNames() int {
	return rcc.cfg.UniqueNames
}

// GetShareFrontends returns the configured share-frontends count limit.
func (rcc *configResourceCountClass) GetShareFrontends() int {
	return rcc.cfg.ShareFrontends
}

// String renders a debug description of the configured counts. Fix: the last
// label was "share_frontends" (snake_case) while every other label in this
// string is camelCase; normalized to "shareFrontends" for consistency.
func (rcc *configResourceCountClass) String() string {
	return fmt.Sprintf("Config<environments: %d, shares: %d, reservedShares: %d, uniqueNames: %d, shareFrontends: %d>", rcc.cfg.Environments, rcc.cfg.Shares, rcc.cfg.ReservedShares, rcc.cfg.UniqueNames, rcc.cfg.ShareFrontends)
}

View File

@ -1,38 +0,0 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/controller/zrokEdgeSdk"
"github.com/sirupsen/logrus"
)
// shareLimitAction enforces a bandwidth limit against a single share by
// removing its dial service policy from the ziti network.
type shareLimitAction struct {
	str  *store.Store
	zCfg *zrokEdgeSdk.Config
}

// newShareLimitAction wires a shareLimitAction to the given store and ziti
// edge configuration.
func newShareLimitAction(str *store.Store, zCfg *zrokEdgeSdk.Config) *shareLimitAction {
	return &shareLimitAction{str: str, zCfg: zCfg}
}

// HandleShare cuts off frontend access to the limited share by deleting its
// dial service policy. The byte counts and bandwidth configuration
// parameters are unused by this action.
func (a *shareLimitAction) HandleShare(shr *store.Share, _, _ int64, _ *BandwidthPerPeriod, trx *sqlx.Tx) error {
	logrus.Infof("limiting '%v'", shr.Token)

	// look up the owning environment; its ZId scopes the policy delete below
	env, err := a.str.GetEnvironment(shr.EnvironmentId, trx)
	if err != nil {
		return err
	}

	edge, err := zrokEdgeSdk.Client(a.zCfg)
	if err != nil {
		return err
	}

	if err := zrokEdgeSdk.DeleteServicePoliciesDial(env.ZId, shr.Token, edge); err != nil {
		return err
	}
	logrus.Infof("removed dial service policy for '%v'", shr.Token)

	return nil
}

View File

@ -1,63 +0,0 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/emailUi"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// shareWarningAction emails the account that owns a share when the share
// crosses its transfer-limit warning threshold.
type shareWarningAction struct {
	str *store.Store
	cfg *emailUi.Config
}

func newShareWarningAction(cfg *emailUi.Config, str *store.Store) *shareWarningAction {
	return &shareWarningAction{str, cfg}
}

// HandleShare sends a warning email describing shr's transfer totals and the
// applicable limits. Without email configuration it logs and does nothing.
func (a *shareWarningAction) HandleShare(shr *store.Share, rxBytes, txBytes int64, limit *BandwidthPerPeriod, trx *sqlx.Tx) error {
	logrus.Infof("warning '%v'", shr.Token)

	if a.cfg == nil {
		logrus.Warnf("skipping warning email for share limit; no email configuration specified")
		return nil
	}

	env, err := a.str.GetEnvironment(shr.EnvironmentId, trx)
	if err != nil {
		return err
	}
	if env.AccountId == nil {
		// ephemeral environment; nobody to notify
		return nil
	}
	acct, err := a.str.GetAccount(*env.AccountId, trx)
	if err != nil {
		return err
	}

	// render a byte limit for display, treating the Unlimited sentinel specially
	describe := func(v int64) string {
		if v == Unlimited {
			return "unlimited bytes"
		}
		return util.BytesToSize(v)
	}

	detail := newDetailMessage()
	detail = detail.append("Your share '%v' has received %v and sent %v (for a total of %v), which has triggered a transfer limit warning.", shr.Token, util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes))
	detail = detail.append("This zrok instance only allows a share to receive %v, send %v, totalling not more than %v for each %v.", describe(limit.Limit.Rx), describe(limit.Limit.Tx), describe(limit.Limit.Total), limit.Period)
	detail = detail.append("If you exceed the transfer limit, access to your shares will be temporarily disabled (until the last %v falls below the transfer limit).", limit.Period)

	if err := sendLimitWarningEmail(a.cfg, acct.Email, detail); err != nil {
		return errors.Wrapf(err, "error sending limit warning email to '%v'", acct.Email)
	}

	return nil
}

View File

@ -0,0 +1,123 @@
package limits
import (
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/controller/store"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// userLimits aggregates the complete set of limits that apply to a single
// account: the resource counts, the unscoped bandwidth classes (warning and
// limit), and any per-backend-mode scoped limit classes.
type userLimits struct {
	resource  store.ResourceCountClass
	bandwidth []store.BandwidthClass
	scopes    map[sdk.BackendMode]*store.LimitClass
}

// toBandwidthArray returns the bandwidth classes to evaluate for backendMode:
// the unscoped classes, plus the scoped class for that mode when one exists.
// When a scoped class applies, a freshly-allocated slice is returned so the
// shared ul.bandwidth slice is never mutated.
func (ul *userLimits) toBandwidthArray(backendMode sdk.BackendMode) []store.BandwidthClass {
	if scopedBwc, found := ul.scopes[backendMode]; found {
		out := make([]store.BandwidthClass, 0, len(ul.bandwidth)+1)
		out = append(out, ul.bandwidth...)
		out = append(out, scopedBwc)
		return out
	}
	return ul.bandwidth
}
// ignoreBackends computes the set of backend modes whose traffic must be
// excluded when evaluating bwc. A scoped class counts only its own backend
// mode's traffic (every other scoped mode is ignored); an unscoped class
// counts only traffic not already covered by some scoped class (every scoped
// mode is ignored). The two branches of the original collapsed into one loop.
func (ul *userLimits) ignoreBackends(bwc store.BandwidthClass) map[sdk.BackendMode]bool {
	ignoreBackends := make(map[sdk.BackendMode]bool)
	for backendMode := range ul.scopes {
		if !bwc.IsScoped() || backendMode != bwc.GetBackendMode() {
			ignoreBackends[backendMode] = true
		}
	}
	return ignoreBackends
}
// getUserLimits assembles the effective limits for acctId. The configuration
// defaults form the baseline; limit classes applied to the account then
// override the resource counts, replace the unscoped warning/limit bandwidth
// classes, or install per-backend-mode scoped classes.
func (a *Agent) getUserLimits(acctId int, trx *sqlx.Tx) (*userLimits, error) {
	resource := newConfigResourceCountClass(a.cfg)
	cfgBwcs := newConfigBandwidthClasses(a.cfg.Bandwidth)
	bwWarning := cfgBwcs[0]
	bwLimit := cfgBwcs[1]
	scopes := make(map[sdk.BackendMode]*store.LimitClass)

	alcs, err := a.str.FindAppliedLimitClassesForAccount(acctId, trx)
	if err != nil {
		return nil, errors.Wrapf(err, "error finding applied limit classes for account '%d'", acctId)
	}
	for _, alc := range alcs {
		// classify each applied limit class; later entries override earlier ones
		switch {
		case a.isResourceCountClass(alc):
			resource = alc
		case a.isUnscopedBandwidthClass(alc):
			if alc.LimitAction == store.WarningLimitAction {
				bwWarning = alc
			} else {
				bwLimit = alc
			}
		case a.isScopedLimitClass(alc):
			scopes[*alc.BackendMode] = alc
		default:
			logrus.Warnf("unknown type of limit class '%v' for account '#%d'", alc, acctId)
		}
	}

	return &userLimits{
		resource:  resource,
		bandwidth: []store.BandwidthClass{bwWarning, bwLimit},
		scopes:    scopes,
	}, nil
}
// isResourceCountClass reports whether alc carries resource counts: it must be
// unscoped (no backend mode) and set at least one count to a value other than
// the Unlimited sentinel.
func (a *Agent) isResourceCountClass(alc *store.LimitClass) bool {
	if alc.BackendMode != nil {
		return false
	}
	return alc.Environments != store.Unlimited ||
		alc.Shares != store.Unlimited ||
		alc.ReservedShares != store.Unlimited ||
		alc.UniqueNames != store.Unlimited ||
		alc.ShareFrontends != store.Unlimited
}
// isUnscopedBandwidthClass reports whether alc is a bandwidth-only class with
// no backend mode: it may set no resource counts, requires a positive period,
// and must set at least one byte limit to a non-Unlimited value.
func (a *Agent) isUnscopedBandwidthClass(alc *store.LimitClass) bool {
	switch {
	case alc.BackendMode != nil:
		return false
	case alc.Environments > store.Unlimited || alc.Shares > store.Unlimited || alc.ReservedShares > store.Unlimited || alc.UniqueNames > store.Unlimited || alc.ShareFrontends > store.Unlimited:
		// any concrete resource count makes this a resource count class instead
		return false
	case alc.PeriodMinutes < 1:
		return false
	case alc.RxBytes == store.Unlimited && alc.TxBytes == store.Unlimited && alc.TotalBytes == store.Unlimited:
		// no byte limit set at all
		return false
	default:
		return true
	}
}
// isScopedLimitClass reports whether alc is scoped to a specific backend mode:
// it must carry a backend mode, must not limit environments, requires a
// positive period, and must set at least one byte limit to a non-Unlimited
// value.
func (a *Agent) isScopedLimitClass(alc *store.LimitClass) bool {
	switch {
	case alc.BackendMode == nil:
		return false
	case alc.Environments > store.Unlimited:
		return false
	case alc.PeriodMinutes < 1:
		return false
	case alc.RxBytes == store.Unlimited && alc.TxBytes == store.Unlimited && alc.TotalBytes == store.Unlimited:
		return false
	default:
		return true
	}
}

View File

@ -7,38 +7,39 @@ import (
"github.com/openziti/zrok/util" "github.com/openziti/zrok/util"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"time"
) )
type accountWarningAction struct { type warningAction struct {
str *store.Store str *store.Store
cfg *emailUi.Config cfg *emailUi.Config
} }
func newAccountWarningAction(cfg *emailUi.Config, str *store.Store) *accountWarningAction { func newWarningAction(cfg *emailUi.Config, str *store.Store) *warningAction {
return &accountWarningAction{str, cfg} return &warningAction{str, cfg}
} }
func (a *accountWarningAction) HandleAccount(acct *store.Account, rxBytes, txBytes int64, limit *BandwidthPerPeriod, _ *sqlx.Tx) error { func (a *warningAction) HandleAccount(acct *store.Account, rxBytes, txBytes int64, bwc store.BandwidthClass, _ *userLimits, _ *sqlx.Tx) error {
logrus.Infof("warning '%v'", acct.Email) logrus.Infof("warning '%v'", acct.Email)
if a.cfg != nil { if a.cfg != nil {
rxLimit := "(unlimited bytes)" rxLimit := "(store.Unlimited bytes)"
if limit.Limit.Rx != Unlimited { if bwc.GetRxBytes() != store.Unlimited {
rxLimit = util.BytesToSize(limit.Limit.Rx) rxLimit = util.BytesToSize(bwc.GetRxBytes())
} }
txLimit := "(unlimited bytes)" txLimit := "(store.Unlimited bytes)"
if limit.Limit.Tx != Unlimited { if bwc.GetTxBytes() != store.Unlimited {
txLimit = util.BytesToSize(limit.Limit.Tx) txLimit = util.BytesToSize(bwc.GetTxBytes())
} }
totalLimit := "(unlimited bytes)" totalLimit := "(store.Unlimited bytes)"
if limit.Limit.Total != Unlimited { if bwc.GetTotalBytes() != store.Unlimited {
totalLimit = util.BytesToSize(limit.Limit.Total) totalLimit = util.BytesToSize(bwc.GetTotalBytes())
} }
detail := newDetailMessage() detail := newDetailMessage()
detail = detail.append("Your account has received %v and sent %v (for a total of %v), which has triggered a transfer limit warning.", util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes)) detail = detail.append("Your account has received %v and sent %v (for a total of %v), which has triggered a transfer limit warning.", util.BytesToSize(rxBytes), util.BytesToSize(txBytes), util.BytesToSize(rxBytes+txBytes))
detail = detail.append("This zrok instance only allows an account to receive %v, send %v, totalling not more than %v for each %v.", rxLimit, txLimit, totalLimit, limit.Period) detail = detail.append("This zrok instance only allows an account to receive %v, send %v, totalling not more than %v for each %v.", rxLimit, txLimit, totalLimit, time.Duration(bwc.GetPeriodMinutes())*time.Minute)
detail = detail.append("If you exceed the transfer limit, access to your shares will be temporarily disabled (until the last %v falls below the transfer limit)", limit.Period) detail = detail.append("If you exceed the transfer limit, access to your shares will be temporarily disabled (until the last %v falls below the transfer limit)", time.Duration(bwc.GetPeriodMinutes())*time.Minute)
if err := sendLimitWarningEmail(a.cfg, acct.Email, detail); err != nil { if err := sendLimitWarningEmail(a.cfg, acct.Email, detail); err != nil {
return errors.Wrapf(err, "error sending limit warning email to '%v'", acct.Email) return errors.Wrapf(err, "error sending limit warning email to '%v'", acct.Email)

View File

@ -27,11 +27,6 @@ func (h *overviewHandler) Handle(_ metadata.OverviewParams, principal *rest_mode
logrus.Errorf("error finding environments for '%v': %v", principal.Email, err) logrus.Errorf("error finding environments for '%v': %v", principal.Email, err)
return metadata.NewOverviewInternalServerError() return metadata.NewOverviewInternalServerError()
} }
elm, err := newEnvironmentsLimitedMap(envs, trx)
if err != nil {
logrus.Errorf("error finding limited environments for '%v': %v", principal.Email, err)
return metadata.NewOverviewInternalServerError()
}
accountLimited, err := h.isAccountLimited(principal, trx) accountLimited, err := h.isAccountLimited(principal, trx)
if err != nil { if err != nil {
logrus.Errorf("error checking account limited for '%v': %v", principal.Email, err) logrus.Errorf("error checking account limited for '%v': %v", principal.Email, err)
@ -44,7 +39,6 @@ func (h *overviewHandler) Handle(_ metadata.OverviewParams, principal *rest_mode
Description: env.Description, Description: env.Description,
Host: env.Host, Host: env.Host,
ZID: env.ZId, ZID: env.ZId,
Limited: elm.isLimited(env),
CreatedAt: env.CreatedAt.UnixMilli(), CreatedAt: env.CreatedAt.UnixMilli(),
UpdatedAt: env.UpdatedAt.UnixMilli(), UpdatedAt: env.UpdatedAt.UnixMilli(),
}, },
@ -54,11 +48,6 @@ func (h *overviewHandler) Handle(_ metadata.OverviewParams, principal *rest_mode
logrus.Errorf("error finding shares for environment '%v': %v", env.ZId, err) logrus.Errorf("error finding shares for environment '%v': %v", env.ZId, err)
return metadata.NewOverviewInternalServerError() return metadata.NewOverviewInternalServerError()
} }
slm, err := newSharesLimitedMap(shrs, trx)
if err != nil {
logrus.Errorf("error finding limited shares for environment '%v': %v", env.ZId, err)
return metadata.NewOverviewInternalServerError()
}
for _, shr := range shrs { for _, shr := range shrs {
feEndpoint := "" feEndpoint := ""
if shr.FrontendEndpoint != nil { if shr.FrontendEndpoint != nil {
@ -81,7 +70,6 @@ func (h *overviewHandler) Handle(_ metadata.OverviewParams, principal *rest_mode
FrontendEndpoint: feEndpoint, FrontendEndpoint: feEndpoint,
BackendProxyEndpoint: beProxyEndpoint, BackendProxyEndpoint: beProxyEndpoint,
Reserved: shr.Reserved, Reserved: shr.Reserved,
Limited: slm.isLimited(shr),
CreatedAt: shr.CreatedAt.UnixMilli(), CreatedAt: shr.CreatedAt.UnixMilli(),
UpdatedAt: shr.UpdatedAt.UnixMilli(), UpdatedAt: shr.UpdatedAt.UnixMilli(),
} }
@ -116,70 +104,16 @@ func (h *overviewHandler) Handle(_ metadata.OverviewParams, principal *rest_mode
} }
func (h *overviewHandler) isAccountLimited(principal *rest_model_zrok.Principal, trx *sqlx.Tx) (bool, error) { func (h *overviewHandler) isAccountLimited(principal *rest_model_zrok.Principal, trx *sqlx.Tx) (bool, error) {
var alj *store.AccountLimitJournal var je *store.BandwidthLimitJournalEntry
aljEmpty, err := str.IsAccountLimitJournalEmpty(int(principal.ID), trx) jEmpty, err := str.IsBandwidthLimitJournalEmpty(int(principal.ID), trx)
if err != nil { if err != nil {
return false, err return false, err
} }
if !aljEmpty { if !jEmpty {
alj, err = str.FindLatestAccountLimitJournal(int(principal.ID), trx) je, err = str.FindLatestBandwidthLimitJournal(int(principal.ID), trx)
if err != nil { if err != nil {
return false, err return false, err
} }
} }
return alj != nil && alj.Action == store.LimitAction, nil return je != nil && je.Action == store.LimitLimitAction, nil
}
// sharesLimitedMap records which share ids currently have a limit action as
// their most recent share limit journal entry.
type sharesLimitedMap struct {
	v map[int]struct{}
}

// newSharesLimitedMap loads the latest share limit journal entries for shrs
// and indexes the ids of the shares whose latest action is a limit.
func newSharesLimitedMap(shrs []*store.Share, trx *sqlx.Tx) (*sharesLimitedMap, error) {
	shrIds := make([]int, 0, len(shrs))
	for _, shr := range shrs {
		shrIds = append(shrIds, shr.Id)
	}
	shrsLimited, err := str.FindSelectedLatestShareLimitjournal(shrIds, trx)
	if err != nil {
		return nil, err
	}
	slm := &sharesLimitedMap{v: make(map[int]struct{})}
	for _, slj := range shrsLimited {
		if slj.Action == store.LimitAction {
			slm.v[slj.ShareId] = struct{}{}
		}
	}
	return slm, nil
}

// isLimited reports whether shr is currently limited.
func (m *sharesLimitedMap) isLimited(shr *store.Share) bool {
	_, limited := m.v[shr.Id]
	return limited
}
// environmentsLimitedMap records which environment ids currently have a limit
// action as their most recent environment limit journal entry.
type environmentsLimitedMap struct {
	v map[int]struct{}
}

// newEnvironmentsLimitedMap loads the latest environment limit journal entries
// for envs and indexes the ids of the environments whose latest action is a
// limit.
func newEnvironmentsLimitedMap(envs []*store.Environment, trx *sqlx.Tx) (*environmentsLimitedMap, error) {
	envIds := make([]int, 0, len(envs))
	for _, env := range envs {
		envIds = append(envIds, env.Id)
	}
	envsLimited, err := str.FindSelectedLatestEnvironmentLimitJournal(envIds, trx)
	if err != nil {
		return nil, err
	}
	elm := &environmentsLimitedMap{v: make(map[int]struct{})}
	for _, elj := range envsLimited {
		if elj.Action == store.LimitAction {
			elm.v[elj.EnvironmentId] = struct{}{}
		}
	}
	return elm, nil
}
func (m *environmentsLimitedMap) isLimited(env *store.Environment) bool {
_, limited := m.v[env.Id]
return limited
} }

View File

@ -49,7 +49,9 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
return share.NewShareInternalServerError() return share.NewShareInternalServerError()
} }
if err := h.checkLimits(envId, principal, trx); err != nil { shareMode := sdk.ShareMode(params.Body.ShareMode)
backendMode := sdk.BackendMode(params.Body.BackendMode)
if err := h.checkLimits(envId, principal, params.Body.Reserved, params.Body.UniqueName != "", shareMode, backendMode, trx); err != nil {
logrus.Errorf("limits error: %v", err) logrus.Errorf("limits error: %v", err)
return share.NewShareUnauthorized() return share.NewShareUnauthorized()
} }
@ -114,6 +116,17 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
logrus.Error(err) logrus.Error(err)
return share.NewShareNotFound() return share.NewShareNotFound()
} }
if sfe.PermissionMode == store.ClosedPermissionMode {
granted, err := str.IsFrontendGrantedToAccount(int(principal.ID), sfe.Id, trx)
if err != nil {
logrus.Error(err)
return share.NewShareInternalServerError()
}
if !granted {
logrus.Errorf("'%v' is not granted access to frontend '%v'", principal.Email, frontendSelection)
return share.NewShareNotFound()
}
}
if sfe != nil && sfe.UrlTemplate != nil { if sfe != nil && sfe.UrlTemplate != nil {
frontendZIds = append(frontendZIds, sfe.ZId) frontendZIds = append(frontendZIds, sfe.ZId)
frontendTemplates = append(frontendTemplates, *sfe.UrlTemplate) frontendTemplates = append(frontendTemplates, *sfe.UrlTemplate)
@ -147,6 +160,7 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
BackendMode: params.Body.BackendMode, BackendMode: params.Body.BackendMode,
BackendProxyEndpoint: &params.Body.BackendProxyEndpoint, BackendProxyEndpoint: &params.Body.BackendProxyEndpoint,
Reserved: reserved, Reserved: reserved,
UniqueName: reserved && uniqueName != "",
PermissionMode: store.OpenPermissionMode, PermissionMode: store.OpenPermissionMode,
} }
if params.Body.PermissionMode != "" { if params.Body.PermissionMode != "" {
@ -189,10 +203,10 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr
}) })
} }
func (h *shareHandler) checkLimits(envId int, principal *rest_model_zrok.Principal, trx *sqlx.Tx) error { func (h *shareHandler) checkLimits(envId int, principal *rest_model_zrok.Principal, reserved, uniqueName bool, shareMode sdk.ShareMode, backendMode sdk.BackendMode, trx *sqlx.Tx) error {
if !principal.Limitless { if !principal.Limitless {
if limitsAgent != nil { if limitsAgent != nil {
ok, err := limitsAgent.CanCreateShare(int(principal.ID), envId, trx) ok, err := limitsAgent.CanCreateShare(int(principal.ID), envId, reserved, uniqueName, shareMode, backendMode, trx)
if err != nil { if err != nil {
return errors.Wrapf(err, "error checking share limits for '%v'", principal.Email) return errors.Wrapf(err, "error checking share limits for '%v'", principal.Email)
} }

View File

@ -1,65 +0,0 @@
package store
import (
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// AccountLimitJournal is one journal row recording a limit-related event
// (warning, limit, clear) for an account, together with the transfer totals
// observed when the event was recorded.
type AccountLimitJournal struct {
	Model
	AccountId int                // account this entry applies to
	RxBytes   int64              // bytes received at the time of the event
	TxBytes   int64              // bytes sent at the time of the event
	Action    LimitJournalAction // warning/limit/clear action recorded
}
// CreateAccountLimitJournal inserts j into account_limit_journal and returns
// the id of the new row.
func (str *Store) CreateAccountLimitJournal(j *AccountLimitJournal, trx *sqlx.Tx) (int, error) {
	stmt, err := trx.Prepare("insert into account_limit_journal (account_id, rx_bytes, tx_bytes, action) values ($1, $2, $3, $4) returning id")
	if err != nil {
		return 0, errors.Wrap(err, "error preparing account_limit_journal insert statement")
	}
	id := 0
	if err := stmt.QueryRow(j.AccountId, j.RxBytes, j.TxBytes, j.Action).Scan(&id); err != nil {
		return 0, errors.Wrap(err, "error executing account_limit_journal insert statement")
	}
	return id, nil
}
// IsAccountLimitJournalEmpty reports whether acctId has no journal entries.
func (str *Store) IsAccountLimitJournalEmpty(acctId int, trx *sqlx.Tx) (bool, error) {
	var count int
	if err := trx.QueryRowx("select count(0) from account_limit_journal where account_id = $1", acctId).Scan(&count); err != nil {
		return false, err
	}
	return count == 0, nil
}
// FindLatestAccountLimitJournal returns acctId's most recent journal entry
// (highest id). Errors if the account has no entries.
func (str *Store) FindLatestAccountLimitJournal(acctId int, trx *sqlx.Tx) (*AccountLimitJournal, error) {
	alj := &AccountLimitJournal{}
	if err := trx.QueryRowx("select * from account_limit_journal where account_id = $1 order by id desc limit 1", acctId).StructScan(alj); err != nil {
		return nil, errors.Wrap(err, "error finding account_limit_journal by account_id")
	}
	return alj, nil
}
// FindAllLatestAccountLimitJournal returns, for each account that has journal
// entries, that account's most recent (highest id) entry.
func (str *Store) FindAllLatestAccountLimitJournal(trx *sqlx.Tx) ([]*AccountLimitJournal, error) {
	rows, err := trx.Queryx("select id, account_id, rx_bytes, tx_bytes, action, created_at, updated_at from account_limit_journal where id in (select max(id) as id from account_limit_journal group by account_id)")
	if err != nil {
		return nil, errors.Wrap(err, "error selecting all latest account_limit_journal")
	}
	// release the cursor even when scanning fails part-way through
	defer func() { _ = rows.Close() }()
	var aljs []*AccountLimitJournal
	for rows.Next() {
		alj := &AccountLimitJournal{}
		if err := rows.StructScan(alj); err != nil {
			return nil, errors.Wrap(err, "error scanning account_limit_journal")
		}
		aljs = append(aljs, alj)
	}
	// surface any error that terminated iteration early
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating account_limit_journal")
	}
	return aljs, nil
}
// DeleteAccountLimitJournalForAccount removes every journal entry for acctId.
// (The wrap message previously read "account_limit journal"; fixed to name the
// table consistently.)
func (str *Store) DeleteAccountLimitJournalForAccount(acctId int, trx *sqlx.Tx) error {
	if _, err := trx.Exec("delete from account_limit_journal where account_id = $1", acctId); err != nil {
		return errors.Wrapf(err, "error deleting account_limit_journal for '#%d'", acctId)
	}
	return nil
}

View File

@ -1,79 +0,0 @@
package store
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestAccountLimitJournal exercises create/empty-check/find-latest round trips
// against an in-memory sqlite store.
func TestAccountLimitJournal(t *testing.T) {
	str, err := Open(&Config{Path: ":memory:", Type: "sqlite3"})
	assert.NoError(t, err)
	assert.NotNil(t, str)
	trx, err := str.Begin()
	assert.NoError(t, err)
	assert.NotNil(t, trx)

	// no entries yet for any account id
	empty, err := str.IsAccountLimitJournalEmpty(1, trx)
	assert.NoError(t, err)
	assert.True(t, empty)

	acctId, err := str.CreateAccount(&Account{Email: "nobody@nowehere.com", Salt: "salt", Password: "password", Token: "token", Limitless: false, Deleted: false}, trx)
	assert.NoError(t, err)

	// first entry: a warning
	_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId, RxBytes: 1024, TxBytes: 2048, Action: WarningAction}, trx)
	assert.NoError(t, err)
	empty, err = str.IsAccountLimitJournalEmpty(acctId, trx)
	assert.NoError(t, err)
	assert.False(t, empty)

	latest, err := str.FindLatestAccountLimitJournal(acctId, trx)
	assert.NoError(t, err)
	assert.NotNil(t, latest)
	assert.Equal(t, int64(1024), latest.RxBytes)
	assert.Equal(t, int64(2048), latest.TxBytes)
	assert.Equal(t, WarningAction, latest.Action)

	// second entry: a limit, which becomes the latest
	_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId, RxBytes: 2048, TxBytes: 4096, Action: LimitAction}, trx)
	assert.NoError(t, err)
	latest, err = str.FindLatestAccountLimitJournal(acctId, trx)
	assert.NoError(t, err)
	assert.NotNil(t, latest)
	assert.Equal(t, int64(2048), latest.RxBytes)
	assert.Equal(t, int64(4096), latest.TxBytes)
	assert.Equal(t, LimitAction, latest.Action)
}
// TestFindAllLatestAccountLimitJournal verifies that only the newest entry per
// account is returned.
func TestFindAllLatestAccountLimitJournal(t *testing.T) {
	str, err := Open(&Config{Path: ":memory:", Type: "sqlite3"})
	assert.NoError(t, err)
	assert.NotNil(t, str)
	trx, err := str.Begin()
	assert.NoError(t, err)
	assert.NotNil(t, trx)

	// account 1 gets three entries; the final limit entry should win
	acctId1, err := str.CreateAccount(&Account{Email: "nobody@nowehere.com", Salt: "salt1", Password: "password1", Token: "token1", Limitless: false, Deleted: false}, trx)
	assert.NoError(t, err)
	_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId1, RxBytes: 2048, TxBytes: 4096, Action: WarningAction}, trx)
	assert.NoError(t, err)
	_, err = str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId1, RxBytes: 2048, TxBytes: 4096, Action: ClearAction}, trx)
	assert.NoError(t, err)
	latestAcct1Id, err := str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId1, RxBytes: 2048, TxBytes: 4096, Action: LimitAction}, trx)
	assert.NoError(t, err)

	// account 2 gets a single warning entry
	acctId2, err := str.CreateAccount(&Account{Email: "someone@somewhere.com", Salt: "salt2", Password: "password2", Token: "token2", Limitless: false, Deleted: false}, trx)
	assert.NoError(t, err)
	latestAcct2Id, err := str.CreateAccountLimitJournal(&AccountLimitJournal{AccountId: acctId2, RxBytes: 2048, TxBytes: 4096, Action: WarningAction}, trx)
	assert.NoError(t, err)

	aljs, err := str.FindAllLatestAccountLimitJournal(trx)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(aljs))
	assert.Equal(t, latestAcct1Id, aljs[0].Id)
	assert.Equal(t, latestAcct2Id, aljs[1].Id)
}

View File

@ -0,0 +1,40 @@
package store
import (
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// AppliedLimitClass joins an account to a limit class, applying that class's
// limits to the account.
type AppliedLimitClass struct {
	Model
	AccountId    int // account receiving the limits
	LimitClassId int // limit class being applied
}
// ApplyLimitClass persists lc, attaching its limit class to its account, and
// returns the id of the new applied_limit_classes row.
func (str *Store) ApplyLimitClass(lc *AppliedLimitClass, trx *sqlx.Tx) (int, error) {
	stmt, err := trx.Prepare("insert into applied_limit_classes (account_id, limit_class_id) values ($1, $2) returning id")
	if err != nil {
		return 0, errors.Wrap(err, "error preparing applied_limit_classes insert statement")
	}
	id := 0
	if err := stmt.QueryRow(lc.AccountId, lc.LimitClassId).Scan(&id); err != nil {
		return 0, errors.Wrap(err, "error executing applied_limit_classes insert statement")
	}
	return id, nil
}
// FindAppliedLimitClassesForAccount returns every limit class applied to acctId.
func (str *Store) FindAppliedLimitClassesForAccount(acctId int, trx *sqlx.Tx) ([]*LimitClass, error) {
	rows, err := trx.Queryx("select limit_classes.* from applied_limit_classes, limit_classes where applied_limit_classes.account_id = $1 and applied_limit_classes.limit_class_id = limit_classes.id", acctId)
	if err != nil {
		return nil, errors.Wrap(err, "error finding limit classes for account")
	}
	// release the cursor even when scanning fails part-way through
	defer func() { _ = rows.Close() }()
	var lcs []*LimitClass
	for rows.Next() {
		lc := &LimitClass{}
		// scan into the struct pointer directly; the previous StructScan(&lc)
		// passed a **LimitClass, unlike every sibling scan in the store
		if err := rows.StructScan(lc); err != nil {
			return nil, errors.Wrap(err, "error scanning limit_classes")
		}
		lcs = append(lcs, lc)
	}
	// surface any error that terminated iteration early
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating limit_classes")
	}
	return lcs, nil
}

View File

@ -0,0 +1,137 @@
package store
import (
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// BandwidthLimitJournalEntry is one journal row recording a bandwidth limit
// event for an account. LimitClassId is nil when the event was produced by the
// global (unscoped) bandwidth limit, and references the triggering limit class
// otherwise.
type BandwidthLimitJournalEntry struct {
	Model
	AccountId    int         // account this entry applies to
	LimitClassId *int        // nil for the global limit; otherwise the limit class
	Action       LimitAction // warning/limit/clear action recorded
	RxBytes      int64       // bytes received at the time of the event
	TxBytes      int64       // bytes sent at the time of the event
}
// CreateBandwidthLimitJournalEntry inserts j into bandwidth_limit_journal and
// returns the id of the new row.
func (str *Store) CreateBandwidthLimitJournalEntry(j *BandwidthLimitJournalEntry, trx *sqlx.Tx) (int, error) {
	stmt, err := trx.Prepare("insert into bandwidth_limit_journal (account_id, limit_class_id, action, rx_bytes, tx_bytes) values ($1, $2, $3, $4, $5) returning id")
	if err != nil {
		return 0, errors.Wrap(err, "error preparing bandwidth_limit_journal insert statement")
	}
	id := 0
	if err := stmt.QueryRow(j.AccountId, j.LimitClassId, j.Action, j.RxBytes, j.TxBytes).Scan(&id); err != nil {
		return 0, errors.Wrap(err, "error executing bandwidth_limit_journal insert statement")
	}
	return id, nil
}
// IsBandwidthLimitJournalEmpty reports whether acctId has no bandwidth journal
// entries of any kind (global or per-limit-class).
func (str *Store) IsBandwidthLimitJournalEmpty(acctId int, trx *sqlx.Tx) (bool, error) {
	var count int
	if err := trx.QueryRowx("select count(0) from bandwidth_limit_journal where account_id = $1", acctId).Scan(&count); err != nil {
		return false, err
	}
	return count == 0, nil
}
// FindLatestBandwidthLimitJournal returns acctId's most recent journal entry
// (highest id), regardless of which limit class produced it.
func (str *Store) FindLatestBandwidthLimitJournal(acctId int, trx *sqlx.Tx) (*BandwidthLimitJournalEntry, error) {
	je := &BandwidthLimitJournalEntry{}
	if err := trx.QueryRowx("select * from bandwidth_limit_journal where account_id = $1 order by id desc limit 1", acctId).StructScan(je); err != nil {
		return nil, errors.Wrap(err, "error finding bandwidth_limit_journal by account_id")
	}
	return je, nil
}
// IsBandwidthLimitJournalEmptyForGlobal reports whether acctId has no journal
// entries attributed to the global (nil limit_class_id) bandwidth limit.
func (str *Store) IsBandwidthLimitJournalEmptyForGlobal(acctId int, trx *sqlx.Tx) (bool, error) {
	var count int
	if err := trx.QueryRowx("select count(0) from bandwidth_limit_journal where account_id = $1 and limit_class_id is null", acctId).Scan(&count); err != nil {
		return false, err
	}
	return count == 0, nil
}
// FindLatestBandwidthLimitJournalForGlobal returns acctId's most recent journal
// entry attributed to the global (nil limit_class_id) bandwidth limit.
func (str *Store) FindLatestBandwidthLimitJournalForGlobal(acctId int, trx *sqlx.Tx) (*BandwidthLimitJournalEntry, error) {
	je := &BandwidthLimitJournalEntry{}
	if err := trx.QueryRowx("select * from bandwidth_limit_journal where account_id = $1 and limit_class_id is null order by id desc limit 1", acctId).StructScan(je); err != nil {
		return nil, errors.Wrap(err, "error finding bandwidth_limit_journal by account_id for global")
	}
	return je, nil
}
// IsBandwidthLimitJournalEmptyForLimitClass reports whether acctId has no
// journal entries attributed to the limit class lcId.
func (str *Store) IsBandwidthLimitJournalEmptyForLimitClass(acctId, lcId int, trx *sqlx.Tx) (bool, error) {
	var count int
	if err := trx.QueryRowx("select count(0) from bandwidth_limit_journal where account_id = $1 and limit_class_id = $2", acctId, lcId).Scan(&count); err != nil {
		return false, err
	}
	return count == 0, nil
}
// FindLatestBandwidthLimitJournalForLimitClass returns acctId's most recent
// journal entry attributed to the limit class lcId.
func (str *Store) FindLatestBandwidthLimitJournalForLimitClass(acctId, lcId int, trx *sqlx.Tx) (*BandwidthLimitJournalEntry, error) {
	je := &BandwidthLimitJournalEntry{}
	if err := trx.QueryRowx("select * from bandwidth_limit_journal where account_id = $1 and limit_class_id = $2 order by id desc limit 1", acctId, lcId).StructScan(je); err != nil {
		return nil, errors.Wrap(err, "error finding bandwidth_limit_journal by account_id and limit_class_id")
	}
	return je, nil
}
// FindAllBandwidthLimitJournal returns every entry in bandwidth_limit_journal.
func (str *Store) FindAllBandwidthLimitJournal(trx *sqlx.Tx) ([]*BandwidthLimitJournalEntry, error) {
	rows, err := trx.Queryx("select * from bandwidth_limit_journal")
	if err != nil {
		return nil, errors.Wrap(err, "error finding all from bandwidth_limit_journal")
	}
	// release the cursor even when scanning fails part-way through
	defer func() { _ = rows.Close() }()
	var jes []*BandwidthLimitJournalEntry
	for rows.Next() {
		je := &BandwidthLimitJournalEntry{}
		if err := rows.StructScan(je); err != nil {
			return nil, errors.Wrap(err, "error scanning bandwidth_limit_journal")
		}
		jes = append(jes, je)
	}
	// surface any error that terminated iteration early
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating bandwidth_limit_journal")
	}
	return jes, nil
}
// FindAllLatestBandwidthLimitJournalForAccount returns acctId's journal entries.
//
// NOTE(review): despite the "Latest" in the name, the query selects every row
// for the account, not only the newest per limit class — confirm whether a
// max(id)-per-class filter was intended.
func (str *Store) FindAllLatestBandwidthLimitJournalForAccount(acctId int, trx *sqlx.Tx) ([]*BandwidthLimitJournalEntry, error) {
	rows, err := trx.Queryx("select * from bandwidth_limit_journal where account_id = $1", acctId)
	if err != nil {
		return nil, errors.Wrap(err, "error finding all for account from bandwidth_limit_journal")
	}
	// release the cursor even when scanning fails part-way through
	defer func() { _ = rows.Close() }()
	var jes []*BandwidthLimitJournalEntry
	for rows.Next() {
		je := &BandwidthLimitJournalEntry{}
		if err := rows.StructScan(je); err != nil {
			return nil, errors.Wrap(err, "error scanning bandwidth_limit_journal")
		}
		jes = append(jes, je)
	}
	// surface any error that terminated iteration early
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating bandwidth_limit_journal")
	}
	return jes, nil
}
// FindAllLatestBandwidthLimitJournal returns, for each account with journal
// entries, that account's most recent (highest id) entry.
func (str *Store) FindAllLatestBandwidthLimitJournal(trx *sqlx.Tx) ([]*BandwidthLimitJournalEntry, error) {
	rows, err := trx.Queryx("select id, account_id, limit_class_id, action, rx_bytes, tx_bytes, created_at, updated_at from bandwidth_limit_journal where id in (select max(id) as id from bandwidth_limit_journal group by account_id)")
	if err != nil {
		return nil, errors.Wrap(err, "error finding all latest bandwidth_limit_journal")
	}
	// release the cursor even when scanning fails part-way through
	defer func() { _ = rows.Close() }()
	var jes []*BandwidthLimitJournalEntry
	for rows.Next() {
		je := &BandwidthLimitJournalEntry{}
		if err := rows.StructScan(je); err != nil {
			return nil, errors.Wrap(err, "error scanning bandwidth_limit_journal")
		}
		jes = append(jes, je)
	}
	// surface any error that terminated iteration early
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating bandwidth_limit_journal")
	}
	return jes, nil
}
// DeleteBandwidthLimitJournalEntryForGlobal removes acctId's journal entries
// attributed to the global (nil limit_class_id) bandwidth limit.
func (str *Store) DeleteBandwidthLimitJournalEntryForGlobal(acctId int, trx *sqlx.Tx) error {
	_, err := trx.Exec("delete from bandwidth_limit_journal where account_id = $1 and limit_class_id is null", acctId)
	if err != nil {
		return errors.Wrapf(err, "error deleting from bandwidth_limit_journal for account_id = %d and limit_class_id is null", acctId)
	}
	return nil
}
// DeleteBandwidthLimitJournalEntryForLimitClass removes acctId's journal
// entries attributed to the limit class lcId.
func (str *Store) DeleteBandwidthLimitJournalEntryForLimitClass(acctId int, lcId int, trx *sqlx.Tx) error {
	_, err := trx.Exec("delete from bandwidth_limit_journal where account_id = $1 and limit_class_id = $2", acctId, lcId)
	if err != nil {
		return errors.Wrapf(err, "error deleting from bandwidth_limit_journal for account_id = %d and limit_class_id = %d", acctId, lcId)
	}
	return nil
}

View File

@ -0,0 +1,95 @@
package store
import (
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/stretchr/testify/assert"
"testing"
)
// TestBandwidthLimitJournal exercises global and limit-class-scoped journal
// entries against an in-memory sqlite store.
func TestBandwidthLimitJournal(t *testing.T) {
	str, err := Open(&Config{Path: ":memory:", Type: "sqlite3"})
	assert.NoError(t, err)
	assert.NotNil(t, str)
	trx, err := str.Begin()
	assert.NoError(t, err)
	assert.NotNil(t, trx)

	// journal starts out empty for any account id
	empty, err := str.IsBandwidthLimitJournalEmpty(1, trx)
	assert.NoError(t, err)
	assert.True(t, empty)

	acctId, err := str.CreateAccount(&Account{Email: "nobody@nowhere.com", Salt: "salt", Password: "password", Token: "token"}, trx)
	assert.NoError(t, err)

	// a global (nil limit class) warning entry
	_, err = str.CreateBandwidthLimitJournalEntry(&BandwidthLimitJournalEntry{AccountId: acctId, Action: WarningLimitAction, RxBytes: 1024, TxBytes: 2048}, trx)
	assert.NoError(t, err)
	empty, err = str.IsBandwidthLimitJournalEmpty(acctId, trx)
	assert.NoError(t, err)
	assert.False(t, empty)

	entry, err := str.FindLatestBandwidthLimitJournal(acctId, trx)
	assert.NoError(t, err)
	assert.NotNil(t, entry)
	assert.Nil(t, entry.LimitClassId)
	assert.Equal(t, WarningLimitAction, entry.Action)
	assert.Equal(t, int64(1024), entry.RxBytes)
	assert.Equal(t, int64(2048), entry.TxBytes)

	// create a scoped limit class, then a limit entry attributed to it
	scoped := &LimitClass{
		BackendMode:   new(sdk.BackendMode),
		PeriodMinutes: 60,
		RxBytes:       4096,
		TxBytes:       8192,
		TotalBytes:    10240,
		LimitAction:   LimitLimitAction,
	}
	*scoped.BackendMode = sdk.ProxyBackendMode
	scopedId, err := str.CreateLimitClass(scoped, trx)
	assert.NoError(t, err)
	_, err = str.CreateBandwidthLimitJournalEntry(&BandwidthLimitJournalEntry{AccountId: acctId, LimitClassId: &scopedId, Action: LimitLimitAction, RxBytes: 10240, TxBytes: 20480}, trx)
	assert.NoError(t, err)

	entry, err = str.FindLatestBandwidthLimitJournal(acctId, trx)
	assert.NoError(t, err)
	assert.NotNil(t, entry)
	assert.NotNil(t, entry.LimitClassId)
	assert.Equal(t, scopedId, *entry.LimitClassId)
	assert.Equal(t, LimitLimitAction, entry.Action)
	assert.Equal(t, int64(10240), entry.RxBytes)
	assert.Equal(t, int64(20480), entry.TxBytes)
}
// TestFindAllBandwidthLimitJournal verifies that only the newest journal entry
// per account is returned.
func TestFindAllBandwidthLimitJournal(t *testing.T) {
	str, err := Open(&Config{Path: ":memory:", Type: "sqlite3"})
	assert.NoError(t, err)
	assert.NotNil(t, str)
	trx, err := str.Begin()
	assert.NoError(t, err)
	assert.NotNil(t, trx)

	// first account: three entries; the final one should win
	acctId1, err := str.CreateAccount(&Account{Email: "nobody@nowehere.com", Salt: "salt1", Password: "password1", Token: "token1", Limitless: false, Deleted: false}, trx)
	assert.NoError(t, err)
	_, err = str.CreateBandwidthLimitJournalEntry(&BandwidthLimitJournalEntry{AccountId: acctId1, Action: WarningLimitAction, RxBytes: 2048, TxBytes: 4096}, trx)
	assert.NoError(t, err)
	_, err = str.CreateBandwidthLimitJournalEntry(&BandwidthLimitJournalEntry{AccountId: acctId1, Action: LimitLimitAction, RxBytes: 2048, TxBytes: 4096}, trx)
	assert.NoError(t, err)
	latest1, err := str.CreateBandwidthLimitJournalEntry(&BandwidthLimitJournalEntry{AccountId: acctId1, Action: LimitLimitAction, RxBytes: 8192, TxBytes: 10240}, trx)
	assert.NoError(t, err)

	// second account: a single warning entry
	acctId2, err := str.CreateAccount(&Account{Email: "someone@somewhere.com", Salt: "salt2", Password: "password2", Token: "token2", Limitless: false, Deleted: false}, trx)
	assert.NoError(t, err)
	latest2, err := str.CreateBandwidthLimitJournalEntry(&BandwidthLimitJournalEntry{AccountId: acctId2, Action: WarningLimitAction, RxBytes: 2048, TxBytes: 4096}, trx)
	assert.NoError(t, err)

	jes, err := str.FindAllLatestBandwidthLimitJournal(trx)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(jes))
	assert.Equal(t, latest1, jes[0].Id)
	assert.Equal(t, latest2, jes[1].Id)
}

View File

@ -1,93 +0,0 @@
package store
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// EnvironmentLimitJournal records a limit-related event against a single environment, together with
// the bandwidth totals observed when the event was written.
type EnvironmentLimitJournal struct {
	Model
	EnvironmentId int   // id of the environment this entry applies to
	RxBytes       int64 // received bytes observed at event time
	TxBytes       int64 // transmitted bytes observed at event time
	Action        LimitJournalAction // event kind; presumably "warning"/"limit"/"clear" — confirm against limit action constants
}
// CreateEnvironmentLimitJournal inserts a new environment_limit_journal row inside the given
// transaction and returns the generated id.
func (str *Store) CreateEnvironmentLimitJournal(j *EnvironmentLimitJournal, trx *sqlx.Tx) (int, error) {
	stmt, err := trx.Prepare("insert into environment_limit_journal (environment_id, rx_bytes, tx_bytes, action) values ($1, $2, $3, $4) returning id")
	if err != nil {
		return 0, errors.Wrap(err, "error preparing environment_limit_journal insert statement")
	}
	var journalId int
	err = stmt.QueryRow(j.EnvironmentId, j.RxBytes, j.TxBytes, j.Action).Scan(&journalId)
	if err != nil {
		return 0, errors.Wrap(err, "error executing environment_limit_journal insert statement")
	}
	return journalId, nil
}
// IsEnvironmentLimitJournalEmpty reports whether no journal rows exist for the given environment.
func (str *Store) IsEnvironmentLimitJournalEmpty(envId int, trx *sqlx.Tx) (bool, error) {
	var total int
	err := trx.QueryRowx("select count(0) from environment_limit_journal where environment_id = $1", envId).Scan(&total)
	if err != nil {
		return false, err
	}
	return total == 0, nil
}
// FindLatestEnvironmentLimitJournal returns the most recently created journal row for an
// environment; errors (including no rows) are wrapped and returned.
func (str *Store) FindLatestEnvironmentLimitJournal(envId int, trx *sqlx.Tx) (*EnvironmentLimitJournal, error) {
	latest := &EnvironmentLimitJournal{}
	err := trx.QueryRowx("select * from environment_limit_journal where environment_id = $1 order by created_at desc limit 1", envId).StructScan(latest)
	if err != nil {
		return nil, errors.Wrap(err, "error finding environment_limit_journal by environment_id")
	}
	return latest, nil
}
// FindSelectedLatestEnvironmentLimitJournal returns the most recent journal entry for each of the
// requested environment ids. Environments without entries are simply absent from the result.
// Returns (nil, nil) when envIds is empty.
//
// Fix: the rows cursor is now closed on every path and rows.Err() is checked after iteration,
// so errors that abort the loop are no longer silently swallowed.
func (str *Store) FindSelectedLatestEnvironmentLimitJournal(envIds []int, trx *sqlx.Tx) ([]*EnvironmentLimitJournal, error) {
	if len(envIds) < 1 {
		return nil, nil
	}
	// envIds are ints, so direct interpolation into the "in (...)" clause is injection-safe
	in := "("
	for i := range envIds {
		if i > 0 {
			in += ", "
		}
		in += fmt.Sprintf("%d", envIds[i])
	}
	in += ")"
	rows, err := trx.Queryx("select id, environment_id, rx_bytes, tx_bytes, action, created_at, updated_at from environment_limit_journal where id in (select max(id) as id from environment_limit_journal group by environment_id) and environment_id in " + in)
	if err != nil {
		return nil, errors.Wrap(err, "error selecting all latest environment_limit_journal")
	}
	defer func() { _ = rows.Close() }()
	var eljs []*EnvironmentLimitJournal
	for rows.Next() {
		elj := &EnvironmentLimitJournal{}
		if err := rows.StructScan(elj); err != nil {
			return nil, errors.Wrap(err, "error scanning environment_limit_journal")
		}
		eljs = append(eljs, elj)
	}
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating environment_limit_journal rows")
	}
	return eljs, nil
}
// FindAllLatestEnvironmentLimitJournal returns the most recent journal entry for every environment
// that has at least one entry.
//
// Fix: the rows cursor is now closed on every path and rows.Err() is checked after iteration.
func (str *Store) FindAllLatestEnvironmentLimitJournal(trx *sqlx.Tx) ([]*EnvironmentLimitJournal, error) {
	rows, err := trx.Queryx("select id, environment_id, rx_bytes, tx_bytes, action, created_at, updated_at from environment_limit_journal where id in (select max(id) as id from environment_limit_journal group by environment_id)")
	if err != nil {
		return nil, errors.Wrap(err, "error selecting all latest environment_limit_journal")
	}
	defer func() { _ = rows.Close() }()
	var eljs []*EnvironmentLimitJournal
	for rows.Next() {
		elj := &EnvironmentLimitJournal{}
		if err := rows.StructScan(elj); err != nil {
			return nil, errors.Wrap(err, "error scanning environment_limit_journal")
		}
		eljs = append(eljs, elj)
	}
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating environment_limit_journal rows")
	}
	return eljs, nil
}
// DeleteEnvironmentLimitJournalForEnvironment removes every journal row belonging to the given
// environment.
//
// Fix: corrected the "deleteing" typo in the wrapped error message.
func (str *Store) DeleteEnvironmentLimitJournalForEnvironment(envId int, trx *sqlx.Tx) error {
	if _, err := trx.Exec("delete from environment_limit_journal where environment_id = $1", envId); err != nil {
		return errors.Wrapf(err, "error deleting environment_limit_journal for '#%d'", envId)
	}
	return nil
}

View File

@ -14,28 +14,28 @@ type Frontend struct {
PublicName *string PublicName *string
UrlTemplate *string UrlTemplate *string
Reserved bool Reserved bool
Deleted bool PermissionMode PermissionMode
} }
func (str *Store) CreateFrontend(envId int, f *Frontend, tx *sqlx.Tx) (int, error) { func (str *Store) CreateFrontend(envId int, f *Frontend, tx *sqlx.Tx) (int, error) {
stmt, err := tx.Prepare("insert into frontends (environment_id, private_share_id, token, z_id, public_name, url_template, reserved) values ($1, $2, $3, $4, $5, $6, $7) returning id") stmt, err := tx.Prepare("insert into frontends (environment_id, private_share_id, token, z_id, public_name, url_template, reserved, permission_mode) values ($1, $2, $3, $4, $5, $6, $7, $8) returning id")
if err != nil { if err != nil {
return 0, errors.Wrap(err, "error preparing frontends insert statement") return 0, errors.Wrap(err, "error preparing frontends insert statement")
} }
var id int var id int
if err := stmt.QueryRow(envId, f.PrivateShareId, f.Token, f.ZId, f.PublicName, f.UrlTemplate, f.Reserved).Scan(&id); err != nil { if err := stmt.QueryRow(envId, f.PrivateShareId, f.Token, f.ZId, f.PublicName, f.UrlTemplate, f.Reserved, f.PermissionMode).Scan(&id); err != nil {
return 0, errors.Wrap(err, "error executing frontends insert statement") return 0, errors.Wrap(err, "error executing frontends insert statement")
} }
return id, nil return id, nil
} }
func (str *Store) CreateGlobalFrontend(f *Frontend, tx *sqlx.Tx) (int, error) { func (str *Store) CreateGlobalFrontend(f *Frontend, tx *sqlx.Tx) (int, error) {
stmt, err := tx.Prepare("insert into frontends (token, z_id, public_name, url_template, reserved) values ($1, $2, $3, $4, $5) returning id") stmt, err := tx.Prepare("insert into frontends (token, z_id, public_name, url_template, reserved, permission_mode) values ($1, $2, $3, $4, $5, $6) returning id")
if err != nil { if err != nil {
return 0, errors.Wrap(err, "error preparing global frontends insert statement") return 0, errors.Wrap(err, "error preparing global frontends insert statement")
} }
var id int var id int
if err := stmt.QueryRow(f.Token, f.ZId, f.PublicName, f.UrlTemplate, f.Reserved).Scan(&id); err != nil { if err := stmt.QueryRow(f.Token, f.ZId, f.PublicName, f.UrlTemplate, f.Reserved, f.PermissionMode).Scan(&id); err != nil {
return 0, errors.Wrap(err, "error executing global frontends insert statement") return 0, errors.Wrap(err, "error executing global frontends insert statement")
} }
return id, nil return id, nil
@ -122,12 +122,12 @@ func (str *Store) FindFrontendsForPrivateShare(shrId int, tx *sqlx.Tx) ([]*Front
} }
func (str *Store) UpdateFrontend(fe *Frontend, tx *sqlx.Tx) error { func (str *Store) UpdateFrontend(fe *Frontend, tx *sqlx.Tx) error {
sql := "update frontends set environment_id = $1, private_share_id = $2, token = $3, z_id = $4, public_name = $5, url_template = $6, reserved = $7, updated_at = current_timestamp where id = $8" sql := "update frontends set environment_id = $1, private_share_id = $2, token = $3, z_id = $4, public_name = $5, url_template = $6, reserved = $7, permission_mode = $8, updated_at = current_timestamp where id = $9"
stmt, err := tx.Prepare(sql) stmt, err := tx.Prepare(sql)
if err != nil { if err != nil {
return errors.Wrap(err, "error preparing frontends update statement") return errors.Wrap(err, "error preparing frontends update statement")
} }
_, err = stmt.Exec(fe.EnvironmentId, fe.PrivateShareId, fe.Token, fe.ZId, fe.PublicName, fe.UrlTemplate, fe.Reserved, fe.Id) _, err = stmt.Exec(fe.EnvironmentId, fe.PrivateShareId, fe.Token, fe.ZId, fe.PublicName, fe.UrlTemplate, fe.Reserved, fe.PermissionMode, fe.Id)
if err != nil { if err != nil {
return errors.Wrap(err, "error executing frontends update statement") return errors.Wrap(err, "error executing frontends update statement")
} }

View File

@ -0,0 +1,18 @@
package store
import (
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// IsFrontendGrantedToAccount reports whether a frontend_grants row exists linking the given account
// to the given frontend (used when evaluating closed-permission-mode frontends).
func (str *Store) IsFrontendGrantedToAccount(acctId, frontendId int, trx *sqlx.Tx) (bool, error) {
	stmt, err := trx.Prepare("select count(0) from frontend_grants where account_id = $1 AND frontend_id = $2")
	if err != nil {
		return false, errors.Wrap(err, "error preparing frontend_grants select statement")
	}
	var matches int
	err = stmt.QueryRow(acctId, frontendId).Scan(&matches)
	if err != nil {
		return false, errors.Wrap(err, "error querying frontend_grants count")
	}
	return matches > 0, nil
}

View File

@ -0,0 +1,19 @@
package store
import (
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// LimitCheckLock upserts a limit_check_locks row for the given account, refreshing updated_at when
// the row already exists. It is a no-op unless locking is enabled in the store configuration.
func (str *Store) LimitCheckLock(acctId int, trx *sqlx.Tx) error {
	if !str.cfg.EnableLocking {
		return nil
	}
	stmt, err := trx.Prepare("insert into limit_check_locks (account_id) values ($1) on conflict (account_id) do update set updated_at = current_timestamp")
	if err != nil {
		return errors.Wrap(err, "error preparing upsert on limit_check_locks")
	}
	if _, err := stmt.Exec(acctId); err != nil {
		return errors.Wrap(err, "error executing upsert on limit_check_locks")
	}
	return nil
}

View File

@ -0,0 +1,175 @@
package store
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/openziti/zrok/sdk/golang/sdk"
"github.com/openziti/zrok/util"
"github.com/pkg/errors"
)
// Unlimited is the sentinel value meaning "no limit enforced" for count and byte thresholds.
const Unlimited = -1

// BaseLimitClass is the contract shared by every limit class implementation.
type BaseLimitClass interface {
	IsGlobal() bool       // presumably true only for the globally-configured limits — confirm against config implementation
	GetLimitClassId() int // database id of the backing limit_classes row
	String() string
}

// ResourceCountClass exposes the resource-count thresholds of a limit class.
type ResourceCountClass interface {
	BaseLimitClass
	GetEnvironments() int
	GetShares() int
	GetReservedShares() int
	GetUniqueNames() int
	GetShareFrontends() int
}

// BandwidthClass exposes the bandwidth thresholds of a limit class and the action taken when
// a threshold is crossed.
type BandwidthClass interface {
	BaseLimitClass
	IsScoped() bool // true when the class applies only to a specific backend mode
	GetBackendMode() sdk.BackendMode
	GetPeriodMinutes() int
	GetRxBytes() int64
	GetTxBytes() int64
	GetTotalBytes() int64
	GetLimitAction() LimitAction
}
// LimitClass is a database-backed bundle of resource-count and bandwidth limits that can be applied
// to accounts. A value of Unlimited (-1) in any count/byte field means that dimension is not
// limited by this class.
type LimitClass struct {
	Model
	Label          *string          // optional human-readable name
	BackendMode    *sdk.BackendMode // when non-nil, the class is scoped to this backend mode
	Environments   int
	Shares         int
	ReservedShares int
	UniqueNames    int
	ShareFrontends int
	PeriodMinutes  int // bandwidth accounting window
	RxBytes        int64
	TxBytes        int64
	TotalBytes     int64
	LimitAction    LimitAction // action taken when a bandwidth threshold is crossed
}
// IsGlobal always reports false: a LimitClass is a database row, not the globally-configured
// default limit set.
func (lc LimitClass) IsGlobal() bool {
	return false
}

// IsScoped reports whether the class targets a specific backend mode.
func (lc LimitClass) IsScoped() bool {
	return lc.BackendMode != nil
}

// GetLimitClassId returns the database id of the limit_classes row.
func (lc LimitClass) GetLimitClassId() int {
	return lc.Id
}

// GetEnvironments returns the environment count threshold (Unlimited = no limit).
func (lc LimitClass) GetEnvironments() int {
	return lc.Environments
}

// GetShares returns the share count threshold (Unlimited = no limit).
func (lc LimitClass) GetShares() int {
	return lc.Shares
}

// GetReservedShares returns the reserved share count threshold (Unlimited = no limit).
func (lc LimitClass) GetReservedShares() int {
	return lc.ReservedShares
}

// GetUniqueNames returns the unique name count threshold (Unlimited = no limit).
func (lc LimitClass) GetUniqueNames() int {
	return lc.UniqueNames
}

// GetShareFrontends returns the per-share frontend count threshold (Unlimited = no limit).
func (lc LimitClass) GetShareFrontends() int {
	return lc.ShareFrontends
}

// GetBackendMode returns the scoped backend mode, or the empty string for an unscoped class.
func (lc LimitClass) GetBackendMode() sdk.BackendMode {
	if lc.BackendMode == nil {
		return ""
	}
	return *lc.BackendMode
}

// GetPeriodMinutes returns the bandwidth accounting window in minutes.
func (lc LimitClass) GetPeriodMinutes() int {
	return lc.PeriodMinutes
}

// GetRxBytes returns the received-bytes threshold for the period (Unlimited = no limit).
func (lc LimitClass) GetRxBytes() int64 {
	return lc.RxBytes
}

// GetTxBytes returns the transmitted-bytes threshold for the period (Unlimited = no limit).
func (lc LimitClass) GetTxBytes() int64 {
	return lc.TxBytes
}

// GetTotalBytes returns the combined-bytes threshold for the period (Unlimited = no limit).
func (lc LimitClass) GetTotalBytes() int64 {
	return lc.TotalBytes
}

// GetLimitAction returns the action applied when a bandwidth threshold is crossed.
func (lc LimitClass) GetLimitAction() LimitAction {
	return lc.LimitAction
}
// String renders a compact, human-readable summary of the limit class. Only dimensions that are
// actually constrained (value > Unlimited) are included, so an entirely-unlimited class renders
// with just its identity and limit action.
func (lc LimitClass) String() string {
	out := "LimitClass<"
	// prefer the label for identity; fall back to the database id
	if lc.Label != nil && *lc.Label != "" {
		out += "'" + *lc.Label + "'"
	} else {
		out += fmt.Sprintf("#%d", lc.Id)
	}
	if lc.BackendMode != nil {
		out += fmt.Sprintf(", backendMode: '%s'", *lc.BackendMode)
	}
	if lc.Environments > Unlimited {
		out += fmt.Sprintf(", environments: %d", lc.Environments)
	}
	if lc.Shares > Unlimited {
		out += fmt.Sprintf(", shares: %d", lc.Shares)
	}
	if lc.ReservedShares > Unlimited {
		out += fmt.Sprintf(", reservedShares: %d", lc.ReservedShares)
	}
	if lc.UniqueNames > Unlimited {
		out += fmt.Sprintf(", uniqueNames: %d", lc.UniqueNames)
	}
	if lc.ShareFrontends > Unlimited {
		out += fmt.Sprintf(", shareFrontends: %d", lc.ShareFrontends)
	}
	// the period is only meaningful when at least one bandwidth threshold is set
	if lc.RxBytes > Unlimited || lc.TxBytes > Unlimited || lc.TotalBytes > Unlimited {
		out += fmt.Sprintf(", periodMinutes: %d", lc.PeriodMinutes)
	}
	if lc.RxBytes > Unlimited {
		out += fmt.Sprintf(", rxBytes: %v", util.BytesToSize(lc.RxBytes))
	}
	if lc.TxBytes > Unlimited {
		out += fmt.Sprintf(", txBytes: %v", util.BytesToSize(lc.TxBytes))
	}
	if lc.TotalBytes > Unlimited {
		out += fmt.Sprintf(", totalBytes: %v", util.BytesToSize(lc.TotalBytes))
	}
	out += fmt.Sprintf(", limitAction: '%v'>", lc.LimitAction)
	return out
}
// compile-time proof that *LimitClass satisfies BandwidthClass
var _ BandwidthClass = (*LimitClass)(nil)

// CreateLimitClass inserts a new limit_classes row and returns its database id.
func (str *Store) CreateLimitClass(lc *LimitClass, trx *sqlx.Tx) (int, error) {
	stmt, err := trx.Prepare("insert into limit_classes (label, backend_mode, environments, shares, reserved_shares, unique_names, share_frontends, period_minutes, rx_bytes, tx_bytes, total_bytes, limit_action) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) returning id")
	if err != nil {
		return 0, errors.Wrap(err, "error preparing limit_classes insert statement")
	}
	var id int
	if err := stmt.QueryRow(lc.Label, lc.BackendMode, lc.Environments, lc.Shares, lc.ReservedShares, lc.UniqueNames, lc.ShareFrontends, lc.PeriodMinutes, lc.RxBytes, lc.TxBytes, lc.TotalBytes, lc.LimitAction).Scan(&id); err != nil {
		return 0, errors.Wrap(err, "error executing limit_classes insert statement")
	}
	return id, nil
}
// GetLimitClass loads a single limit_classes row by id.
func (str *Store) GetLimitClass(lcId int, trx *sqlx.Tx) (*LimitClass, error) {
	found := &LimitClass{}
	err := trx.QueryRowx("select * from limit_classes where id = $1", lcId).StructScan(found)
	if err != nil {
		return nil, errors.Wrap(err, "error selecting limit_class by id")
	}
	return found, nil
}

View File

@ -1,11 +1,10 @@
package store package store
type LimitJournalAction string type LimitAction string
const ( const (
LimitAction LimitJournalAction = "limit" LimitLimitAction LimitAction = "limit"
WarningAction LimitJournalAction = "warning" WarningLimitAction LimitAction = "warning"
ClearAction LimitJournalAction = "clear"
) )
type PermissionMode string type PermissionMode string

View File

@ -16,17 +16,17 @@ type Share struct {
FrontendEndpoint *string FrontendEndpoint *string
BackendProxyEndpoint *string BackendProxyEndpoint *string
Reserved bool Reserved bool
UniqueName bool
PermissionMode PermissionMode PermissionMode PermissionMode
Deleted bool
} }
func (str *Store) CreateShare(envId int, shr *Share, tx *sqlx.Tx) (int, error) { func (str *Store) CreateShare(envId int, shr *Share, tx *sqlx.Tx) (int, error) {
stmt, err := tx.Prepare("insert into shares (environment_id, z_id, token, share_mode, backend_mode, frontend_selection, frontend_endpoint, backend_proxy_endpoint, reserved, permission_mode) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) returning id") stmt, err := tx.Prepare("insert into shares (environment_id, z_id, token, share_mode, backend_mode, frontend_selection, frontend_endpoint, backend_proxy_endpoint, reserved, unique_name, permission_mode) values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) returning id")
if err != nil { if err != nil {
return 0, errors.Wrap(err, "error preparing shares insert statement") return 0, errors.Wrap(err, "error preparing shares insert statement")
} }
var id int var id int
if err := stmt.QueryRow(envId, shr.ZId, shr.Token, shr.ShareMode, shr.BackendMode, shr.FrontendSelection, shr.FrontendEndpoint, shr.BackendProxyEndpoint, shr.Reserved, shr.PermissionMode).Scan(&id); err != nil { if err := stmt.QueryRow(envId, shr.ZId, shr.Token, shr.ShareMode, shr.BackendMode, shr.FrontendSelection, shr.FrontendEndpoint, shr.BackendProxyEndpoint, shr.Reserved, shr.UniqueName, shr.PermissionMode).Scan(&id); err != nil {
return 0, errors.Wrap(err, "error executing shares insert statement") return 0, errors.Wrap(err, "error executing shares insert statement")
} }
return id, nil return id, nil
@ -64,6 +64,14 @@ func (str *Store) FindShareWithToken(shrToken string, tx *sqlx.Tx) (*Share, erro
return shr, nil return shr, nil
} }
// FindShareWithTokenEvenIfDeleted looks up a share by token without filtering out soft-deleted
// rows (the query intentionally omits the usual "and not deleted" predicate).
func (str *Store) FindShareWithTokenEvenIfDeleted(shrToken string, tx *sqlx.Tx) (*Share, error) {
	found := &Share{}
	err := tx.QueryRowx("select * from shares where token = $1", shrToken).StructScan(found)
	if err != nil {
		return nil, errors.Wrap(err, "error selecting share by token, even if deleted")
	}
	return found, nil
}
func (str *Store) ShareWithTokenExists(shrToken string, tx *sqlx.Tx) (bool, error) { func (str *Store) ShareWithTokenExists(shrToken string, tx *sqlx.Tx) (bool, error) {
count := 0 count := 0
if err := tx.QueryRowx("select count(0) from shares where token = $1 and not deleted", shrToken).Scan(&count); err != nil { if err := tx.QueryRowx("select count(0) from shares where token = $1 and not deleted", shrToken).Scan(&count); err != nil {
@ -97,12 +105,12 @@ func (str *Store) FindSharesForEnvironment(envId int, tx *sqlx.Tx) ([]*Share, er
} }
func (str *Store) UpdateShare(shr *Share, tx *sqlx.Tx) error { func (str *Store) UpdateShare(shr *Share, tx *sqlx.Tx) error {
sql := "update shares set z_id = $1, token = $2, share_mode = $3, backend_mode = $4, frontend_selection = $5, frontend_endpoint = $6, backend_proxy_endpoint = $7, reserved = $8, permission_mode = $9, updated_at = current_timestamp where id = $10" sql := "update shares set z_id = $1, token = $2, share_mode = $3, backend_mode = $4, frontend_selection = $5, frontend_endpoint = $6, backend_proxy_endpoint = $7, reserved = $8, unique_name = $9, permission_mode = $10, updated_at = current_timestamp where id = $11"
stmt, err := tx.Prepare(sql) stmt, err := tx.Prepare(sql)
if err != nil { if err != nil {
return errors.Wrap(err, "error preparing shares update statement") return errors.Wrap(err, "error preparing shares update statement")
} }
_, err = stmt.Exec(shr.ZId, shr.Token, shr.ShareMode, shr.BackendMode, shr.FrontendSelection, shr.FrontendEndpoint, shr.BackendProxyEndpoint, shr.Reserved, shr.PermissionMode, shr.Id) _, err = stmt.Exec(shr.ZId, shr.Token, shr.ShareMode, shr.BackendMode, shr.FrontendSelection, shr.FrontendEndpoint, shr.BackendProxyEndpoint, shr.Reserved, shr.UniqueName, shr.PermissionMode, shr.Id)
if err != nil { if err != nil {
return errors.Wrap(err, "error executing shares update statement") return errors.Wrap(err, "error executing shares update statement")
} }

View File

@ -1,93 +0,0 @@
package store
import (
"fmt"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
)
// ShareLimitJournal records a limit-related event against a single share, together with the
// bandwidth totals observed when the event was written.
type ShareLimitJournal struct {
	Model
	ShareId int   // id of the share this entry applies to
	RxBytes int64 // received bytes observed at event time
	TxBytes int64 // transmitted bytes observed at event time
	Action  LimitJournalAction // event kind; presumably "warning"/"limit"/"clear" — confirm against limit action constants
}
// CreateShareLimitJournal inserts a new share_limit_journal row inside the given transaction and
// returns the generated id.
func (str *Store) CreateShareLimitJournal(j *ShareLimitJournal, trx *sqlx.Tx) (int, error) {
	stmt, err := trx.Prepare("insert into share_limit_journal (share_id, rx_bytes, tx_bytes, action) values ($1, $2, $3, $4) returning id")
	if err != nil {
		return 0, errors.Wrap(err, "error preparing share_limit_journal insert statement")
	}
	var journalId int
	err = stmt.QueryRow(j.ShareId, j.RxBytes, j.TxBytes, j.Action).Scan(&journalId)
	if err != nil {
		return 0, errors.Wrap(err, "error executing share_limit_journal insert statement")
	}
	return journalId, nil
}
// IsShareLimitJournalEmpty reports whether no journal rows exist for the given share.
func (str *Store) IsShareLimitJournalEmpty(shrId int, trx *sqlx.Tx) (bool, error) {
	var total int
	err := trx.QueryRowx("select count(0) from share_limit_journal where share_id = $1", shrId).Scan(&total)
	if err != nil {
		return false, err
	}
	return total == 0, nil
}
// FindLatestShareLimitJournal returns the most recently created journal row for a share; errors
// (including no rows) are wrapped and returned.
func (str *Store) FindLatestShareLimitJournal(shrId int, trx *sqlx.Tx) (*ShareLimitJournal, error) {
	latest := &ShareLimitJournal{}
	err := trx.QueryRowx("select * from share_limit_journal where share_id = $1 order by created_at desc limit 1", shrId).StructScan(latest)
	if err != nil {
		return nil, errors.Wrap(err, "error finding share_limit_journal by share_id")
	}
	return latest, nil
}
// FindSelectedLatestShareLimitjournal returns the most recent journal entry for each of the
// requested share ids. Shares without entries are simply absent from the result. Returns
// (nil, nil) when shrIds is empty.
//
// NOTE(review): the lowercase 'j' in the method name is kept for caller compatibility.
// Fix: the rows cursor is now closed on every path and rows.Err() is checked after iteration.
func (str *Store) FindSelectedLatestShareLimitjournal(shrIds []int, trx *sqlx.Tx) ([]*ShareLimitJournal, error) {
	if len(shrIds) < 1 {
		return nil, nil
	}
	// shrIds are ints, so direct interpolation into the "in (...)" clause is injection-safe
	in := "("
	for i := range shrIds {
		if i > 0 {
			in += ", "
		}
		in += fmt.Sprintf("%d", shrIds[i])
	}
	in += ")"
	rows, err := trx.Queryx("select id, share_id, rx_bytes, tx_bytes, action, created_at, updated_at from share_limit_journal where id in (select max(id) as id from share_limit_journal group by share_id) and share_id in " + in)
	if err != nil {
		return nil, errors.Wrap(err, "error selecting all latest share_limit_journal")
	}
	defer func() { _ = rows.Close() }()
	var sljs []*ShareLimitJournal
	for rows.Next() {
		slj := &ShareLimitJournal{}
		if err := rows.StructScan(slj); err != nil {
			return nil, errors.Wrap(err, "error scanning share_limit_journal")
		}
		sljs = append(sljs, slj)
	}
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating share_limit_journal rows")
	}
	return sljs, nil
}
// FindAllLatestShareLimitJournal returns the most recent journal entry for every share that has at
// least one entry.
//
// Fix: the rows cursor is now closed on every path and rows.Err() is checked after iteration.
func (str *Store) FindAllLatestShareLimitJournal(trx *sqlx.Tx) ([]*ShareLimitJournal, error) {
	rows, err := trx.Queryx("select id, share_id, rx_bytes, tx_bytes, action, created_at, updated_at from share_limit_journal where id in (select max(id) as id from share_limit_journal group by share_id)")
	if err != nil {
		return nil, errors.Wrap(err, "error selecting all latest share_limit_journal")
	}
	defer func() { _ = rows.Close() }()
	var sljs []*ShareLimitJournal
	for rows.Next() {
		slj := &ShareLimitJournal{}
		if err := rows.StructScan(slj); err != nil {
			return nil, errors.Wrap(err, "error scanning share_limit_journal")
		}
		sljs = append(sljs, slj)
	}
	if err := rows.Err(); err != nil {
		return nil, errors.Wrap(err, "error iterating share_limit_journal rows")
	}
	return sljs, nil
}
// DeleteShareLimitJournalForShare removes every journal row belonging to the given share.
func (str *Store) DeleteShareLimitJournalForShare(shrId int, trx *sqlx.Tx) error {
	_, err := trx.Exec("delete from share_limit_journal where share_id = $1", shrId)
	if err != nil {
		return errors.Wrapf(err, "error deleting share_limit_journal for '#%d'", shrId)
	}
	return nil
}

View File

@ -0,0 +1,36 @@
-- +migrate Up

-- limit_action: what the limits agent does when a bandwidth threshold is crossed.
create type limit_action as enum ('warning', 'limit');

-- limit_classes: reusable bundles of resource-count and bandwidth limits.
-- A value of -1 in any count/byte column means "unlimited" for that dimension.
create table limit_classes (
id serial primary key,
backend_mode backend_mode,
environments int not null default (-1),
shares int not null default (-1),
reserved_shares int not null default (-1),
unique_names int not null default (-1),
period_minutes int not null default (1440),
rx_bytes bigint not null default (-1),
tx_bytes bigint not null default (-1),
total_bytes bigint not null default (-1),
limit_action limit_action not null default ('limit'),
created_at timestamptz not null default(current_timestamp),
updated_at timestamptz not null default(current_timestamp),
deleted boolean not null default(false)
);

-- applied_limit_classes: many-to-many application of limit classes to accounts.
create table applied_limit_classes (
id serial primary key,
account_id integer not null references accounts (id),
limit_class_id integer not null references limit_classes (id),
created_at timestamptz not null default(current_timestamp),
updated_at timestamptz not null default(current_timestamp),
deleted boolean not null default(false)
);

create index applied_limit_classes_account_id_idx on applied_limit_classes (account_id);
create index applied_limit_classes_limit_class_id_idx on applied_limit_classes (limit_class_id);

View File

@ -0,0 +1,7 @@
-- +migrate Up

-- limit_check_locks: one row per account, used as an upsert target to serialize
-- concurrent limit checks for that account.
create table limit_check_locks (
id serial primary key,
account_id integer not null references accounts (id) unique,
updated_at timestamptz not null default(current_timestamp)
);

View File

@ -0,0 +1,3 @@
-- +migrate Up

-- flag shares that hold a reserved unique name
alter table shares add column unique_name boolean not null default (false);

View File

@ -0,0 +1,21 @@
-- +migrate Up

-- the old per-resource journals are replaced by a single per-account bandwidth journal
drop table account_limit_journal;
drop table environment_limit_journal;
drop table share_limit_journal;

-- NOTE(review): recreates limit_action_type; assumes the old type exists in every deployment
-- (no "if exists" guard) — confirm before shipping
drop type limit_action_type;
create type limit_action_type as enum ('warning', 'limit');

-- bandwidth_limit_journal: one row per limit event per account;
-- limit_class_id is nullable (presumably null for globally-configured limits — confirm)
create table bandwidth_limit_journal (
id serial primary key,
account_id integer references accounts (id) not null,
limit_class_id integer references limit_classes (id),
action limit_action_type not null,
rx_bytes bigint not null,
tx_bytes bigint not null,
created_at timestamptz not null default(current_timestamp),
updated_at timestamptz not null default(current_timestamp)
);

create index bandwidth_limit_journal_account_id_idx on bandwidth_limit_journal (account_id);

View File

@ -0,0 +1,3 @@
-- +migrate Up

-- per-share frontend count limit; -1 means "unlimited"
alter table limit_classes add column share_frontends int not null default (-1);

View File

@ -0,0 +1,17 @@
-- +migrate Up

-- frontends default to the 'open' permission mode; 'closed' frontends consult frontend_grants
alter table frontends add column permission_mode permission_mode_type not null default('open');

-- frontend_grants: which accounts may create shares using a closed-permission-mode frontend
create table frontend_grants (
id serial primary key,
account_id integer references accounts (id) not null,
frontend_id integer references frontends (id) not null,
created_at timestamptz not null default(current_timestamp),
updated_at timestamptz not null default(current_timestamp),
deleted boolean not null default(false)
);

create index frontend_grants_account_id_idx on frontend_grants (account_id);
create index frontend_grants_frontend_id_idx on frontend_grants (frontend_id);

View File

@ -0,0 +1,3 @@
-- +migrate Up

-- optional human-readable name for a limit class
alter table limit_classes add column label varchar(32);

View File

@ -0,0 +1,34 @@
-- +migrate Up

-- limit_classes: reusable bundles of resource-count and bandwidth limits (sqlite variant of the
-- postgres migration). A value of -1 in any count/byte column means "unlimited".
create table limit_classes (
id integer primary key,
backend_mode string,
environments integer not null default (-1),
shares integer not null default (-1),
reserved_shares integer not null default (-1),
unique_names integer not null default (-1),
period_minutes integer not null default (1440),
rx_bytes bigint not null default (-1),
tx_bytes bigint not null default (-1),
total_bytes bigint not null default (-1),
limit_action string not null default ('limit'),
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
deleted boolean not null default(false)
);

-- applied_limit_classes: many-to-many application of limit classes to accounts.
create table applied_limit_classes (
id integer primary key,
account_id integer not null references accounts (id),
limit_class_id integer not null references limit_classes (id),
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
deleted boolean not null default(false)
);

create index applied_limit_classes_account_id_idx on applied_limit_classes (account_id);
create index applied_limit_classes_limit_class_id_idx on applied_limit_classes (limit_class_id);

View File

@ -0,0 +1,3 @@
-- +migrate Up

-- flag shares that hold a reserved unique name (sqlite variant)
alter table shares add column unique_name boolean not null default (false);

View File

@ -0,0 +1,18 @@
-- +migrate Up

-- the old per-resource journals are replaced by a single per-account bandwidth journal
drop table account_limit_journal;
drop table environment_limit_journal;
drop table share_limit_journal;

-- bandwidth_limit_journal: one row per limit event per account;
-- limit_class_id is nullable (presumably null for globally-configured limits — confirm)
create table bandwidth_limit_journal (
id integer primary key,
account_id integer references accounts (id) not null,
limit_class_id integer references limit_classes,
action string not null,
rx_bytes bigint not null,
tx_bytes bigint not null,
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now'))
);

create index bandwidth_limit_journal_account_id_idx on bandwidth_limit_journal (account_id);

View File

@ -0,0 +1,3 @@
-- +migrate Up

-- per-share frontend count limit; -1 means "unlimited" (sqlite variant)
alter table limit_classes add column share_frontends int not null default (-1);

View File

@ -0,0 +1,17 @@
-- +migrate Up

-- frontends default to the 'open' permission mode; 'closed' frontends consult frontend_grants
alter table frontends add column permission_mode string not null default('open');

-- frontend_grants: which accounts may create shares using a closed-permission-mode frontend
create table frontend_grants (
id integer primary key,
account_id integer references accounts (id) not null,
frontend_id integer references frontends (id) not null,
created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')),
deleted boolean not null default(false)
);

create index frontend_grants_account_id_idx on frontend_grants (account_id);
create index frontend_grants_frontend_id_idx on frontend_grants (frontend_id);

View File

@ -0,0 +1,3 @@
-- +migrate Up

-- optional human-readable name for a limit class (sqlite variant)
alter table limit_classes add column label varchar(32);

View File

@ -22,8 +22,9 @@ type Model struct {
} }
type Config struct { type Config struct {
Path string `cf:"+secret"` Path string `cf:"+secret"`
Type string Type string
EnableLocking bool
} }
type Store struct { type Store struct {

View File

@ -90,9 +90,9 @@ ZROK_OAUTH_GOOGLE_CLIENT_ID=abcd1234
ZROK_OAUTH_GOOGLE_CLIENT_SECRET=abcd1234 ZROK_OAUTH_GOOGLE_CLIENT_SECRET=abcd1234
# zrok version, e.g., 1.0.0 # zrok version, e.g., 1.0.0
ZROK_IMAGE_TAG=latest ZROK_CLI_TAG=latest
# ziti version, e.g., 1.0.0 # ziti version, e.g., 1.0.0
ZITI_IMAGE_TAG=latest ZITI_CLI_TAG=latest
``` ```
### Start the Docker Compose Project ### Start the Docker Compose Project

View File

@ -4,7 +4,7 @@
# /___|_| \___/|_|\_\ # /___|_| \___/|_|\_\
# controller configuration # controller configuration
v: 3 v: 4
admin: admin:
# generate these admin tokens from a source of randomness, e.g. # generate these admin tokens from a source of randomness, e.g.
# LC_ALL=C tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c32 # LC_ALL=C tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c32

View File

@ -4,85 +4,207 @@ sidebar_position: 40
# Configuring Limits # Configuring Limits
> If you have not yet configured [metrics](configuring-metrics.md), please visit the [metrics guide](configuring-metrics.md) first before working through the limits configuration. :::note
This guide is current as of zrok version `v0.4.31`.
:::
The limits facility in `zrok` is responsible for controlling the number of resources in use (environments, shares) and also for ensuring that any single account, environment, or share is held below the configured thresholds. :::warning
If you have not yet configured [metrics](configuring-metrics.md), please visit the [metrics guide](configuring-metrics.md) first before working through the limits configuration.
:::
Take this `zrok` controller configuration stanza as an example: ## Understanding the zrok Limits Agent
The limits agent is a component of the zrok controller. It can be enabled and configured through the zrok controller configuration.
The limits agent is responsible for controlling the number of resources in use (environments, shares, etc.) and also for ensuring that accounts are held below the configured data transfer bandwidth thresholds. The limits agent exists to manage resource consumption for larger, multi-user zrok installations.
### Types of Limits
Limits can be specified that control the number of environments, shares, reserved shares, unique names, and frontends per-share that can be created by an account. Limits that control the allowed number of resources are called _resource count limits_.
Limits can be specified to control the amount of data that can be transferred within a time period. Limits that control the amount of data that can be transferred are called _bandwidth limits_.
zrok limits can be specified _globally_, applying to all users in a service instance. Limit _classes_ can be created to provide additional levels of resource allocation. Limit classes can then be _applied_ to multiple accounts, to alter their limit allocation beyond what's configured in the global configuration.
## The Global Configuration
The reference configuration for the zrok controller (found at [`etc/ctrl.yml`](https://github.com/openziti/zrok/blob/main/etc/ctrl.yml) in the [repository](https://github.com/openziti/zrok)) contains the global limits configuration, which looks like this:
```yaml ```yaml
# Service instance limits global configuration.
#
# See `docs/guides/metrics-and-limits/configuring-limits.md` for details.
#
limits: limits:
enforcing: true
cycle: 1m
environments: -1 environments: -1
shares: -1 shares: -1
reserved_shares: -1
unique_names: -1
share_frontends: -1
bandwidth: bandwidth:
per_account: period: 5m
period: 5m warning:
warning: rx: -1
rx: -1 tx: -1
tx: -1 total: 7242880
total: 7242880 limit:
limit: rx: -1
rx: -1 tx: -1
tx: -1 total: 10485760
total: 10485760 enforcing: false
per_environment: cycle: 5m
period: 5m
warning:
rx: -1
tx: -1
total: -1
limit:
rx: -1
tx: -1
total: -1
per_share:
period: 5m
warning:
rx: -1
tx: -1
total: -1
limit:
rx: -1
tx: -1
total: -1
``` ```
## The Global Controls :::note
A value of `-1` appearing in the limits configuration mean the value is _unlimited_.
:::
The `enforcing` boolean will globally enable or disable limits for the controller. The `enforcing` boolean specifies whether or not limits are enabled in the service instance. By default, limits are disabled. No matter what else is configured in this stanza, if `enforcing` is set to `false`, there will be no limits placed on any account in the service instance.
The `cycle` value controls how frequently the limits system will look for limited resources to re-enable. The `cycle` value controls how frequently the limits agent will evaluate enforced limits. When a user exceeds a limit and has their shares disabled, the limits agent will evaluate their bandwidth usage on this interval looking to "relax" the limit once their usage falls below the threshold.
## Resource Limits ### Global Resource Count Limits
The `environments` and `shares` values control the number of environments and shares that are allowed per-account. Any limit value can be set to `-1`, which means _unlimited_. The `environments`, `shares`, `reserved_shares`, `unique_names`, and `share_frontends` specify the resource count limits, globally for the service instance.
## Bandwidth Limits These resource counts will be applied to all users in the service instance by default.
The `bandwidth` section is designed to provide a configurable system for controlling the amount of data transfer that can be performed by users of the `zrok` service instance. The bandwidth limits are configurable for each share, environment, and account. ### Global Bandwidth Limits
`per_account`, `per_environment`, and `per_share` are all configured the same way: The `bandwidth` section defines the global bandwidth limits for all users in the service instance.
The `period` specifies the time window for the bandwidth limit. See the documentation for [`time.Duration.ParseDuration`](https://pkg.go.dev/time#ParseDuration) for details about the format used for these durations. If the `period` is set to 5 minutes, then the limits implementation will monitor the send and receive traffic for the resource (share, environment, or account) for the last 5 minutes, and if the amount of data is greater than either the `warning` or the `limit` threshold, action will be taken. There are two levels of bandwidth limits that can be specified in the global configuration. The first limit defines a _warning_ threshold where the user will receive an email that they are using increased data transfer amounts and will ultimately be subject to a limit. If you do not want this warning email to be sent, then configure all of the values to `-1` (unlimited).
The `rx` value is the number of bytes _received_ by the resource. The `tx` value is the number of bytes _transmitted_ by the resource. And `total` is the combined `rx`+`tx` value. The second limit defines the actual _limit_ threshold, where the limits agent will disable traffic for the account's shares.
If the traffic quantity is greater than the `warning` threshold, the user will receive an email notification letting them know that their data transfer size is rising and will eventually be limited (the email details the limit threshold). Bandwidth limits can be specified in terms of `tx` (or _transmitted_ data), `rx` (or _received_ data), and the `total` bytes that are sent in either direction. If you only want to set the `total` transferred limit, you can set `rx` and `tx` to `-1` (for _unlimited_). You can configure any combination of these values at either the limit or warning levels.
If the traffic quantity is greater than the `limit` threshold, the resources will be limited until the traffic in the window (the last 5 minutes in our example) falls back below the `limit` threshold. The `period` specifies the time window for the bandwidth limit. See the documentation for [`time.Duration.ParseDuration`](https://pkg.go.dev/time#ParseDuration) for details about the format used for these durations. If the `period` is set to 5 minutes, then the limits agent will monitor the transmitted and received traffic for the account for the last 5 minutes, and if the amount of data is greater than either the `warning` or the `limit` threshold, action will be taken.
### Limit Actions In the global configuration example above users are allowed to transfer a total of `10485760` bytes in a `5m` period, and they will receive a warning email after they transfer more than `7242880` bytes in a `5m` period.
When a resource is limited, the actions taken differ depending on what kind of resource is being limited. ## Limit Classes
When a share is limited, the dial service policies for that share are removed. No other action is taken. This means that public frontends will simply return a `404` as if the share is no longer there. Private frontends will also return `404` errors. When the limit is relaxed, the dial policies are put back in place and the share will continue operating normally. The zrok limits agent includes a concept called _limit classes_. Limit classes can be used to define resource count and bandwidth limits that can be selectively applied to individual accounts in a service instance.
When an environment is limited, all of the shares in that environment become limited, and the user is not able to create new shares in that environment. When the limit is relaxed, all of the share limits are relaxed and the user is again able to add shares to the environment. Limit classes are created by creating a record in the `limit_classes` table in the zrok controller database. The table has this schema:
When an account is limited, all of the environments in that account become limited (limiting all of the shares), and the user is not able to create new environments or shares. When the limit is relaxed, all of the environments and shares will return to normal operation. ```sql
CREATE TABLE public.limit_classes (
id integer NOT NULL,
label VARCHAR(32),
backend_mode public.backend_mode,
environments integer DEFAULT '-1'::integer NOT NULL,
shares integer DEFAULT '-1'::integer NOT NULL,
reserved_shares integer DEFAULT '-1'::integer NOT NULL,
unique_names integer DEFAULT '-1'::integer NOT NULL,
share_frontends integer DEFAULT '-1'::integer NOT NULL,
period_minutes integer DEFAULT 1440 NOT NULL,
rx_bytes bigint DEFAULT '-1'::integer NOT NULL,
tx_bytes bigint DEFAULT '-1'::integer NOT NULL,
total_bytes bigint DEFAULT '-1'::integer NOT NULL,
limit_action public.limit_action DEFAULT 'limit'::public.limit_action NOT NULL,
created_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
deleted boolean DEFAULT false NOT NULL
);
```
This schema supports constructing the 3 different types of limit classes that the system provides.
After defining a limit class in the database, it can be applied to specific user accounts (overriding the relevant parts of the global configuration) by inserting a row into the `applied_limit_classes` table:
```sql
CREATE TABLE public.applied_limit_classes (
id integer NOT NULL,
account_id integer NOT NULL,
limit_class_id integer NOT NULL,
created_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
updated_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
deleted boolean DEFAULT false NOT NULL
);
```
Create a row in this table linking the `account_id` to the `limit_class_id` to apply the limit class to a specific user account.
### Unscoped Resource Count Classes
To support overriding the resource count limits defined in the global limits configuration, a site administrator can create a limit class by inserting a row into the `limit_classes` table structured like this:
```sql
insert into limit_classes (environments, shares, reserved_shares, unique_names, share_frontends) values (1, 1, 1, 1, 1);
```
This creates a limit class that sets the `environments`, `shares`, `reserved_shares`, and `unique_names` all to `1`.
When this limit class is applied to a user account those values would override the default resource count values configured globally.
Applying an unscoped resource count class _does not_ affect the bandwidth limits (either globally configured, or via a limit class).
### Unscoped Bandwidth Classes
To support overriding the bandwidth limits defined in the global configuration, a site administrator can create a limit class by inserting a row into the `limit_classes` table structured like this:
```sql
insert into limit_classes (period_minutes, total_bytes, limit_action) values (2, 204800, 'limit');
```
This inserts a limit class that allows for a total bandwidth transfer of `204800` bytes every `2` minutes.
When this limit class is applied to a user account, those values would override the default bandwidth values configured globally.
Applying an unscoped bandwidth class _does not_ affect the resource count limits (either globally configured, or via a limit class).
### Scoped Classes
A scoped limit class specifies _both_ the resource counts (`shares`, `reserved_shares`, and `unique_names`, but *NOT* `environments`) for a *specific* backend mode. Insert a row like this:
```sql
insert into limit_classes (backend_mode, shares, reserved_shares, unique_names, period_minutes, total_bytes, limit_action) values ('web', 2, 1, 1, 2, 4096000, 'limit');
```
Scoped limits are designed to _increase_ the limits for a specific backend mode beyond what the global configuration and the unscoped classes provide. The general approach is to use the global configuration and the unscoped classes to provide the general account limits, and then the scoped classes can be used to further increase (or potentially _decrease_) the limits for a specific backend mode.
If a scoped limit class exists for a specific backend mode, then the limits agent will use that limit in making a decision about limiting the resource count or bandwidth. All other types of shares will fall back to the unscoped classes or the global configuration.
## Limit Actions
When an account exceeds a bandwidth limit, the limits agent will seek to limit the affected shares (based on the combination of global configuration, unscoped limit classes, and scoped limit classes). It applies the limit by removing the underlying OpenZiti dial policies for any frontends that are trying to access the share.
This means that public frontends will simply return a `404` as if the share is no longer there. Private frontends will also return `404` errors. When the limit is relaxed, the dial policies are put back in place and the share will continue operating normally.
## Unlimited Accounts ## Unlimited Accounts
The `accounts` table in the database includes a `limitless` column. When this column is set to `true` the account is not subject to any of the limits in the system. The `accounts` table in the database includes a `limitless` column. When this column is set to `true` the account is not subject to any of the limits in the system.
## Experimental Limits Locking
zrok versions prior to `v0.4.31` had a potential race condition when enforcing resource count limits. This usually only manifested in cases where shares or environments were being allocated programmatically (and fast enough to win the limits race).
This occurs due to a lack of transactional database locking around the limited structures. `v0.4.31` includes a pessimistic locking facility that can be enabled _only_ on the PostgreSQL store implementation.
If you're running PostgreSQL for your service instance and you want to enable the new experimental locking facility that eliminates the potential resource count race condition, add the `enable_locking: true` flag to your `store` definition:
```yaml
store:
enable_locking: true
```
## Caveats
There are a number of caveats that are important to understand when using the limits agent with more complicated limits scenarios:
### Aggregate Bandwidth
The zrok limits agent is a work in progress. The system currently does not track bandwidth individually for each backend mode type, which means all bandwidth values are aggregated between all of the share types that an account might be using. This will likely change in an upcoming release.
### Administration Through SQL
There are currently no administrative API endpoints (or corresponding CLI tools) to support creating and applying limit classes in the current release. The limits agent infrastructure was designed to support software integrations that directly manipulate the underlying database structures.
A future release may provide API and CLI tooling to support the human administration of the limits agent.
### Performance
Be sure to minimize the number of different periods used for specifying bandwidth limits. Specifying limits in multiple different periods can cause a multiplicity of queries to be executed against the metrics store (InfluxDB). Standardizing on a period like `24h` or `6h` and using that consistently is the best way to manage the performance of the metrics store.

View File

@ -124,6 +124,7 @@ func (t *FilesystemTarget) WriteStream(path string, stream io.Reader, mode os.Fi
if err != nil { if err != nil {
return err return err
} }
defer f.Close()
_, err = io.Copy(f, stream) _, err = io.Copy(f, stream)
if err != nil { if err != nil {
return err return err

View File

@ -13,6 +13,7 @@ type Root interface {
Client() (*rest_client_zrok.Zrok, error) Client() (*rest_client_zrok.Zrok, error)
ApiEndpoint() (string, string) ApiEndpoint() (string, string)
DefaultFrontend() (string, string)
IsEnabled() bool IsEnabled() bool
Environment() *Environment Environment() *Environment
@ -34,7 +35,8 @@ type Environment struct {
} }
type Config struct { type Config struct {
ApiEndpoint string ApiEndpoint string
DefaultFrontend string
} }
type Metadata struct { type Metadata struct {

View File

@ -85,6 +85,24 @@ func (r *Root) ApiEndpoint() (string, string) {
return apiEndpoint, from return apiEndpoint, from
} }
func (r *Root) DefaultFrontend() (string, string) {
defaultFrontend := "public"
from := "binary"
if r.Config() != nil && r.Config().DefaultFrontend != "" {
defaultFrontend = r.Config().DefaultFrontend
from = "config"
}
env := os.Getenv("ZROK_DEFAULT_FRONTEND")
if env != "" {
defaultFrontend = env
from = "ZROK_DEFAULT_FRONTEND"
}
return defaultFrontend, from
}
func (r *Root) Environment() *env_core.Environment { func (r *Root) Environment() *env_core.Environment {
return r.env return r.env
} }

View File

@ -85,6 +85,24 @@ func (r *Root) ApiEndpoint() (string, string) {
return apiEndpoint, from return apiEndpoint, from
} }
func (r *Root) DefaultFrontend() (string, string) {
defaultFrontend := "public"
from := "binary"
if r.Config() != nil && r.Config().DefaultFrontend != "" {
defaultFrontend = r.Config().DefaultFrontend
from = "config"
}
env := os.Getenv("ZROK_DEFAULT_FRONTEND")
if env != "" {
defaultFrontend = env
from = "ZROK_DEFAULT_FRONTEND"
}
return defaultFrontend, from
}
func (r *Root) Environment() *env_core.Environment { func (r *Root) Environment() *env_core.Environment {
return r.env return r.env
} }

View File

@ -223,13 +223,14 @@ func loadConfig() (*env_core.Config, error) {
return nil, errors.Wrapf(err, "error unmarshaling config file '%v'", cf) return nil, errors.Wrapf(err, "error unmarshaling config file '%v'", cf)
} }
out := &env_core.Config{ out := &env_core.Config{
ApiEndpoint: cfg.ApiEndpoint, ApiEndpoint: cfg.ApiEndpoint,
DefaultFrontend: cfg.DefaultFrontend,
} }
return out, nil return out, nil
} }
func saveConfig(cfg *env_core.Config) error { func saveConfig(cfg *env_core.Config) error {
in := &config{ApiEndpoint: cfg.ApiEndpoint} in := &config{ApiEndpoint: cfg.ApiEndpoint, DefaultFrontend: cfg.DefaultFrontend}
data, err := json.MarshalIndent(in, "", " ") data, err := json.MarshalIndent(in, "", " ")
if err != nil { if err != nil {
return errors.Wrap(err, "error marshaling config") return errors.Wrap(err, "error marshaling config")
@ -323,7 +324,8 @@ type metadata struct {
} }
type config struct { type config struct {
ApiEndpoint string `json:"api_endpoint"` ApiEndpoint string `json:"api_endpoint"`
DefaultFrontend string `json:"default_frontend"`
} }
type environment struct { type environment struct {

View File

@ -9,7 +9,7 @@
# configuration, the software will expect this field to be incremented. This protects you against invalid configuration # configuration, the software will expect this field to be incremented. This protects you against invalid configuration
# versions and will refer to you to the documentation when the configuration structure changes. # versions and will refer to you to the documentation when the configuration structure changes.
# #
v: 3 v: 4
admin: admin:
# The `secrets` array contains a list of strings that represent valid `ZROK_ADMIN_TOKEN` values to be used for # The `secrets` array contains a list of strings that represent valid `ZROK_ADMIN_TOKEN` values to be used for
@ -74,44 +74,26 @@ invites:
# #
token_contact: invite@zrok.io token_contact: invite@zrok.io
# Service instance limits configuration. # Service instance limits global configuration.
# #
# See `docs/guides/metrics-and-limits/configuring-limits.md` for details. # See `docs/guides/metrics-and-limits/configuring-limits.md` for details.
# #
limits: limits:
environments: -1 environments: -1
shares: -1 shares: -1
reserved_shares: -1
unique_names: -1
share_frontends: -1
bandwidth: bandwidth:
per_account: period: 5m
period: 5m warning:
warning: rx: -1
rx: -1 tx: -1
tx: -1 total: 7242880
total: 7242880 limit:
limit: rx: -1
rx: -1 tx: -1
tx: -1 total: 10485760
total: 10485760
per_environment:
period: 5m
warning:
rx: -1
tx: -1
total: -1
limit:
rx: -1
tx: -1
total: -1
per_share:
period: 5m
warning:
rx: -1
tx: -1
total: -1
limit:
rx: -1
tx: -1
total: -1
enforcing: false enforcing: false
cycle: 5m cycle: 5m

View File

@ -7,9 +7,12 @@ package rest_model_zrok
import ( import (
"context" "context"
"encoding/json"
"github.com/go-openapi/errors"
"github.com/go-openapi/strfmt" "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag" "github.com/go-openapi/swag"
"github.com/go-openapi/validate"
) )
// CreateFrontendRequest create frontend request // CreateFrontendRequest create frontend request
@ -17,6 +20,10 @@ import (
// swagger:model createFrontendRequest // swagger:model createFrontendRequest
type CreateFrontendRequest struct { type CreateFrontendRequest struct {
// permission mode
// Enum: [open closed]
PermissionMode string `json:"permissionMode,omitempty"`
// public name // public name
PublicName string `json:"public_name,omitempty"` PublicName string `json:"public_name,omitempty"`
@ -29,6 +36,57 @@ type CreateFrontendRequest struct {
// Validate validates this create frontend request // Validate validates this create frontend request
func (m *CreateFrontendRequest) Validate(formats strfmt.Registry) error { func (m *CreateFrontendRequest) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validatePermissionMode(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var createFrontendRequestTypePermissionModePropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["open","closed"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
createFrontendRequestTypePermissionModePropEnum = append(createFrontendRequestTypePermissionModePropEnum, v)
}
}
const (
// CreateFrontendRequestPermissionModeOpen captures enum value "open"
CreateFrontendRequestPermissionModeOpen string = "open"
// CreateFrontendRequestPermissionModeClosed captures enum value "closed"
CreateFrontendRequestPermissionModeClosed string = "closed"
)
// prop value enum
func (m *CreateFrontendRequest) validatePermissionModeEnum(path, location string, value string) error {
if err := validate.EnumCase(path, location, value, createFrontendRequestTypePermissionModePropEnum, true); err != nil {
return err
}
return nil
}
func (m *CreateFrontendRequest) validatePermissionMode(formats strfmt.Registry) error {
if swag.IsZero(m.PermissionMode) { // not required
return nil
}
// value enum
if err := m.validatePermissionModeEnum("permissionMode", "body", m.PermissionMode); err != nil {
return err
}
return nil return nil
} }

View File

@ -1200,6 +1200,13 @@ func init() {
"createFrontendRequest": { "createFrontendRequest": {
"type": "object", "type": "object",
"properties": { "properties": {
"permissionMode": {
"type": "string",
"enum": [
"open",
"closed"
]
},
"public_name": { "public_name": {
"type": "string" "type": "string"
}, },
@ -2956,6 +2963,13 @@ func init() {
"createFrontendRequest": { "createFrontendRequest": {
"type": "object", "type": "object",
"properties": { "properties": {
"permissionMode": {
"type": "string",
"enum": [
"open",
"closed"
]
},
"public_name": { "public_name": {
"type": "string" "type": "string"
}, },

View File

@ -9,8 +9,8 @@
"version": "0.1.0", "version": "0.1.0",
"license": "Apache-2.0", "license": "Apache-2.0",
"dependencies": { "dependencies": {
"@openziti/ziti-sdk-nodejs": "^0.16.0", "@openziti/ziti-sdk-nodejs": "^0.17.0",
"axios": "^1.6.8", "axios": "^1.7.2",
"express": "^4.19.2" "express": "^4.19.2"
}, },
"devDependencies": { "devDependencies": {
@ -498,10 +498,11 @@
} }
}, },
"node_modules/@openziti/ziti-sdk-nodejs": { "node_modules/@openziti/ziti-sdk-nodejs": {
"version": "0.16.0", "version": "0.17.0",
"resolved": "https://registry.npmjs.org/@openziti/ziti-sdk-nodejs/-/ziti-sdk-nodejs-0.16.0.tgz", "resolved": "https://registry.npmjs.org/@openziti/ziti-sdk-nodejs/-/ziti-sdk-nodejs-0.17.0.tgz",
"integrity": "sha512-jQG5Yn6XojfGXkVHliZReY48bq7P2fFWzyOtXw37GdTeo+RQRl9YS57ieRF70NrlL0oEkO1/84wSQBfpX+uj+A==", "integrity": "sha512-eufD2LxhRfB8yPUkUFStFJN4GAmLM8u2m0BKAwOdcYy7KTrgWpiDhE/tt2orCtTdd7F+opUSA590ubY48g9RNQ==",
"hasInstallScript": true, "hasInstallScript": true,
"license": "Apache-2.0",
"dependencies": { "dependencies": {
"@mapbox/node-pre-gyp": "^1.0.11", "@mapbox/node-pre-gyp": "^1.0.11",
"bindings": "^1.5.0", "bindings": "^1.5.0",
@ -771,9 +772,10 @@
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
}, },
"node_modules/axios": { "node_modules/axios": {
"version": "1.6.8", "version": "1.7.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.8.tgz", "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz",
"integrity": "sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==", "integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==",
"license": "MIT",
"dependencies": { "dependencies": {
"follow-redirects": "^1.15.6", "follow-redirects": "^1.15.6",
"form-data": "^4.0.0", "form-data": "^4.0.0",
@ -3518,9 +3520,9 @@
} }
}, },
"@openziti/ziti-sdk-nodejs": { "@openziti/ziti-sdk-nodejs": {
"version": "0.16.0", "version": "0.17.0",
"resolved": "https://registry.npmjs.org/@openziti/ziti-sdk-nodejs/-/ziti-sdk-nodejs-0.16.0.tgz", "resolved": "https://registry.npmjs.org/@openziti/ziti-sdk-nodejs/-/ziti-sdk-nodejs-0.17.0.tgz",
"integrity": "sha512-jQG5Yn6XojfGXkVHliZReY48bq7P2fFWzyOtXw37GdTeo+RQRl9YS57ieRF70NrlL0oEkO1/84wSQBfpX+uj+A==", "integrity": "sha512-eufD2LxhRfB8yPUkUFStFJN4GAmLM8u2m0BKAwOdcYy7KTrgWpiDhE/tt2orCtTdd7F+opUSA590ubY48g9RNQ==",
"requires": { "requires": {
"@mapbox/node-pre-gyp": "^1.0.11", "@mapbox/node-pre-gyp": "^1.0.11",
"bindings": "^1.5.0", "bindings": "^1.5.0",
@ -3754,9 +3756,9 @@
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
}, },
"axios": { "axios": {
"version": "1.6.8", "version": "1.7.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.8.tgz", "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz",
"integrity": "sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==", "integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==",
"requires": { "requires": {
"follow-redirects": "^1.15.6", "follow-redirects": "^1.15.6",
"form-data": "^4.0.0", "form-data": "^4.0.0",

View File

@ -30,8 +30,8 @@
}, },
"homepage": "https://github.com/openziti/zrok#readme", "homepage": "https://github.com/openziti/zrok#readme",
"dependencies": { "dependencies": {
"@openziti/ziti-sdk-nodejs": "^0.16.0", "@openziti/ziti-sdk-nodejs": "^0.17.0",
"axios": "^1.6.8", "axios": "^1.7.2",
"express": "^4.19.2" "express": "^4.19.2"
}, },
"exports": { "exports": {

View File

@ -1 +1 @@
7.4.0 7.6.0

View File

@ -16,6 +16,7 @@ export class CreateFrontendRequest {
'zId'?: string; 'zId'?: string;
'urlTemplate'?: string; 'urlTemplate'?: string;
'publicName'?: string; 'publicName'?: string;
'permissionMode'?: CreateFrontendRequest.PermissionModeEnum;
static discriminator: string | undefined = undefined; static discriminator: string | undefined = undefined;
@ -34,6 +35,11 @@ export class CreateFrontendRequest {
"name": "publicName", "name": "publicName",
"baseName": "public_name", "baseName": "public_name",
"type": "string" "type": "string"
},
{
"name": "permissionMode",
"baseName": "permissionMode",
"type": "CreateFrontendRequest.PermissionModeEnum"
} ]; } ];
static getAttributeTypeMap() { static getAttributeTypeMap() {
@ -41,3 +47,9 @@ export class CreateFrontendRequest {
} }
} }
export namespace CreateFrontendRequest {
export enum PermissionModeEnum {
Open = <any> 'open',
Closed = <any> 'closed'
}
}

View File

@ -108,6 +108,7 @@ let primitives = [
]; ];
let enumsMap: {[index: string]: any} = { let enumsMap: {[index: string]: any} = {
"CreateFrontendRequest.PermissionModeEnum": CreateFrontendRequest.PermissionModeEnum,
"ShareRequest.ShareModeEnum": ShareRequest.ShareModeEnum, "ShareRequest.ShareModeEnum": ShareRequest.ShareModeEnum,
"ShareRequest.BackendModeEnum": ShareRequest.BackendModeEnum, "ShareRequest.BackendModeEnum": ShareRequest.BackendModeEnum,
"ShareRequest.OauthProviderEnum": ShareRequest.OauthProviderEnum, "ShareRequest.OauthProviderEnum": ShareRequest.OauthProviderEnum,

View File

@ -120,7 +120,8 @@ export namespace ShareRequest {
UdpTunnel = <any> 'udpTunnel', UdpTunnel = <any> 'udpTunnel',
Caddy = <any> 'caddy', Caddy = <any> 'caddy',
Drive = <any> 'drive', Drive = <any> 'drive',
Socks = <any> 'socks' Socks = <any> 'socks',
Vpn = <any> 'vpn'
} }
export enum OauthProviderEnum { export enum OauthProviderEnum {
Github = <any> 'github', Github = <any> 'github',

View File

@ -30,20 +30,23 @@ class CreateFrontendRequest(object):
swagger_types = { swagger_types = {
'z_id': 'str', 'z_id': 'str',
'url_template': 'str', 'url_template': 'str',
'public_name': 'str' 'public_name': 'str',
'permission_mode': 'str'
} }
attribute_map = { attribute_map = {
'z_id': 'zId', 'z_id': 'zId',
'url_template': 'url_template', 'url_template': 'url_template',
'public_name': 'public_name' 'public_name': 'public_name',
'permission_mode': 'permissionMode'
} }
def __init__(self, z_id=None, url_template=None, public_name=None): # noqa: E501 def __init__(self, z_id=None, url_template=None, public_name=None, permission_mode=None): # noqa: E501
"""CreateFrontendRequest - a model defined in Swagger""" # noqa: E501 """CreateFrontendRequest - a model defined in Swagger""" # noqa: E501
self._z_id = None self._z_id = None
self._url_template = None self._url_template = None
self._public_name = None self._public_name = None
self._permission_mode = None
self.discriminator = None self.discriminator = None
if z_id is not None: if z_id is not None:
self.z_id = z_id self.z_id = z_id
@ -51,6 +54,8 @@ class CreateFrontendRequest(object):
self.url_template = url_template self.url_template = url_template
if public_name is not None: if public_name is not None:
self.public_name = public_name self.public_name = public_name
if permission_mode is not None:
self.permission_mode = permission_mode
@property @property
def z_id(self): def z_id(self):
@ -115,6 +120,33 @@ class CreateFrontendRequest(object):
self._public_name = public_name self._public_name = public_name
@property
def permission_mode(self):
"""Gets the permission_mode of this CreateFrontendRequest. # noqa: E501
:return: The permission_mode of this CreateFrontendRequest. # noqa: E501
:rtype: str
"""
return self._permission_mode
@permission_mode.setter
def permission_mode(self, permission_mode):
"""Sets the permission_mode of this CreateFrontendRequest.
:param permission_mode: The permission_mode of this CreateFrontendRequest. # noqa: E501
:type: str
"""
allowed_values = ["open", "closed"] # noqa: E501
if permission_mode not in allowed_values:
raise ValueError(
"Invalid value for `permission_mode` ({0}), must be one of {1}" # noqa: E501
.format(permission_mode, allowed_values)
)
self._permission_mode = permission_mode
def to_dict(self): def to_dict(self):
"""Returns the model properties as a dict""" """Returns the model properties as a dict"""
result = {} result = {}

View File

@ -766,6 +766,9 @@ definitions:
type: string type: string
public_name: public_name:
type: string type: string
permissionMode:
type: string
enum: ["open", "closed"]
createFrontendResponse: createFrontendResponse:
type: object type: object

View File

@ -1,7 +1,6 @@
package ui package ui
import ( import (
"github.com/sirupsen/logrus"
"io/fs" "io/fs"
"net/http" "net/http"
"os" "os"
@ -10,10 +9,8 @@ import (
) )
func Middleware(handler http.Handler, healthCheck func(w http.ResponseWriter, r *http.Request)) http.Handler { func Middleware(handler http.Handler, healthCheck func(w http.ResponseWriter, r *http.Request)) http.Handler {
logrus.Infof("building")
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.HasPrefix(r.URL.Path, "/api/v1") { if strings.HasPrefix(r.URL.Path, "/api/v1") {
logrus.Debugf("directing '%v' to api handler", r.URL.Path)
handler.ServeHTTP(w, r) handler.ServeHTTP(w, r)
return return
} }
@ -22,8 +19,6 @@ func Middleware(handler http.Handler, healthCheck func(w http.ResponseWriter, r
return return
} }
logrus.Debugf("directing '%v' to static handler", r.URL.Path)
staticPath := "build" staticPath := "build"
indexPath := "index.html" indexPath := "index.html"

1002
ui/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -7,20 +7,20 @@
"@emotion/styled": "^11.10.4", "@emotion/styled": "^11.10.4",
"@mdi/js": "^7.0.96", "@mdi/js": "^7.0.96",
"@mdi/react": "^1.6.1", "@mdi/react": "^1.6.1",
"@mui/material": "^5.10.4", "@mui/material": "^5.15.18",
"bootstrap": "^5.2.3", "bootstrap": "^5.2.3",
"dagre": "^0.8.5", "dagre": "^0.8.5",
"eslint-config-react-app": "^7.0.1", "eslint-config-react-app": "^7.0.1",
"humanize-duration": "^3.27.3", "humanize-duration": "^3.27.3",
"moment": "^2.29.4", "moment": "^2.29.4",
"react": "^18.2.0", "react": "^18.3.1",
"react-bootstrap": "^2.7.0", "react-bootstrap": "^2.10.2",
"react-data-table-component": "^7.5.2", "react-data-table-component": "^7.5.2",
"react-dom": "^18.2.0", "react-dom": "^18.3.1",
"react-force-graph": "^1.43.0", "react-force-graph": "^1.43.0",
"react-router-dom": "^6.4.0", "react-router-dom": "^6.23.1",
"react-sizeme": "^3.0.2", "react-sizeme": "^3.0.2",
"recharts": "^2.6.1", "recharts": "^2.12.7",
"styled-components": "^5.3.5", "styled-components": "^5.3.5",
"svgo": "^3.0.2" "svgo": "^3.0.2"
}, },

View File

@ -53,6 +53,7 @@
* @property {string} zId * @property {string} zId
* @property {string} url_template * @property {string} url_template
* @property {string} public_name * @property {string} public_name
* @property {string} permissionMode
*/ */
/** /**

View File

@ -14,7 +14,7 @@ const ActionsTab = (props) => {
return ( return (
<div className={"actions-tab"}> <div className={"actions-tab"}>
<div id={"change-password"} style={{"padding-top": "10px"}}> <div id={"change-password"} style={{"paddingTop": "10px"}}>
<h3>Change Password?</h3> <h3>Change Password?</h3>
<p>Change the password used to log into the zrok web console.</p> <p>Change the password used to log into the zrok web console.</p>
<Button variant={"danger"} onClick={openChangePasswordModal}>Change Password</Button> <Button variant={"danger"} onClick={openChangePasswordModal}>Change Password</Button>

View File

@ -11,12 +11,12 @@
"@docusaurus/core": "^3.3.2", "@docusaurus/core": "^3.3.2",
"@docusaurus/plugin-client-redirects": "^3.3.2", "@docusaurus/plugin-client-redirects": "^3.3.2",
"@docusaurus/preset-classic": "^3.3.2", "@docusaurus/preset-classic": "^3.3.2",
"@mdx-js/react": "^3.0.0", "@mdx-js/react": "^3.0.1",
"clsx": "^1.2.1", "clsx": "^1.2.1",
"prism-react-renderer": "^1.3.5", "prism-react-renderer": "^1.3.5",
"react": "^18.2.0", "react": "^18.3.1",
"react-device-detect": "^2.2.3", "react-device-detect": "^2.2.3",
"react-dom": "^18.2.0", "react-dom": "^18.3.1",
"remark-math": "^5.1.1" "remark-math": "^5.1.1"
}, },
"devDependencies": { "devDependencies": {
@ -2962,9 +2962,10 @@
} }
}, },
"node_modules/@mdx-js/react": { "node_modules/@mdx-js/react": {
"version": "3.0.0", "version": "3.0.1",
"resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.0.tgz", "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.0.1.tgz",
"integrity": "sha512-nDctevR9KyYFyV+m+/+S4cpzCWHqj+iHDHq3QrsWezcC+B17uZdIWgCguESUkwFhM3n/56KxWVE3V6EokrmONQ==", "integrity": "sha512-9ZrPIU4MGf6et1m1ov3zKf+q9+deetI51zprKB1D/z3NOb+rUxxtEl3mCjW5wTGh6VhRdwPueh1oRzi6ezkA8A==",
"license": "MIT",
"dependencies": { "dependencies": {
"@types/mdx": "^2.0.0" "@types/mdx": "^2.0.0"
}, },
@ -12033,9 +12034,10 @@
} }
}, },
"node_modules/react": { "node_modules/react": {
"version": "18.2.0", "version": "18.3.1",
"resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
"integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
"license": "MIT",
"dependencies": { "dependencies": {
"loose-envify": "^1.1.0" "loose-envify": "^1.1.0"
}, },
@ -12174,15 +12176,16 @@
} }
}, },
"node_modules/react-dom": { "node_modules/react-dom": {
"version": "18.2.0", "version": "18.3.1",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
"integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
"license": "MIT",
"dependencies": { "dependencies": {
"loose-envify": "^1.1.0", "loose-envify": "^1.1.0",
"scheduler": "^0.23.0" "scheduler": "^0.23.2"
}, },
"peerDependencies": { "peerDependencies": {
"react": "^18.2.0" "react": "^18.3.1"
} }
}, },
"node_modules/react-error-overlay": { "node_modules/react-error-overlay": {
@ -12948,9 +12951,10 @@
"integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA==" "integrity": "sha512-0s+oAmw9zLl1V1cS9BtZN7JAd0cW5e0QH4W3LWEK6a4LaLEA2OTpGYWDY+6XasBLtz6wkm3u1xRw95mRuJ59WA=="
}, },
"node_modules/scheduler": { "node_modules/scheduler": {
"version": "0.23.0", "version": "0.23.2",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz",
"integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==",
"license": "MIT",
"dependencies": { "dependencies": {
"loose-envify": "^1.1.0" "loose-envify": "^1.1.0"
} }

View File

@ -17,12 +17,12 @@
"@docusaurus/core": "^3.3.2", "@docusaurus/core": "^3.3.2",
"@docusaurus/plugin-client-redirects": "^3.3.2", "@docusaurus/plugin-client-redirects": "^3.3.2",
"@docusaurus/preset-classic": "^3.3.2", "@docusaurus/preset-classic": "^3.3.2",
"@mdx-js/react": "^3.0.0", "@mdx-js/react": "^3.0.1",
"clsx": "^1.2.1", "clsx": "^1.2.1",
"prism-react-renderer": "^1.3.5", "prism-react-renderer": "^1.3.5",
"react": "^18.2.0", "react": "^18.3.1",
"react-device-detect": "^2.2.3", "react-device-detect": "^2.2.3",
"react-dom": "^18.2.0", "react-dom": "^18.3.1",
"remark-math": "^5.1.1" "remark-math": "^5.1.1"
}, },
"devDependencies": { "devDependencies": {