support for displaying limited shares in red in the visualizer (#320)
parent ebcbeeb900
commit 7d48683df7
@@ -2,6 +2,7 @@ package controller

 import (
 	"github.com/go-openapi/runtime/middleware"
+	"github.com/openziti/zrok/controller/store"
 	"github.com/openziti/zrok/rest_model_zrok"
 	"github.com/openziti/zrok/rest_server_zrok/operations/metadata"
 	"github.com/sirupsen/logrus"
@@ -36,7 +37,19 @@ func overviewHandler(_ metadata.OverviewParams, principal *rest_model_zrok.Princ
 				ZID: env.ZId,
 			},
 		}
+
+		var shrIds []int
+		for i := range shrs {
+			shrIds = append(shrIds, shrs[i].Id)
+		}
+		shrsLimited, err := str.FindSelectedLatestShareLimitjournal(shrIds, tx)
+		if err != nil {
+			logrus.Errorf("error finding limited shares for environment '%v': %v", env.ZId, err)
+			return metadata.NewOverviewInternalServerError()
+		}
+		shrsLimitedMap := make(map[int]store.LimitJournalAction)
+		for i := range shrsLimited {
+			shrsLimitedMap[shrsLimited[i].ShareId] = shrsLimited[i].Action
+		}
 		for _, shr := range shrs {
 			feEndpoint := ""
 			if shr.FrontendEndpoint != nil {
@@ -50,7 +63,7 @@ func overviewHandler(_ metadata.OverviewParams, principal *rest_model_zrok.Princ
 			if shr.BackendProxyEndpoint != nil {
 				beProxyEndpoint = *shr.BackendProxyEndpoint
 			}
-			es.Shares = append(es.Shares, &rest_model_zrok.Share{
+			oshr := &rest_model_zrok.Share{
 				Token: shr.Token,
 				ZID: shr.ZId,
 				ShareMode: shr.ShareMode,
@@ -61,7 +74,13 @@ func overviewHandler(_ metadata.OverviewParams, principal *rest_model_zrok.Princ
 				Reserved: shr.Reserved,
 				CreatedAt: shr.CreatedAt.UnixMilli(),
 				UpdatedAt: shr.UpdatedAt.UnixMilli(),
-			})
+			}
+			if action, found := shrsLimitedMap[shr.Id]; found {
+				if action == store.LimitAction {
+					oshr.Limited = true
+				}
+			}
+			es.Shares = append(es.Shares, oshr)
 		}
 		out = append(out, es)
 	}
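The hunk above batches the share ids for an environment, fetches the latest limit-journal row for each in one query, and indexes the result by share id before flagging shares in the response. A minimal, self-contained sketch of that lookup pattern, using hypothetical share and journalEntry types in place of the real store models:

package main

import "fmt"

// Hypothetical stand-ins for the store models used by the handler.
type share struct {
	Id    int
	Token string
}

type journalEntry struct {
	ShareId int
	Action  string // e.g. "limit" when the share has hit its limit
}

func main() {
	shrs := []share{{Id: 1, Token: "abcd1234"}, {Id: 2, Token: "efgh5678"}}
	journal := []journalEntry{{ShareId: 2, Action: "limit"}}

	// Index the latest journal action by share id, then flag matching shares.
	latest := make(map[int]string)
	for _, j := range journal {
		latest[j.ShareId] = j.Action
	}
	for _, s := range shrs {
		limited := latest[s.Id] == "limit"
		fmt.Printf("share %v limited=%v\n", s.Token, limited)
	}
}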
@@ -1,6 +1,7 @@
 package store

 import (
+	"fmt"
 	"github.com/jmoiron/sqlx"
 	"github.com/pkg/errors"
 )
@@ -41,6 +42,33 @@ func (str *Store) FindLatestShareLimitJournal(shrId int, trx *sqlx.Tx) (*ShareLi
 	return j, nil
 }

+func (str *Store) FindSelectedLatestShareLimitjournal(shrIds []int, trx *sqlx.Tx) ([]*ShareLimitJournal, error) {
+	if len(shrIds) < 1 {
+		return nil, nil
+	}
+	in := "("
+	for i := range shrIds {
+		if i > 0 {
+			in += ", "
+		}
+		in += fmt.Sprintf("%d", shrIds[i])
+	}
+	in += ")"
+	rows, err := trx.Queryx("select id, share_id, rx_bytes, tx_bytes, action, created_at, updated_at from share_limit_journal where id in (select max(id) as id from share_limit_journal group by share_id) and share_id in " + in)
+	if err != nil {
+		return nil, errors.Wrap(err, "error selecting all latest share_limit_journal")
+	}
+	var sljs []*ShareLimitJournal
+	for rows.Next() {
+		slj := &ShareLimitJournal{}
+		if err := rows.StructScan(slj); err != nil {
+			return nil, errors.Wrap(err, "error scanning share_limit_journal")
+		}
+		sljs = append(sljs, slj)
+	}
+	return sljs, nil
+}
+
 func (str *Store) FindAllLatestShareLimitJournal(trx *sqlx.Tx) ([]*ShareLimitJournal, error) {
 	rows, err := trx.Queryx("select id, share_id, rx_bytes, tx_bytes, action, created_at, updated_at from share_limit_journal where id in (select max(id) as id from share_limit_journal group by share_id)")
 	if err != nil {
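FindSelectedLatestShareLimitjournal builds its IN clause by formatting the integer ids straight into the SQL string, which is safe here because the ids come from the store rather than from user input. For comparison, a hedged sketch of the same query using sqlx.In with bound parameters instead of string concatenation; it assumes the store package's existing ShareLimitJournal model and its sqlx/errors imports, and is not what the commit does:

// Sketch only: same result set as FindSelectedLatestShareLimitjournal, with the
// id list passed as bound parameters expanded by sqlx.In.
func (str *Store) findSelectedLatestShareLimitJournalIn(shrIds []int, trx *sqlx.Tx) ([]*ShareLimitJournal, error) {
	if len(shrIds) < 1 {
		return nil, nil
	}
	query, args, err := sqlx.In(
		"select id, share_id, rx_bytes, tx_bytes, action, created_at, updated_at "+
			"from share_limit_journal "+
			"where id in (select max(id) as id from share_limit_journal group by share_id) "+
			"and share_id in (?)", shrIds)
	if err != nil {
		return nil, errors.Wrap(err, "error building share_limit_journal query")
	}
	// Rebind converts the '?' placeholders to the driver's bindvar style.
	rows, err := trx.Queryx(trx.Rebind(query), args...)
	if err != nil {
		return nil, errors.Wrap(err, "error selecting latest share_limit_journal")
	}
	var sljs []*ShareLimitJournal
	for rows.Next() {
		slj := &ShareLimitJournal{}
		if err := rows.StructScan(slj); err != nil {
			return nil, errors.Wrap(err, "error scanning share_limit_journal")
		}
		sljs = append(sljs, slj)
	}
	return sljs, nil
}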
@@ -33,6 +33,9 @@ type Environment struct {
 	// host
 	Host string `json:"host,omitempty"`

+	// limited
+	Limited bool `json:"limited,omitempty"`
+
 	// updated at
 	UpdatedAt int64 `json:"updatedAt,omitempty"`
@@ -36,6 +36,9 @@ type Share struct {
 	// frontend selection
 	FrontendSelection string `json:"frontendSelection,omitempty"`

+	// limited
+	Limited bool `json:"limited,omitempty"`
+
 	// reserved
 	Reserved bool `json:"reserved,omitempty"`
@@ -1104,6 +1104,9 @@ func init() {
         "host": {
           "type": "string"
         },
+        "limited": {
+          "type": "boolean"
+        },
         "updatedAt": {
           "type": "integer"
         },
@@ -1308,6 +1311,9 @@ func init() {
         "frontendSelection": {
           "type": "string"
         },
+        "limited": {
+          "type": "boolean"
+        },
         "reserved": {
           "type": "boolean"
         },
@@ -2575,6 +2581,9 @@ func init() {
         "host": {
           "type": "string"
         },
+        "limited": {
+          "type": "boolean"
+        },
         "updatedAt": {
           "type": "integer"
         },
@@ -2779,6 +2788,9 @@ func init() {
         "frontendSelection": {
           "type": "string"
         },
+        "limited": {
+          "type": "boolean"
+        },
         "reserved": {
           "type": "boolean"
         },
@@ -706,6 +706,8 @@ definitions:
         type: string
       activity:
         $ref: "#/definitions/sparkData"
+      limited:
+        type: boolean
       createdAt:
         type: integer
       updatedAt:
@@ -861,6 +863,8 @@ definitions:
         type: boolean
       activity:
         $ref: "#/definitions/sparkData"
+      limited:
+        type: boolean
       createdAt:
         type: integer
       updatedAt:
@@ -88,6 +88,7 @@
  * @property {string} address
  * @property {string} zId
  * @property {module:types.sparkData} activity
+ * @property {boolean} limited
  * @property {number} createdAt
  * @property {number} updatedAt
  */
@@ -201,6 +202,7 @@
  * @property {string} backendProxyEndpoint
  * @property {boolean} reserved
  * @property {module:types.sparkData} activity
+ * @property {boolean} limited
  * @property {number} createdAt
  * @property {number} updatedAt
  */
@@ -9,7 +9,6 @@ const MetricsTab = (props) => {
     const [metrics1, setMetrics1] = useState(buildMetrics([]));

     useEffect(() => {
-        console.log("token", props.share.token);
         metadata.getShareMetrics(props.share.token)
             .then(resp => {
                 setMetrics30(buildMetrics(resp.data));
@@ -27,7 +26,6 @@ const MetricsTab = (props) => {
     useEffect(() => {
         let mounted = true;
         let interval = setInterval(() => {
-            console.log("token", props.share.token);
             metadata.getShareMetrics(props.share.token)
                 .then(resp => {
                     if(mounted) {
@@ -21,7 +21,7 @@ const Network = (props) => {
     }, []);

     const paintNode = (node, ctx) => {
-        let nodeColor = node.selected ? "#9BF316" : "#04adef";
+        let nodeColor = node.selected ? "#9BF316" : node.limited ? "#f00": "#04adef";
         let textColor = "black";

         ctx.textBaseline = "middle";
@@ -12,7 +12,7 @@ const sortNodes = (nodes) => {

 const nodesEqual = (a, b) => {
     if(a.length !== b.length) return false;
-    return a.every((e, i) => e.id === b[i].id);
+    return a.every((e, i) => e.id === b[i].id && e.limited === b[i].limited);
 }

 export const mergeGraph = (oldGraph, user, newOverview) => {
@@ -34,7 +34,8 @@ export const mergeGraph = (oldGraph, user, newOverview) => {
             id: env.environment.zId,
             label: env.environment.description,
             type: "environment",
-            val: 50
+            val: 50,
+            limited: env.limited
         };
         newGraph.nodes.push(envNode);
         newGraph.links.push({
@@ -53,6 +54,7 @@ export const mergeGraph = (oldGraph, user, newOverview) => {
                 envZId: env.environment.zId,
                 label: shrLabel,
                 type: "share",
+                limited: !!shr.limited,
                 val: 50
             };
             newGraph.nodes.push(shrNode);
@@ -75,11 +77,11 @@ export const mergeGraph = (oldGraph, user, newOverview) => {
     // we're going to need to recompute a new graph... but we want to maintain the instances that already exist...

     // we want to preserve nodes that exist in the new graph, and remove those that don't.
-    let outputNodes = oldGraph.nodes.filter(oldNode => newGraph.nodes.find(newNode => newNode.id === oldNode.id));
+    let outputNodes = oldGraph.nodes.filter(oldNode => newGraph.nodes.find(newNode => newNode.id === oldNode.id && newNode.limited === oldNode.limited));
     let outputLinks = oldGraph.nodes.filter(oldLink => newGraph.links.find(newLink => newLink.target === oldLink.target && newLink.source === oldLink.source));

     // and then do the opposite; add any nodes that are in newGraph that are missing from oldGraph.
-    outputNodes.push(...newGraph.nodes.filter(newNode => !outputNodes.find(oldNode => oldNode.id === newNode.id)));
+    outputNodes.push(...newGraph.nodes.filter(newNode => !outputNodes.find(oldNode => oldNode.id === newNode.id && oldNode.limited === newNode.limited)));
     outputLinks.push(...newGraph.links.filter(newLink => !outputLinks.find(oldLink => oldLink.target === newLink.target && oldLink.source === newLink.source)));

     return {