diff --git a/.github/workflows/golang-test-linux.yml b/.github/workflows/golang-test-linux.yml
index a4a3da66c..efe1a2654 100644
--- a/.github/workflows/golang-test-linux.yml
+++ b/.github/workflows/golang-test-linux.yml
@@ -1,4 +1,4 @@
-name: Test Code Linux
+name: Linux
 
 on:
   push:
@@ -12,11 +12,21 @@ concurrency:
 
 jobs:
   build-cache:
+    name: "Build Cache"
     runs-on: ubuntu-22.04
+    outputs:
+      management: ${{ steps.filter.outputs.management }}
    steps:
       - name: Checkout code
         uses: actions/checkout@v4
+      - uses: dorny/paths-filter@v3
+        id: filter
+        with:
+          filters: |
+            management:
+              - 'management/**'
+
       - name: Install Go
         uses: actions/setup-go@v5
         with:
@@ -38,7 +48,6 @@ jobs:
           key: ${{ runner.os }}-gotest-cache-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-gotest-cache-${{ hashFiles('**/go.sum') }}
-
       - name: Install dependencies
         if: steps.cache.outputs.cache-hit != 'true'
@@ -89,6 +98,7 @@ jobs:
         run: CGO_ENABLED=1 GOARCH=386 go build -o relay-386 .
 
   test:
+    name: "Client / Unit"
     needs: [build-cache]
     strategy:
       fail-fast: false
@@ -134,9 +144,116 @@ jobs:
         run: git --no-pager diff --exit-code
 
       - name: Test
-        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} CI=true go test -tags devcert -exec 'sudo' -timeout 10m -p 1 $(go list ./... | grep -v /management)
+        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} CI=true go test -tags devcert -exec 'sudo' -timeout 10m -p 1 $(go list ./... | grep -v -e /management -e /signal -e /relay)
+
+  test_relay:
+    name: "Relay / Unit"
+    needs: [build-cache]
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [ '386','amd64' ]
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.23.x"
+          cache: false
+
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Get Go environment
+        run: |
+          echo "cache=$(go env GOCACHE)" >> $GITHUB_ENV
+          echo "modcache=$(go env GOMODCACHE)" >> $GITHUB_ENV
+
+      - name: Cache Go modules
+        uses: actions/cache/restore@v4
+        with:
+          path: |
+            ${{ env.cache }}
+            ${{ env.modcache }}
+          key: ${{ runner.os }}-gotest-cache-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-gotest-cache-
+
+      - name: Install dependencies
+        run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib libpcap-dev
+
+      - name: Install 32-bit libpcap
+        if: matrix.arch == '386'
+        run: sudo dpkg --add-architecture i386 && sudo apt update && sudo apt-get install -y libpcap0.8-dev:i386
+
+      - name: Install modules
+        run: go mod tidy
+
+      - name: check git status
+        run: git --no-pager diff --exit-code
+
+      - name: Test
+        run: |
+          CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \
+            go test \
+            -exec 'sudo' \
+            -timeout 10m ./relay/...
+
+  test_signal:
+    name: "Signal / Unit"
+    needs: [build-cache]
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [ '386','amd64' ]
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.23.x"
+          cache: false
+
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Get Go environment
+        run: |
+          echo "cache=$(go env GOCACHE)" >> $GITHUB_ENV
+          echo "modcache=$(go env GOMODCACHE)" >> $GITHUB_ENV
+
+      - name: Cache Go modules
+        uses: actions/cache/restore@v4
+        with:
+          path: |
+            ${{ env.cache }}
+            ${{ env.modcache }}
+          key: ${{ runner.os }}-gotest-cache-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-gotest-cache-
+
+      - name: Install dependencies
+        run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib libpcap-dev
+
+      - name: Install 32-bit libpcap
+        if: matrix.arch == '386'
+        run: sudo dpkg --add-architecture i386 && sudo apt update && sudo apt-get install -y libpcap0.8-dev:i386
+
+      - name: Install modules
+        run: go mod tidy
+
+      - name: check git status
+        run: git --no-pager diff --exit-code
+
+      - name: Test
+        run: |
+          CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \
+            go test \
+            -exec 'sudo' \
+            -timeout 10m ./signal/...
 
   test_management:
+    name: "Management / Unit"
     needs: [ build-cache ]
     strategy:
       fail-fast: false
@@ -194,10 +311,17 @@ jobs:
         run: docker pull mlsmaycon/warmed-mysql:8
 
       - name: Test
-        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -tags=devcert -p 1 -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 10m $(go list ./... | grep /management)
+        run: |
+          CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \
+            NETBIRD_STORE_ENGINE=${{ matrix.store }} \
+            go test -tags=devcert \
+            -exec "sudo --preserve-env=CI,NETBIRD_STORE_ENGINE" \
+            -timeout 10m ./management/...
 
   benchmark:
+    name: "Management / Benchmark"
     needs: [ build-cache ]
+    if: ${{ needs.build-cache.outputs.management == 'true' || github.event_name != 'pull_request' }}
     strategy:
       fail-fast: false
       matrix:
@@ -254,10 +378,17 @@ jobs:
         run: docker pull mlsmaycon/warmed-mysql:8
 
       - name: Test
-        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -tags devcert -run=^$ -bench=. -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 20m ./...
+        run: |
+          CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \
+            NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true \
+            go test -tags devcert -run=^$ -bench=. \
+            -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' \
+            -timeout 20m ./...
 
   api_benchmark:
+    name: "Management / Benchmark (API)"
     needs: [ build-cache ]
+    if: ${{ needs.build-cache.outputs.management == 'true' || github.event_name != 'pull_request' }}
     strategy:
       fail-fast: false
       matrix:
@@ -312,12 +443,21 @@ jobs:
       - name: download mysql image
         if: matrix.store == 'mysql'
         run: docker pull mlsmaycon/warmed-mysql:8
-
+
       - name: Test
-        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -run=^$ -tags=benchmark -bench=. -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 30m $(go list -tags=benchmark ./... | grep /management)
+        run: |
+          CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \
+            NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true \
+            go test -tags=benchmark \
+            -run=^$ \
+            -bench=. \
+            -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' \
+            -timeout 20m ./management/...
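Note on the api_benchmark job above: it only picks up code guarded by the benchmark build tag (via -tags=benchmark) and suppresses ordinary tests with -run=^$. As an illustrative sketch only, not code from this change (the file, package, and function names are hypothetical), a test file that this job would compile and run looks roughly like:

//go:build benchmark

package management_test

import "testing"

// BenchmarkExampleAPI shows the shape of a benchmark selected by
// `go test -tags=benchmark -run=^$ -bench=.`; the body is a placeholder.
func BenchmarkExampleAPI(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = i // replace with the API call under measurement
	}
}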
api_integration_test: + name: "Management / Integration" needs: [ build-cache ] + if: ${{ needs.build-cache.outputs.management == 'true' || github.event_name != 'pull_request' }} strategy: fail-fast: false matrix: @@ -363,9 +503,15 @@ jobs: run: git --no-pager diff --exit-code - name: Test - run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -tags=integration -p 1 -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 30m $(go list -tags=integration ./... | grep /management) + run: | + CGO_ENABLED=1 GOARCH=${{ matrix.arch }} \ + NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true \ + go test -tags=integration \ + -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' \ + -timeout 10m ./management/... test_client_on_docker: + name: "Client (Docker) / Unit" needs: [ build-cache ] runs-on: ubuntu-20.04 steps: diff --git a/.golangci.yaml b/.golangci.yaml index 44b03d0e1..461677c2e 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -103,7 +103,7 @@ linters: - predeclared # predeclared finds code that shadows one of Go's predeclared identifiers - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed - - thelper # thelper detects Go test helpers without t.Helper() call and checks the consistency of test helpers. + # - thelper # thelper detects Go test helpers without t.Helper() call and checks the consistency of test helpers. - wastedassign # wastedassign finds wasted assignment statements issues: # Maximum count of issues with the same text. diff --git a/.goreleaser_ui.yaml b/.goreleaser_ui.yaml index 06577f4e3..983aa0e78 100644 --- a/.goreleaser_ui.yaml +++ b/.goreleaser_ui.yaml @@ -53,7 +53,7 @@ nfpms: contents: - src: client/ui/netbird.desktop dst: /usr/share/applications/netbird.desktop - - src: client/ui/netbird-systemtray-connected.png + - src: client/ui/netbird.png dst: /usr/share/pixmaps/netbird.png dependencies: - netbird @@ -70,7 +70,7 @@ nfpms: contents: - src: client/ui/netbird.desktop dst: /usr/share/applications/netbird.desktop - - src: client/ui/netbird-systemtray-connected.png + - src: client/ui/netbird.png dst: /usr/share/pixmaps/netbird.png dependencies: - netbird diff --git a/client/Dockerfile-rootless b/client/Dockerfile-rootless index 62bcaf964..78314ba12 100644 --- a/client/Dockerfile-rootless +++ b/client/Dockerfile-rootless @@ -9,6 +9,7 @@ USER netbird:netbird ENV NB_FOREGROUND_MODE=true ENV NB_USE_NETSTACK_MODE=true +ENV NB_ENABLE_NETSTACK_LOCAL_FORWARDING=true ENV NB_CONFIG=config.json ENV NB_DAEMON_ADDR=unix://netbird.sock ENV NB_DISABLE_DNS=true diff --git a/client/cmd/status.go b/client/cmd/status.go index 6596eaded..1deef487b 100644 --- a/client/cmd/status.go +++ b/client/cmd/status.go @@ -39,7 +39,6 @@ type peerStateDetailOutput struct { TransferSent int64 `json:"transferSent" yaml:"transferSent"` Latency time.Duration `json:"latency" yaml:"latency"` RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"` - Routes []string `json:"routes" yaml:"routes"` Networks []string `json:"networks" yaml:"networks"` } @@ -86,22 +85,23 @@ type nsServerGroupStateOutput struct { } type statusOutputOverview struct { - Peers peersStateOutput `json:"peers" yaml:"peers"` - CliVersion string `json:"cliVersion" yaml:"cliVersion"` - DaemonVersion string `json:"daemonVersion" yaml:"daemonVersion"` - ManagementState managementStateOutput `json:"management" yaml:"management"` - SignalState 
signalStateOutput `json:"signal" yaml:"signal"` - Relays relayStateOutput `json:"relays" yaml:"relays"` - IP string `json:"netbirdIp" yaml:"netbirdIp"` - PubKey string `json:"publicKey" yaml:"publicKey"` - KernelInterface bool `json:"usesKernelInterface" yaml:"usesKernelInterface"` - FQDN string `json:"fqdn" yaml:"fqdn"` - RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"` - RosenpassPermissive bool `json:"quantumResistancePermissive" yaml:"quantumResistancePermissive"` - Routes []string `json:"routes" yaml:"routes"` + Peers peersStateOutput `json:"peers" yaml:"peers"` + CliVersion string `json:"cliVersion" yaml:"cliVersion"` + DaemonVersion string `json:"daemonVersion" yaml:"daemonVersion"` + ManagementState managementStateOutput `json:"management" yaml:"management"` + SignalState signalStateOutput `json:"signal" yaml:"signal"` + Relays relayStateOutput `json:"relays" yaml:"relays"` + IP string `json:"netbirdIp" yaml:"netbirdIp"` + PubKey string `json:"publicKey" yaml:"publicKey"` + KernelInterface bool `json:"usesKernelInterface" yaml:"usesKernelInterface"` + FQDN string `json:"fqdn" yaml:"fqdn"` + RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"` + RosenpassPermissive bool `json:"quantumResistancePermissive" yaml:"quantumResistancePermissive"` + Networks []string `json:"networks" yaml:"networks"` NumberOfForwardingRules int `json:"forwardingRules" yaml:"forwardingRules"` NSServerGroups []nsServerGroupStateOutput `json:"dnsServers" yaml:"dnsServers"` + Events []systemEventOutput `json:"events" yaml:"events"` } var ( @@ -273,22 +273,23 @@ func convertToStatusOutputOverview(resp *proto.StatusResponse) statusOutputOverv peersOverview := mapPeers(resp.GetFullStatus().GetPeers()) overview := statusOutputOverview{ - Peers: peersOverview, - CliVersion: version.NetbirdVersion(), - DaemonVersion: resp.GetDaemonVersion(), - ManagementState: managementOverview, - SignalState: signalOverview, - Relays: relayOverview, - IP: pbFullStatus.GetLocalPeerState().GetIP(), - PubKey: pbFullStatus.GetLocalPeerState().GetPubKey(), - KernelInterface: pbFullStatus.GetLocalPeerState().GetKernelInterface(), - FQDN: pbFullStatus.GetLocalPeerState().GetFqdn(), - RosenpassEnabled: pbFullStatus.GetLocalPeerState().GetRosenpassEnabled(), - RosenpassPermissive: pbFullStatus.GetLocalPeerState().GetRosenpassPermissive(), - Routes: pbFullStatus.GetLocalPeerState().GetNetworks(), + Peers: peersOverview, + CliVersion: version.NetbirdVersion(), + DaemonVersion: resp.GetDaemonVersion(), + ManagementState: managementOverview, + SignalState: signalOverview, + Relays: relayOverview, + IP: pbFullStatus.GetLocalPeerState().GetIP(), + PubKey: pbFullStatus.GetLocalPeerState().GetPubKey(), + KernelInterface: pbFullStatus.GetLocalPeerState().GetKernelInterface(), + FQDN: pbFullStatus.GetLocalPeerState().GetFqdn(), + RosenpassEnabled: pbFullStatus.GetLocalPeerState().GetRosenpassEnabled(), + RosenpassPermissive: pbFullStatus.GetLocalPeerState().GetRosenpassPermissive(), + Networks: pbFullStatus.GetLocalPeerState().GetNetworks(), NumberOfForwardingRules: int(pbFullStatus.GetNumberOfForwardingRules()), NSServerGroups: mapNSGroups(pbFullStatus.GetDnsServers()), + Events: mapEvents(pbFullStatus.GetEvents()), } if anonymizeFlag { @@ -395,7 +396,6 @@ func mapPeers(peers []*proto.PeerState) peersStateOutput { TransferSent: transferSent, Latency: pbPeerState.GetLatency().AsDuration(), RosenpassEnabled: pbPeerState.GetRosenpassEnabled(), - Routes: pbPeerState.GetNetworks(), Networks: 
pbPeerState.GetNetworks(), } @@ -561,7 +561,6 @@ func parseGeneralSummary(overview statusOutputOverview, showURL bool, showRelays "NetBird IP: %s\n"+ "Interface type: %s\n"+ "Quantum resistance: %s\n"+ - "Routes: %s\n"+ "Networks: %s\n"+ "Forwarding rules: %d\n"+ "Peers count: %s\n", @@ -577,7 +576,6 @@ func parseGeneralSummary(overview statusOutputOverview, showURL bool, showRelays interfaceTypeString, rosenpassEnabledStatus, networks, - networks, overview.NumberOfForwardingRules, peersCountString, ) @@ -586,13 +584,17 @@ func parseGeneralSummary(overview statusOutputOverview, showURL bool, showRelays func parseToFullDetailSummary(overview statusOutputOverview) string { parsedPeersString := parsePeers(overview.Peers, overview.RosenpassEnabled, overview.RosenpassPermissive) + parsedEventsString := parseEvents(overview.Events) summary := parseGeneralSummary(overview, true, true, true) return fmt.Sprintf( "Peers detail:"+ + "%s\n"+ + "Events:"+ "%s\n"+ "%s", parsedPeersString, + parsedEventsString, summary, ) } @@ -661,7 +663,6 @@ func parsePeers(peers peersStateOutput, rosenpassEnabled, rosenpassPermissive bo " Last WireGuard handshake: %s\n"+ " Transfer status (received/sent) %s/%s\n"+ " Quantum resistance: %s\n"+ - " Routes: %s\n"+ " Networks: %s\n"+ " Latency: %s\n", peerState.FQDN, @@ -680,7 +681,6 @@ func parsePeers(peers peersStateOutput, rosenpassEnabled, rosenpassPermissive bo toIEC(peerState.TransferSent), rosenpassEnabledStatus, networks, - networks, peerState.Latency.String(), ) @@ -829,14 +829,6 @@ func anonymizePeerDetail(a *anonymize.Anonymizer, peer *peerStateDetailOutput) { for i, route := range peer.Networks { peer.Networks[i] = a.AnonymizeRoute(route) } - - for i, route := range peer.Routes { - peer.Routes[i] = a.AnonymizeIPString(route) - } - - for i, route := range peer.Routes { - peer.Routes[i] = a.AnonymizeRoute(route) - } } func anonymizeOverview(a *anonymize.Anonymizer, overview *statusOutputOverview) { @@ -874,9 +866,14 @@ func anonymizeOverview(a *anonymize.Anonymizer, overview *statusOutputOverview) overview.Networks[i] = a.AnonymizeRoute(route) } - for i, route := range overview.Routes { - overview.Routes[i] = a.AnonymizeRoute(route) - } - overview.FQDN = a.AnonymizeDomain(overview.FQDN) + + for i, event := range overview.Events { + overview.Events[i].Message = a.AnonymizeString(event.Message) + overview.Events[i].UserMessage = a.AnonymizeString(event.UserMessage) + + for k, v := range event.Metadata { + event.Metadata[k] = a.AnonymizeString(v) + } + } } diff --git a/client/cmd/status_event.go b/client/cmd/status_event.go new file mode 100644 index 000000000..9331570e6 --- /dev/null +++ b/client/cmd/status_event.go @@ -0,0 +1,69 @@ +package cmd + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/netbirdio/netbird/client/proto" +) + +type systemEventOutput struct { + ID string `json:"id" yaml:"id"` + Severity string `json:"severity" yaml:"severity"` + Category string `json:"category" yaml:"category"` + Message string `json:"message" yaml:"message"` + UserMessage string `json:"userMessage" yaml:"userMessage"` + Timestamp time.Time `json:"timestamp" yaml:"timestamp"` + Metadata map[string]string `json:"metadata" yaml:"metadata"` +} + +func mapEvents(protoEvents []*proto.SystemEvent) []systemEventOutput { + events := make([]systemEventOutput, len(protoEvents)) + for i, event := range protoEvents { + events[i] = systemEventOutput{ + ID: event.GetId(), + Severity: event.GetSeverity().String(), + Category: event.GetCategory().String(), + Message: 
event.GetMessage(), + UserMessage: event.GetUserMessage(), + Timestamp: event.GetTimestamp().AsTime(), + Metadata: event.GetMetadata(), + } + } + return events +} + +func parseEvents(events []systemEventOutput) string { + if len(events) == 0 { + return " No events recorded" + } + + var eventsString strings.Builder + for _, event := range events { + timeStr := timeAgo(event.Timestamp) + + metadataStr := "" + if len(event.Metadata) > 0 { + pairs := make([]string, 0, len(event.Metadata)) + for k, v := range event.Metadata { + pairs = append(pairs, fmt.Sprintf("%s: %s", k, v)) + } + sort.Strings(pairs) + metadataStr = fmt.Sprintf("\n Metadata: %s", strings.Join(pairs, ", ")) + } + + eventsString.WriteString(fmt.Sprintf("\n [%s] %s (%s)"+ + "\n Message: %s"+ + "\n Time: %s%s", + event.Severity, + event.Category, + event.ID, + event.Message, + timeStr, + metadataStr, + )) + } + return eventsString.String() +} diff --git a/client/cmd/status_test.go b/client/cmd/status_test.go index ada44b2ac..0b0ae4c51 100644 --- a/client/cmd/status_test.go +++ b/client/cmd/status_test.go @@ -146,9 +146,6 @@ var overview = statusOutputOverview{ LastWireguardHandshake: time.Date(2001, 1, 1, 1, 1, 2, 0, time.UTC), TransferReceived: 200, TransferSent: 100, - Routes: []string{ - "10.1.0.0/24", - }, Networks: []string{ "10.1.0.0/24", }, @@ -176,6 +173,7 @@ var overview = statusOutputOverview{ }, }, }, + Events: []systemEventOutput{}, CliVersion: version.NetbirdVersion(), DaemonVersion: "0.14.1", ManagementState: managementStateOutput{ @@ -230,9 +228,6 @@ var overview = statusOutputOverview{ Error: "timeout", }, }, - Routes: []string{ - "10.10.0.0/24", - }, Networks: []string{ "10.10.0.0/24", }, @@ -299,9 +294,6 @@ func TestParsingToJSON(t *testing.T) { "transferSent": 100, "latency": 10000000, "quantumResistance": false, - "routes": [ - "10.1.0.0/24" - ], "networks": [ "10.1.0.0/24" ] @@ -327,7 +319,6 @@ func TestParsingToJSON(t *testing.T) { "transferSent": 1000, "latency": 10000000, "quantumResistance": false, - "routes": null, "networks": null } ] @@ -366,9 +357,6 @@ func TestParsingToJSON(t *testing.T) { "fqdn": "some-localhost.awesome-domain.com", "quantumResistance": false, "quantumResistancePermissive": false, - "routes": [ - "10.10.0.0/24" - ], "networks": [ "10.10.0.0/24" ], @@ -394,7 +382,8 @@ func TestParsingToJSON(t *testing.T) { "enabled": false, "error": "timeout" } - ] + ], + "events": [] }` // @formatter:on @@ -430,8 +419,6 @@ func TestParsingToYAML(t *testing.T) { transferSent: 100 latency: 10ms quantumResistance: false - routes: - - 10.1.0.0/24 networks: - 10.1.0.0/24 - fqdn: peer-2.awesome-domain.com @@ -452,7 +439,6 @@ func TestParsingToYAML(t *testing.T) { transferSent: 1000 latency: 10ms quantumResistance: false - routes: [] networks: [] cliVersion: development daemonVersion: 0.14.1 @@ -480,8 +466,6 @@ usesKernelInterface: true fqdn: some-localhost.awesome-domain.com quantumResistance: false quantumResistancePermissive: false -routes: - - 10.10.0.0/24 networks: - 10.10.0.0/24 forwardingRules: 0 @@ -499,6 +483,7 @@ dnsServers: - example.net enabled: false error: timeout +events: [] ` assert.Equal(t, expectedYAML, yaml) @@ -528,7 +513,6 @@ func TestParsingToDetail(t *testing.T) { Last WireGuard handshake: %s Transfer status (received/sent) 200 B/100 B Quantum resistance: false - Routes: 10.1.0.0/24 Networks: 10.1.0.0/24 Latency: 10ms @@ -545,10 +529,10 @@ func TestParsingToDetail(t *testing.T) { Last WireGuard handshake: %s Transfer status (received/sent) 2.0 KiB/1000 B Quantum resistance: false - 
Routes: - Networks: - Latency: 10ms +Events: No events recorded OS: %s/%s Daemon version: 0.14.1 CLI version: %s @@ -564,7 +548,6 @@ FQDN: some-localhost.awesome-domain.com NetBird IP: 192.168.178.100/16 Interface type: Kernel Quantum resistance: false -Routes: 10.10.0.0/24 Networks: 10.10.0.0/24 Forwarding rules: 0 Peers count: 2/2 Connected @@ -587,7 +570,6 @@ FQDN: some-localhost.awesome-domain.com NetBird IP: 192.168.178.100/16 Interface type: Kernel Quantum resistance: false -Routes: 10.10.0.0/24 Networks: 10.10.0.0/24 Forwarding rules: 0 Peers count: 2/2 Connected diff --git a/client/cmd/trace.go b/client/cmd/trace.go new file mode 100644 index 000000000..b2ff1f1b5 --- /dev/null +++ b/client/cmd/trace.go @@ -0,0 +1,137 @@ +package cmd + +import ( + "fmt" + "math/rand" + "strings" + + "github.com/spf13/cobra" + "google.golang.org/grpc/status" + + "github.com/netbirdio/netbird/client/proto" +) + +var traceCmd = &cobra.Command{ + Use: "trace ", + Short: "Trace a packet through the firewall", + Example: ` + netbird debug trace in 192.168.1.10 10.10.0.2 -p tcp --sport 12345 --dport 443 --syn --ack + netbird debug trace out 10.10.0.1 8.8.8.8 -p udp --dport 53 + netbird debug trace in 10.10.0.2 10.10.0.1 -p icmp --type 8 --code 0 + netbird debug trace in 100.64.1.1 self -p tcp --dport 80`, + Args: cobra.ExactArgs(3), + RunE: tracePacket, +} + +func init() { + debugCmd.AddCommand(traceCmd) + + traceCmd.Flags().StringP("protocol", "p", "tcp", "Protocol (tcp/udp/icmp)") + traceCmd.Flags().Uint16("sport", 0, "Source port") + traceCmd.Flags().Uint16("dport", 0, "Destination port") + traceCmd.Flags().Uint8("icmp-type", 0, "ICMP type") + traceCmd.Flags().Uint8("icmp-code", 0, "ICMP code") + traceCmd.Flags().Bool("syn", false, "TCP SYN flag") + traceCmd.Flags().Bool("ack", false, "TCP ACK flag") + traceCmd.Flags().Bool("fin", false, "TCP FIN flag") + traceCmd.Flags().Bool("rst", false, "TCP RST flag") + traceCmd.Flags().Bool("psh", false, "TCP PSH flag") + traceCmd.Flags().Bool("urg", false, "TCP URG flag") +} + +func tracePacket(cmd *cobra.Command, args []string) error { + direction := strings.ToLower(args[0]) + if direction != "in" && direction != "out" { + return fmt.Errorf("invalid direction: use 'in' or 'out'") + } + + protocol := cmd.Flag("protocol").Value.String() + if protocol != "tcp" && protocol != "udp" && protocol != "icmp" { + return fmt.Errorf("invalid protocol: use tcp/udp/icmp") + } + + sport, err := cmd.Flags().GetUint16("sport") + if err != nil { + return fmt.Errorf("invalid source port: %v", err) + } + dport, err := cmd.Flags().GetUint16("dport") + if err != nil { + return fmt.Errorf("invalid destination port: %v", err) + } + + // For TCP/UDP, generate random ephemeral port (49152-65535) if not specified + if protocol != "icmp" { + if sport == 0 { + sport = uint16(rand.Intn(16383) + 49152) + } + if dport == 0 { + dport = uint16(rand.Intn(16383) + 49152) + } + } + + var tcpFlags *proto.TCPFlags + if protocol == "tcp" { + syn, _ := cmd.Flags().GetBool("syn") + ack, _ := cmd.Flags().GetBool("ack") + fin, _ := cmd.Flags().GetBool("fin") + rst, _ := cmd.Flags().GetBool("rst") + psh, _ := cmd.Flags().GetBool("psh") + urg, _ := cmd.Flags().GetBool("urg") + + tcpFlags = &proto.TCPFlags{ + Syn: syn, + Ack: ack, + Fin: fin, + Rst: rst, + Psh: psh, + Urg: urg, + } + } + + icmpType, _ := cmd.Flags().GetUint32("icmp-type") + icmpCode, _ := cmd.Flags().GetUint32("icmp-code") + + conn, err := getClient(cmd) + if err != nil { + return err + } + defer conn.Close() + + client := 
proto.NewDaemonServiceClient(conn) + resp, err := client.TracePacket(cmd.Context(), &proto.TracePacketRequest{ + SourceIp: args[1], + DestinationIp: args[2], + Protocol: protocol, + SourcePort: uint32(sport), + DestinationPort: uint32(dport), + Direction: direction, + TcpFlags: tcpFlags, + IcmpType: &icmpType, + IcmpCode: &icmpCode, + }) + if err != nil { + return fmt.Errorf("trace failed: %v", status.Convert(err).Message()) + } + + printTrace(cmd, args[1], args[2], protocol, sport, dport, resp) + return nil +} + +func printTrace(cmd *cobra.Command, src, dst, proto string, sport, dport uint16, resp *proto.TracePacketResponse) { + cmd.Printf("Packet trace %s:%d -> %s:%d (%s)\n\n", src, sport, dst, dport, strings.ToUpper(proto)) + + for _, stage := range resp.Stages { + if stage.ForwardingDetails != nil { + cmd.Printf("%s: %s [%s]\n", stage.Name, stage.Message, *stage.ForwardingDetails) + } else { + cmd.Printf("%s: %s\n", stage.Name, stage.Message) + } + } + + disposition := map[bool]string{ + true: "\033[32mALLOWED\033[0m", // Green + false: "\033[31mDENIED\033[0m", // Red + }[resp.FinalDisposition] + + cmd.Printf("\nFinal disposition: %s\n", disposition) +} diff --git a/client/firewall/create.go b/client/firewall/create.go index 9466f4b4d..37ea5ceb3 100644 --- a/client/firewall/create.go +++ b/client/firewall/create.go @@ -14,13 +14,13 @@ import ( ) // NewFirewall creates a firewall manager instance -func NewFirewall(iface IFaceMapper, _ *statemanager.Manager) (firewall.Manager, error) { +func NewFirewall(iface IFaceMapper, _ *statemanager.Manager, disableServerRoutes bool) (firewall.Manager, error) { if !iface.IsUserspaceBind() { return nil, fmt.Errorf("not implemented for this OS: %s", runtime.GOOS) } // use userspace packet filtering firewall - fm, err := uspfilter.Create(iface) + fm, err := uspfilter.Create(iface, disableServerRoutes) if err != nil { return nil, err } diff --git a/client/firewall/create_linux.go b/client/firewall/create_linux.go index 076d08ec2..be1b37916 100644 --- a/client/firewall/create_linux.go +++ b/client/firewall/create_linux.go @@ -33,12 +33,12 @@ const SKIP_NFTABLES_ENV = "NB_SKIP_NFTABLES_CHECK" // FWType is the type for the firewall type type FWType int -func NewFirewall(iface IFaceMapper, stateManager *statemanager.Manager) (firewall.Manager, error) { +func NewFirewall(iface IFaceMapper, stateManager *statemanager.Manager, disableServerRoutes bool) (firewall.Manager, error) { // on the linux system we try to user nftables or iptables // in any case, because we need to allow netbird interface traffic // so we use AllowNetbird traffic from these firewall managers // for the userspace packet filtering firewall - fm, err := createNativeFirewall(iface, stateManager) + fm, err := createNativeFirewall(iface, stateManager, disableServerRoutes) if !iface.IsUserspaceBind() { return fm, err @@ -47,10 +47,10 @@ func NewFirewall(iface IFaceMapper, stateManager *statemanager.Manager) (firewal if err != nil { log.Warnf("failed to create native firewall: %v. 
Proceeding with userspace", err) } - return createUserspaceFirewall(iface, fm) + return createUserspaceFirewall(iface, fm, disableServerRoutes) } -func createNativeFirewall(iface IFaceMapper, stateManager *statemanager.Manager) (firewall.Manager, error) { +func createNativeFirewall(iface IFaceMapper, stateManager *statemanager.Manager, routes bool) (firewall.Manager, error) { fm, err := createFW(iface) if err != nil { return nil, fmt.Errorf("create firewall: %s", err) @@ -77,12 +77,12 @@ func createFW(iface IFaceMapper) (firewall.Manager, error) { } } -func createUserspaceFirewall(iface IFaceMapper, fm firewall.Manager) (firewall.Manager, error) { +func createUserspaceFirewall(iface IFaceMapper, fm firewall.Manager, disableServerRoutes bool) (firewall.Manager, error) { var errUsp error if fm != nil { - fm, errUsp = uspfilter.CreateWithNativeFirewall(iface, fm) + fm, errUsp = uspfilter.CreateWithNativeFirewall(iface, fm, disableServerRoutes) } else { - fm, errUsp = uspfilter.Create(iface) + fm, errUsp = uspfilter.Create(iface, disableServerRoutes) } if errUsp != nil { diff --git a/client/firewall/iface.go b/client/firewall/iface.go index f349f9210..d842abaa1 100644 --- a/client/firewall/iface.go +++ b/client/firewall/iface.go @@ -1,6 +1,8 @@ package firewall import ( + wgdevice "golang.zx2c4.com/wireguard/device" + "github.com/netbirdio/netbird/client/iface/device" ) @@ -10,4 +12,6 @@ type IFaceMapper interface { Address() device.WGAddress IsUserspaceBind() bool SetFilter(device.PacketFilter) error + GetDevice() *device.FilteredDevice + GetWGDevice() *wgdevice.Device } diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go index 192fae749..a3cc46a2f 100644 --- a/client/firewall/iptables/manager_linux.go +++ b/client/firewall/iptables/manager_linux.go @@ -213,6 +213,19 @@ func (m *Manager) AllowNetbird() error { // Flush doesn't need to be implemented for this manager func (m *Manager) Flush() error { return nil } +// SetLogLevel sets the log level for the firewall manager +func (m *Manager) SetLogLevel(log.Level) { + // not supported +} + +func (m *Manager) EnableRouting() error { + return nil +} + +func (m *Manager) DisableRouting() error { + return nil +} + // AddDNATRule adds a DNAT rule func (m *Manager) AddDNATRule(rule firewall.ForwardRule) (firewall.Rule, error) { m.mutex.Lock() diff --git a/client/firewall/iptables/router_linux.go b/client/firewall/iptables/router_linux.go index e9dfbd7ab..cc2c25e55 100644 --- a/client/firewall/iptables/router_linux.go +++ b/client/firewall/iptables/router_linux.go @@ -152,7 +152,16 @@ func (r *router) AddRouteFiltering( } rule := genRouteFilteringRuleSpec(params) - if err := r.iptablesClient.Append(tableFilter, chainRTFWDIN, rule...); err != nil { + // Insert DROP rules at the beginning, append ACCEPT rules at the end + var err error + if action == firewall.ActionDrop { + // after the established rule + err = r.iptablesClient.Insert(tableFilter, chainRTFWDIN, 2, rule...) + } else { + err = r.iptablesClient.Append(tableFilter, chainRTFWDIN, rule...) 
+ } + + if err != nil { return nil, fmt.Errorf("add route rule: %v", err) } diff --git a/client/firewall/manager/firewall.go b/client/firewall/manager/firewall.go index a031213ea..2650ac792 100644 --- a/client/firewall/manager/firewall.go +++ b/client/firewall/manager/firewall.go @@ -100,6 +100,12 @@ type Manager interface { // Flush the changes to firewall controller Flush() error + SetLogLevel(log.Level) + + EnableRouting() error + + DisableRouting() error + // AddDNATRule adds a DNAT rule AddDNATRule(ForwardRule) (Rule, error) diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go index 3f2ae8a97..b6484510d 100644 --- a/client/firewall/nftables/manager_linux.go +++ b/client/firewall/nftables/manager_linux.go @@ -318,6 +318,19 @@ func (m *Manager) cleanupNetbirdTables() error { return nil } +// SetLogLevel sets the log level for the firewall manager +func (m *Manager) SetLogLevel(log.Level) { + // not supported +} + +func (m *Manager) EnableRouting() error { + return nil +} + +func (m *Manager) DisableRouting() error { + return nil +} + // Flush rule/chain/set operations from the buffer // // Method also get all rules after flush and refreshes handle values in the rulesets diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go index 8d693725a..eaa8ef1f5 100644 --- a/client/firewall/nftables/manager_linux_test.go +++ b/client/firewall/nftables/manager_linux_test.go @@ -107,7 +107,7 @@ func TestNftablesManager(t *testing.T) { Kind: expr.VerdictAccept, }, } - require.ElementsMatch(t, rules[0].Exprs, expectedExprs1, "expected the same expressions") + compareExprsIgnoringCounters(t, rules[0].Exprs, expectedExprs1) ipToAdd, _ := netip.AddrFromSlice(ip) add := ipToAdd.Unmap() @@ -307,3 +307,18 @@ func TestNftablesManagerCompatibilityWithIptables(t *testing.T) { stdout, stderr = runIptablesSave(t) verifyIptablesOutput(t, stdout, stderr) } + +func compareExprsIgnoringCounters(t *testing.T, got, want []expr.Any) { + t.Helper() + require.Equal(t, len(got), len(want), "expression count mismatch") + + for i := range got { + if _, isCounter := got[i].(*expr.Counter); isCounter { + _, wantIsCounter := want[i].(*expr.Counter) + require.True(t, wantIsCounter, "expected Counter at index %d", i) + continue + } + + require.Equal(t, got[i], want[i], "expression mismatch at index %d", i) + } +} diff --git a/client/firewall/nftables/router_linux.go b/client/firewall/nftables/router_linux.go index 6f7ebde5a..6dd75ddb1 100644 --- a/client/firewall/nftables/router_linux.go +++ b/client/firewall/nftables/router_linux.go @@ -296,7 +296,13 @@ func (r *router) AddRouteFiltering( UserData: []byte(ruleKey), } - rule = r.conn.AddRule(rule) + // Insert DROP rules at the beginning, append ACCEPT rules at the end + if action == firewall.ActionDrop { + // TODO: Insert after the established rule + rule = r.conn.InsertRule(rule) + } else { + rule = r.conn.AddRule(rule) + } log.Tracef("Adding route rule %s", spew.Sdump(rule)) if err := r.conn.Flush(); err != nil { diff --git a/client/firewall/uspfilter/allow_netbird.go b/client/firewall/uspfilter/allow_netbird.go index cc0792255..03f23f5e6 100644 --- a/client/firewall/uspfilter/allow_netbird.go +++ b/client/firewall/uspfilter/allow_netbird.go @@ -3,6 +3,11 @@ package uspfilter import ( + "context" + "time" + + log "github.com/sirupsen/logrus" + "github.com/netbirdio/netbird/client/firewall/uspfilter/conntrack" "github.com/netbirdio/netbird/client/internal/statemanager" ) @@ -17,17 
+22,29 @@ func (m *Manager) Reset(stateManager *statemanager.Manager) error { if m.udpTracker != nil { m.udpTracker.Close() - m.udpTracker = conntrack.NewUDPTracker(conntrack.DefaultUDPTimeout) + m.udpTracker = conntrack.NewUDPTracker(conntrack.DefaultUDPTimeout, m.logger) } if m.icmpTracker != nil { m.icmpTracker.Close() - m.icmpTracker = conntrack.NewICMPTracker(conntrack.DefaultICMPTimeout) + m.icmpTracker = conntrack.NewICMPTracker(conntrack.DefaultICMPTimeout, m.logger) } if m.tcpTracker != nil { m.tcpTracker.Close() - m.tcpTracker = conntrack.NewTCPTracker(conntrack.DefaultTCPTimeout) + m.tcpTracker = conntrack.NewTCPTracker(conntrack.DefaultTCPTimeout, m.logger) + } + + if m.forwarder != nil { + m.forwarder.Stop() + } + + if m.logger != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if err := m.logger.Stop(ctx); err != nil { + log.Errorf("failed to shutdown logger: %v", err) + } } if m.nativeFirewall != nil { diff --git a/client/firewall/uspfilter/allow_netbird_windows.go b/client/firewall/uspfilter/allow_netbird_windows.go index 0d55d6268..379585978 100644 --- a/client/firewall/uspfilter/allow_netbird_windows.go +++ b/client/firewall/uspfilter/allow_netbird_windows.go @@ -1,9 +1,11 @@ package uspfilter import ( + "context" "fmt" "os/exec" "syscall" + "time" log "github.com/sirupsen/logrus" @@ -29,17 +31,29 @@ func (m *Manager) Reset(*statemanager.Manager) error { if m.udpTracker != nil { m.udpTracker.Close() - m.udpTracker = conntrack.NewUDPTracker(conntrack.DefaultUDPTimeout) + m.udpTracker = conntrack.NewUDPTracker(conntrack.DefaultUDPTimeout, m.logger) } if m.icmpTracker != nil { m.icmpTracker.Close() - m.icmpTracker = conntrack.NewICMPTracker(conntrack.DefaultICMPTimeout) + m.icmpTracker = conntrack.NewICMPTracker(conntrack.DefaultICMPTimeout, m.logger) } if m.tcpTracker != nil { m.tcpTracker.Close() - m.tcpTracker = conntrack.NewTCPTracker(conntrack.DefaultTCPTimeout) + m.tcpTracker = conntrack.NewTCPTracker(conntrack.DefaultTCPTimeout, m.logger) + } + + if m.forwarder != nil { + m.forwarder.Stop() + } + + if m.logger != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if err := m.logger.Stop(ctx); err != nil { + log.Errorf("failed to shutdown logger: %v", err) + } } if !isWindowsFirewallReachable() { diff --git a/client/firewall/uspfilter/common/iface.go b/client/firewall/uspfilter/common/iface.go new file mode 100644 index 000000000..d44e79509 --- /dev/null +++ b/client/firewall/uspfilter/common/iface.go @@ -0,0 +1,16 @@ +package common + +import ( + wgdevice "golang.zx2c4.com/wireguard/device" + + "github.com/netbirdio/netbird/client/iface" + "github.com/netbirdio/netbird/client/iface/device" +) + +// IFaceMapper defines subset methods of interface required for manager +type IFaceMapper interface { + SetFilter(device.PacketFilter) error + Address() iface.WGAddress + GetWGDevice() *wgdevice.Device + GetDevice() *device.FilteredDevice +} diff --git a/client/firewall/uspfilter/conntrack/common.go b/client/firewall/uspfilter/conntrack/common.go index e459bc75a..f5f502540 100644 --- a/client/firewall/uspfilter/conntrack/common.go +++ b/client/firewall/uspfilter/conntrack/common.go @@ -10,12 +10,11 @@ import ( // BaseConnTrack provides common fields and locking for all connection types type BaseConnTrack struct { - SourceIP net.IP - DestIP net.IP - SourcePort uint16 - DestPort uint16 - lastSeen atomic.Int64 // Unix nano for atomic access - established atomic.Bool + SourceIP 
net.IP + DestIP net.IP + SourcePort uint16 + DestPort uint16 + lastSeen atomic.Int64 // Unix nano for atomic access } // these small methods will be inlined by the compiler @@ -25,16 +24,6 @@ func (b *BaseConnTrack) UpdateLastSeen() { b.lastSeen.Store(time.Now().UnixNano()) } -// IsEstablished safely checks if connection is established -func (b *BaseConnTrack) IsEstablished() bool { - return b.established.Load() -} - -// SetEstablished safely sets the established state -func (b *BaseConnTrack) SetEstablished(state bool) { - b.established.Store(state) -} - // GetLastSeen safely gets the last seen timestamp func (b *BaseConnTrack) GetLastSeen() time.Time { return time.Unix(0, b.lastSeen.Load()) diff --git a/client/firewall/uspfilter/conntrack/common_test.go b/client/firewall/uspfilter/conntrack/common_test.go index 72d006def..81fa64b19 100644 --- a/client/firewall/uspfilter/conntrack/common_test.go +++ b/client/firewall/uspfilter/conntrack/common_test.go @@ -3,8 +3,14 @@ package conntrack import ( "net" "testing" + + "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/firewall/uspfilter/log" ) +var logger = log.NewFromLogrus(logrus.StandardLogger()) + func BenchmarkIPOperations(b *testing.B) { b.Run("MakeIPAddr", func(b *testing.B) { ip := net.ParseIP("192.168.1.1") @@ -34,37 +40,11 @@ func BenchmarkIPOperations(b *testing.B) { }) } -func BenchmarkAtomicOperations(b *testing.B) { - conn := &BaseConnTrack{} - b.Run("UpdateLastSeen", func(b *testing.B) { - for i := 0; i < b.N; i++ { - conn.UpdateLastSeen() - } - }) - - b.Run("IsEstablished", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = conn.IsEstablished() - } - }) - - b.Run("SetEstablished", func(b *testing.B) { - for i := 0; i < b.N; i++ { - conn.SetEstablished(i%2 == 0) - } - }) - - b.Run("GetLastSeen", func(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = conn.GetLastSeen() - } - }) -} // Memory pressure tests func BenchmarkMemoryPressure(b *testing.B) { b.Run("TCPHighLoad", func(b *testing.B) { - tracker := NewTCPTracker(DefaultTCPTimeout) + tracker := NewTCPTracker(DefaultTCPTimeout, logger) defer tracker.Close() // Generate different IPs @@ -89,7 +69,7 @@ func BenchmarkMemoryPressure(b *testing.B) { }) b.Run("UDPHighLoad", func(b *testing.B) { - tracker := NewUDPTracker(DefaultUDPTimeout) + tracker := NewUDPTracker(DefaultUDPTimeout, logger) defer tracker.Close() // Generate different IPs diff --git a/client/firewall/uspfilter/conntrack/icmp.go b/client/firewall/uspfilter/conntrack/icmp.go index e0a971678..25cd9e87d 100644 --- a/client/firewall/uspfilter/conntrack/icmp.go +++ b/client/firewall/uspfilter/conntrack/icmp.go @@ -6,6 +6,8 @@ import ( "time" "github.com/google/gopacket/layers" + + nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" ) const ( @@ -33,6 +35,7 @@ type ICMPConnTrack struct { // ICMPTracker manages ICMP connection states type ICMPTracker struct { + logger *nblog.Logger connections map[ICMPConnKey]*ICMPConnTrack timeout time.Duration cleanupTicker *time.Ticker @@ -42,12 +45,13 @@ type ICMPTracker struct { } // NewICMPTracker creates a new ICMP connection tracker -func NewICMPTracker(timeout time.Duration) *ICMPTracker { +func NewICMPTracker(timeout time.Duration, logger *nblog.Logger) *ICMPTracker { if timeout == 0 { timeout = DefaultICMPTimeout } tracker := &ICMPTracker{ + logger: logger, connections: make(map[ICMPConnKey]*ICMPConnTrack), timeout: timeout, cleanupTicker: time.NewTicker(ICMPCleanupInterval), @@ -62,7 +66,6 @@ func NewICMPTracker(timeout time.Duration) 
*ICMPTracker { // TrackOutbound records an outbound ICMP Echo Request func (t *ICMPTracker) TrackOutbound(srcIP net.IP, dstIP net.IP, id uint16, seq uint16) { key := makeICMPKey(srcIP, dstIP, id, seq) - now := time.Now().UnixNano() t.mutex.Lock() conn, exists := t.connections[key] @@ -80,24 +83,19 @@ func (t *ICMPTracker) TrackOutbound(srcIP net.IP, dstIP net.IP, id uint16, seq u ID: id, Sequence: seq, } - conn.lastSeen.Store(now) - conn.established.Store(true) + conn.UpdateLastSeen() t.connections[key] = conn + + t.logger.Trace("New ICMP connection %v", key) } t.mutex.Unlock() - conn.lastSeen.Store(now) + conn.UpdateLastSeen() } // IsValidInbound checks if an inbound ICMP Echo Reply matches a tracked request func (t *ICMPTracker) IsValidInbound(srcIP net.IP, dstIP net.IP, id uint16, seq uint16, icmpType uint8) bool { - switch icmpType { - case uint8(layers.ICMPv4TypeDestinationUnreachable), - uint8(layers.ICMPv4TypeTimeExceeded): - return true - case uint8(layers.ICMPv4TypeEchoReply): - // continue processing - default: + if icmpType != uint8(layers.ICMPv4TypeEchoReply) { return false } @@ -115,8 +113,7 @@ func (t *ICMPTracker) IsValidInbound(srcIP net.IP, dstIP net.IP, id uint16, seq return false } - return conn.IsEstablished() && - ValidateIPs(MakeIPAddr(srcIP), conn.DestIP) && + return ValidateIPs(MakeIPAddr(srcIP), conn.DestIP) && ValidateIPs(MakeIPAddr(dstIP), conn.SourceIP) && conn.ID == id && conn.Sequence == seq @@ -141,6 +138,8 @@ func (t *ICMPTracker) cleanup() { t.ipPool.Put(conn.SourceIP) t.ipPool.Put(conn.DestIP) delete(t.connections, key) + + t.logger.Debug("Removed ICMP connection %v (timeout)", key) } } } diff --git a/client/firewall/uspfilter/conntrack/icmp_test.go b/client/firewall/uspfilter/conntrack/icmp_test.go index 21176e719..32553c836 100644 --- a/client/firewall/uspfilter/conntrack/icmp_test.go +++ b/client/firewall/uspfilter/conntrack/icmp_test.go @@ -7,7 +7,7 @@ import ( func BenchmarkICMPTracker(b *testing.B) { b.Run("TrackOutbound", func(b *testing.B) { - tracker := NewICMPTracker(DefaultICMPTimeout) + tracker := NewICMPTracker(DefaultICMPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.1") @@ -20,7 +20,7 @@ func BenchmarkICMPTracker(b *testing.B) { }) b.Run("IsValidInbound", func(b *testing.B) { - tracker := NewICMPTracker(DefaultICMPTimeout) + tracker := NewICMPTracker(DefaultICMPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.1") diff --git a/client/firewall/uspfilter/conntrack/tcp.go b/client/firewall/uspfilter/conntrack/tcp.go index a7968dc73..7c12e8ad0 100644 --- a/client/firewall/uspfilter/conntrack/tcp.go +++ b/client/firewall/uspfilter/conntrack/tcp.go @@ -5,7 +5,10 @@ package conntrack import ( "net" "sync" + "sync/atomic" "time" + + nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" ) const ( @@ -61,12 +64,24 @@ type TCPConnKey struct { // TCPConnTrack represents a TCP connection state type TCPConnTrack struct { BaseConnTrack - State TCPState + State TCPState + established atomic.Bool sync.RWMutex } +// IsEstablished safely checks if connection is established +func (t *TCPConnTrack) IsEstablished() bool { + return t.established.Load() +} + +// SetEstablished safely sets the established state +func (t *TCPConnTrack) SetEstablished(state bool) { + t.established.Store(state) +} + // TCPTracker manages TCP connection states type TCPTracker struct { + logger *nblog.Logger connections map[ConnKey]*TCPConnTrack mutex sync.RWMutex cleanupTicker *time.Ticker @@ -76,8 +91,9 @@ type TCPTracker struct 
{ } // NewTCPTracker creates a new TCP connection tracker -func NewTCPTracker(timeout time.Duration) *TCPTracker { +func NewTCPTracker(timeout time.Duration, logger *nblog.Logger) *TCPTracker { tracker := &TCPTracker{ + logger: logger, connections: make(map[ConnKey]*TCPConnTrack), cleanupTicker: time.NewTicker(TCPCleanupInterval), done: make(chan struct{}), @@ -93,7 +109,6 @@ func NewTCPTracker(timeout time.Duration) *TCPTracker { func (t *TCPTracker) TrackOutbound(srcIP net.IP, dstIP net.IP, srcPort uint16, dstPort uint16, flags uint8) { // Create key before lock key := makeConnKey(srcIP, dstIP, srcPort, dstPort) - now := time.Now().UnixNano() t.mutex.Lock() conn, exists := t.connections[key] @@ -113,9 +128,11 @@ func (t *TCPTracker) TrackOutbound(srcIP net.IP, dstIP net.IP, srcPort uint16, d }, State: TCPStateNew, } - conn.lastSeen.Store(now) + conn.UpdateLastSeen() conn.established.Store(false) t.connections[key] = conn + + t.logger.Trace("New TCP connection: %s:%d -> %s:%d", srcIP, srcPort, dstIP, dstPort) } t.mutex.Unlock() @@ -123,7 +140,7 @@ func (t *TCPTracker) TrackOutbound(srcIP net.IP, dstIP net.IP, srcPort uint16, d conn.Lock() t.updateState(conn, flags, true) conn.Unlock() - conn.lastSeen.Store(now) + conn.UpdateLastSeen() } // IsValidInbound checks if an inbound TCP packet matches a tracked connection @@ -171,6 +188,9 @@ func (t *TCPTracker) updateState(conn *TCPConnTrack, flags uint8, isOutbound boo if flags&TCPRst != 0 { conn.State = TCPStateClosed conn.SetEstablished(false) + + t.logger.Trace("TCP connection reset: %s:%d -> %s:%d", + conn.SourceIP, conn.SourcePort, conn.DestIP, conn.DestPort) return } @@ -227,6 +247,9 @@ func (t *TCPTracker) updateState(conn *TCPConnTrack, flags uint8, isOutbound boo if flags&TCPAck != 0 { conn.State = TCPStateTimeWait // Keep established = false from previous state + + t.logger.Trace("TCP connection closed (simultaneous) - %s:%d -> %s:%d", + conn.SourceIP, conn.SourcePort, conn.DestIP, conn.DestPort) } case TCPStateCloseWait: @@ -237,11 +260,17 @@ func (t *TCPTracker) updateState(conn *TCPConnTrack, flags uint8, isOutbound boo case TCPStateLastAck: if flags&TCPAck != 0 { conn.State = TCPStateClosed + + t.logger.Trace("TCP connection gracefully closed: %s:%d -> %s:%d", + conn.SourceIP, conn.SourcePort, conn.DestIP, conn.DestPort) } case TCPStateTimeWait: // Stay in TIME-WAIT for 2MSL before transitioning to closed // This is handled by the cleanup routine + + t.logger.Trace("TCP connection completed - %s:%d -> %s:%d", + conn.SourceIP, conn.SourcePort, conn.DestIP, conn.DestPort) } } @@ -318,6 +347,8 @@ func (t *TCPTracker) cleanup() { t.ipPool.Put(conn.SourceIP) t.ipPool.Put(conn.DestIP) delete(t.connections, key) + + t.logger.Trace("Cleaned up TCP connection: %s:%d -> %s:%d", conn.SourceIP, conn.SourcePort, conn.DestIP, conn.DestPort) } } } diff --git a/client/firewall/uspfilter/conntrack/tcp_test.go b/client/firewall/uspfilter/conntrack/tcp_test.go index 6c8f82423..5f4c43915 100644 --- a/client/firewall/uspfilter/conntrack/tcp_test.go +++ b/client/firewall/uspfilter/conntrack/tcp_test.go @@ -9,7 +9,7 @@ import ( ) func TestTCPStateMachine(t *testing.T) { - tracker := NewTCPTracker(DefaultTCPTimeout) + tracker := NewTCPTracker(DefaultTCPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("100.64.0.1") @@ -154,7 +154,7 @@ func TestTCPStateMachine(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Helper() - tracker = NewTCPTracker(DefaultTCPTimeout) + tracker = NewTCPTracker(DefaultTCPTimeout, logger) tt.test(t) }) } @@ -162,7 
+162,7 @@ func TestTCPStateMachine(t *testing.T) { } func TestRSTHandling(t *testing.T) { - tracker := NewTCPTracker(DefaultTCPTimeout) + tracker := NewTCPTracker(DefaultTCPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("100.64.0.1") @@ -233,7 +233,7 @@ func establishConnection(t *testing.T, tracker *TCPTracker, srcIP, dstIP net.IP, func BenchmarkTCPTracker(b *testing.B) { b.Run("TrackOutbound", func(b *testing.B) { - tracker := NewTCPTracker(DefaultTCPTimeout) + tracker := NewTCPTracker(DefaultTCPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.1") @@ -246,7 +246,7 @@ func BenchmarkTCPTracker(b *testing.B) { }) b.Run("IsValidInbound", func(b *testing.B) { - tracker := NewTCPTracker(DefaultTCPTimeout) + tracker := NewTCPTracker(DefaultTCPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.1") @@ -264,7 +264,7 @@ func BenchmarkTCPTracker(b *testing.B) { }) b.Run("ConcurrentAccess", func(b *testing.B) { - tracker := NewTCPTracker(DefaultTCPTimeout) + tracker := NewTCPTracker(DefaultTCPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.1") @@ -287,7 +287,7 @@ func BenchmarkTCPTracker(b *testing.B) { // Benchmark connection cleanup func BenchmarkCleanup(b *testing.B) { b.Run("TCPCleanup", func(b *testing.B) { - tracker := NewTCPTracker(100 * time.Millisecond) // Short timeout for testing + tracker := NewTCPTracker(100*time.Millisecond, logger) // Short timeout for testing defer tracker.Close() // Pre-populate with expired connections diff --git a/client/firewall/uspfilter/conntrack/udp.go b/client/firewall/uspfilter/conntrack/udp.go index a969a4e84..e73465e31 100644 --- a/client/firewall/uspfilter/conntrack/udp.go +++ b/client/firewall/uspfilter/conntrack/udp.go @@ -4,6 +4,8 @@ import ( "net" "sync" "time" + + nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" ) const ( @@ -20,6 +22,7 @@ type UDPConnTrack struct { // UDPTracker manages UDP connection states type UDPTracker struct { + logger *nblog.Logger connections map[ConnKey]*UDPConnTrack timeout time.Duration cleanupTicker *time.Ticker @@ -29,12 +32,13 @@ type UDPTracker struct { } // NewUDPTracker creates a new UDP connection tracker -func NewUDPTracker(timeout time.Duration) *UDPTracker { +func NewUDPTracker(timeout time.Duration, logger *nblog.Logger) *UDPTracker { if timeout == 0 { timeout = DefaultUDPTimeout } tracker := &UDPTracker{ + logger: logger, connections: make(map[ConnKey]*UDPConnTrack), timeout: timeout, cleanupTicker: time.NewTicker(UDPCleanupInterval), @@ -49,7 +53,6 @@ func NewUDPTracker(timeout time.Duration) *UDPTracker { // TrackOutbound records an outbound UDP connection func (t *UDPTracker) TrackOutbound(srcIP net.IP, dstIP net.IP, srcPort uint16, dstPort uint16) { key := makeConnKey(srcIP, dstIP, srcPort, dstPort) - now := time.Now().UnixNano() t.mutex.Lock() conn, exists := t.connections[key] @@ -67,13 +70,14 @@ func (t *UDPTracker) TrackOutbound(srcIP net.IP, dstIP net.IP, srcPort uint16, d DestPort: dstPort, }, } - conn.lastSeen.Store(now) - conn.established.Store(true) + conn.UpdateLastSeen() t.connections[key] = conn + + t.logger.Trace("New UDP connection: %v", conn) } t.mutex.Unlock() - conn.lastSeen.Store(now) + conn.UpdateLastSeen() } // IsValidInbound checks if an inbound packet matches a tracked connection @@ -92,8 +96,7 @@ func (t *UDPTracker) IsValidInbound(srcIP net.IP, dstIP net.IP, srcPort uint16, return false } - return conn.IsEstablished() && - ValidateIPs(MakeIPAddr(srcIP), conn.DestIP) && + return 
ValidateIPs(MakeIPAddr(srcIP), conn.DestIP) && ValidateIPs(MakeIPAddr(dstIP), conn.SourceIP) && conn.DestPort == srcPort && conn.SourcePort == dstPort @@ -120,6 +123,8 @@ func (t *UDPTracker) cleanup() { t.ipPool.Put(conn.SourceIP) t.ipPool.Put(conn.DestIP) delete(t.connections, key) + + t.logger.Trace("Removed UDP connection %v (timeout)", conn) } } } diff --git a/client/firewall/uspfilter/conntrack/udp_test.go b/client/firewall/uspfilter/conntrack/udp_test.go index 671721890..fa83ee356 100644 --- a/client/firewall/uspfilter/conntrack/udp_test.go +++ b/client/firewall/uspfilter/conntrack/udp_test.go @@ -29,7 +29,7 @@ func TestNewUDPTracker(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tracker := NewUDPTracker(tt.timeout) + tracker := NewUDPTracker(tt.timeout, logger) assert.NotNil(t, tracker) assert.Equal(t, tt.wantTimeout, tracker.timeout) assert.NotNil(t, tracker.connections) @@ -40,7 +40,7 @@ func TestNewUDPTracker(t *testing.T) { } func TestUDPTracker_TrackOutbound(t *testing.T) { - tracker := NewUDPTracker(DefaultUDPTimeout) + tracker := NewUDPTracker(DefaultUDPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.2") @@ -58,12 +58,11 @@ func TestUDPTracker_TrackOutbound(t *testing.T) { assert.True(t, conn.DestIP.Equal(dstIP)) assert.Equal(t, srcPort, conn.SourcePort) assert.Equal(t, dstPort, conn.DestPort) - assert.True(t, conn.IsEstablished()) assert.WithinDuration(t, time.Now(), conn.GetLastSeen(), 1*time.Second) } func TestUDPTracker_IsValidInbound(t *testing.T) { - tracker := NewUDPTracker(1 * time.Second) + tracker := NewUDPTracker(1*time.Second, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.2") @@ -162,6 +161,7 @@ func TestUDPTracker_Cleanup(t *testing.T) { cleanupTicker: time.NewTicker(cleanupInterval), done: make(chan struct{}), ipPool: NewPreallocatedIPs(), + logger: logger, } // Start cleanup routine @@ -211,7 +211,7 @@ func TestUDPTracker_Cleanup(t *testing.T) { func BenchmarkUDPTracker(b *testing.B) { b.Run("TrackOutbound", func(b *testing.B) { - tracker := NewUDPTracker(DefaultUDPTimeout) + tracker := NewUDPTracker(DefaultUDPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.1") @@ -224,7 +224,7 @@ func BenchmarkUDPTracker(b *testing.B) { }) b.Run("IsValidInbound", func(b *testing.B) { - tracker := NewUDPTracker(DefaultUDPTimeout) + tracker := NewUDPTracker(DefaultUDPTimeout, logger) defer tracker.Close() srcIP := net.ParseIP("192.168.1.1") diff --git a/client/firewall/uspfilter/forwarder/endpoint.go b/client/firewall/uspfilter/forwarder/endpoint.go new file mode 100644 index 000000000..e8a265c94 --- /dev/null +++ b/client/firewall/uspfilter/forwarder/endpoint.go @@ -0,0 +1,81 @@ +package forwarder + +import ( + wgdevice "golang.zx2c4.com/wireguard/device" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/stack" + + nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" +) + +// endpoint implements stack.LinkEndpoint and handles integration with the wireguard device +type endpoint struct { + logger *nblog.Logger + dispatcher stack.NetworkDispatcher + device *wgdevice.Device + mtu uint32 +} + +func (e *endpoint) Attach(dispatcher stack.NetworkDispatcher) { + e.dispatcher = dispatcher +} + +func (e *endpoint) IsAttached() bool { + return e.dispatcher != nil +} + +func (e *endpoint) MTU() uint32 { + return e.mtu +} + +func (e *endpoint) Capabilities() stack.LinkEndpointCapabilities { + return stack.CapabilityNone +} + +func 
(e *endpoint) MaxHeaderLength() uint16 { + return 0 +} + +func (e *endpoint) LinkAddress() tcpip.LinkAddress { + return "" +} + +func (e *endpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { + var written int + for _, pkt := range pkts.AsSlice() { + netHeader := header.IPv4(pkt.NetworkHeader().View().AsSlice()) + + data := stack.PayloadSince(pkt.NetworkHeader()) + if data == nil { + continue + } + + // Send the packet through WireGuard + address := netHeader.DestinationAddress() + err := e.device.CreateOutboundPacket(data.AsSlice(), address.AsSlice()) + if err != nil { + e.logger.Error("CreateOutboundPacket: %v", err) + continue + } + written++ + } + + return written, nil +} + +func (e *endpoint) Wait() { + // not required +} + +func (e *endpoint) ARPHardwareType() header.ARPHardwareType { + return header.ARPHardwareNone +} + +func (e *endpoint) AddHeader(*stack.PacketBuffer) { + // not required +} + +func (e *endpoint) ParseHeader(*stack.PacketBuffer) bool { + return true +} diff --git a/client/firewall/uspfilter/forwarder/forwarder.go b/client/firewall/uspfilter/forwarder/forwarder.go new file mode 100644 index 000000000..4ed152b79 --- /dev/null +++ b/client/firewall/uspfilter/forwarder/forwarder.go @@ -0,0 +1,166 @@ +package forwarder + +import ( + "context" + "fmt" + "net" + "runtime" + + log "github.com/sirupsen/logrus" + "gvisor.dev/gvisor/pkg/buffer" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "gvisor.dev/gvisor/pkg/tcpip/transport/icmp" + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" + "gvisor.dev/gvisor/pkg/tcpip/transport/udp" + + "github.com/netbirdio/netbird/client/firewall/uspfilter/common" + nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" +) + +const ( + defaultReceiveWindow = 32768 + defaultMaxInFlight = 1024 + iosReceiveWindow = 16384 + iosMaxInFlight = 256 +) + +type Forwarder struct { + logger *nblog.Logger + stack *stack.Stack + endpoint *endpoint + udpForwarder *udpForwarder + ctx context.Context + cancel context.CancelFunc + ip net.IP + netstack bool +} + +func New(iface common.IFaceMapper, logger *nblog.Logger, netstack bool) (*Forwarder, error) { + s := stack.New(stack.Options{ + NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol}, + TransportProtocols: []stack.TransportProtocolFactory{ + tcp.NewProtocol, + udp.NewProtocol, + icmp.NewProtocol4, + }, + HandleLocal: false, + }) + + mtu, err := iface.GetDevice().MTU() + if err != nil { + return nil, fmt.Errorf("get MTU: %w", err) + } + nicID := tcpip.NICID(1) + endpoint := &endpoint{ + logger: logger, + device: iface.GetWGDevice(), + mtu: uint32(mtu), + } + + if err := s.CreateNIC(nicID, endpoint); err != nil { + return nil, fmt.Errorf("failed to create NIC: %v", err) + } + + ones, _ := iface.Address().Network.Mask.Size() + protoAddr := tcpip.ProtocolAddress{ + Protocol: ipv4.ProtocolNumber, + AddressWithPrefix: tcpip.AddressWithPrefix{ + Address: tcpip.AddrFromSlice(iface.Address().IP.To4()), + PrefixLen: ones, + }, + } + + if err := s.AddProtocolAddress(nicID, protoAddr, stack.AddressProperties{}); err != nil { + return nil, fmt.Errorf("failed to add protocol address: %s", err) + } + + defaultSubnet, err := tcpip.NewSubnet( + tcpip.AddrFrom4([4]byte{0, 0, 0, 0}), + tcpip.MaskFromBytes([]byte{0, 0, 0, 0}), + ) + if err != nil { + return nil, fmt.Errorf("creating default subnet: %w", err) + } + + if err := s.SetPromiscuousMode(nicID, true); err != nil { + 
return nil, fmt.Errorf("set promiscuous mode: %s", err) + } + if err := s.SetSpoofing(nicID, true); err != nil { + return nil, fmt.Errorf("set spoofing: %s", err) + } + + s.SetRouteTable([]tcpip.Route{ + { + Destination: defaultSubnet, + NIC: nicID, + }, + }) + + ctx, cancel := context.WithCancel(context.Background()) + f := &Forwarder{ + logger: logger, + stack: s, + endpoint: endpoint, + udpForwarder: newUDPForwarder(mtu, logger), + ctx: ctx, + cancel: cancel, + netstack: netstack, + ip: iface.Address().IP, + } + + receiveWindow := defaultReceiveWindow + maxInFlight := defaultMaxInFlight + if runtime.GOOS == "ios" { + receiveWindow = iosReceiveWindow + maxInFlight = iosMaxInFlight + } + + tcpForwarder := tcp.NewForwarder(s, receiveWindow, maxInFlight, f.handleTCP) + s.SetTransportProtocolHandler(tcp.ProtocolNumber, tcpForwarder.HandlePacket) + + udpForwarder := udp.NewForwarder(s, f.handleUDP) + s.SetTransportProtocolHandler(udp.ProtocolNumber, udpForwarder.HandlePacket) + + s.SetTransportProtocolHandler(icmp.ProtocolNumber4, f.handleICMP) + + log.Debugf("forwarder: Initialization complete with NIC %d", nicID) + return f, nil +} + +func (f *Forwarder) InjectIncomingPacket(payload []byte) error { + if len(payload) < header.IPv4MinimumSize { + return fmt.Errorf("packet too small: %d bytes", len(payload)) + } + + pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(payload), + }) + defer pkt.DecRef() + + if f.endpoint.dispatcher != nil { + f.endpoint.dispatcher.DeliverNetworkPacket(ipv4.ProtocolNumber, pkt) + } + return nil +} + +// Stop gracefully shuts down the forwarder +func (f *Forwarder) Stop() { + f.cancel() + + if f.udpForwarder != nil { + f.udpForwarder.Stop() + } + + f.stack.Close() + f.stack.Wait() +} + +func (f *Forwarder) determineDialAddr(addr tcpip.Address) net.IP { + if f.netstack && f.ip.Equal(addr.AsSlice()) { + return net.IPv4(127, 0, 0, 1) + } + return addr.AsSlice() +} diff --git a/client/firewall/uspfilter/forwarder/icmp.go b/client/firewall/uspfilter/forwarder/icmp.go new file mode 100644 index 000000000..14cdc37be --- /dev/null +++ b/client/firewall/uspfilter/forwarder/icmp.go @@ -0,0 +1,109 @@ +package forwarder + +import ( + "context" + "net" + "time" + + "gvisor.dev/gvisor/pkg/tcpip/header" + "gvisor.dev/gvisor/pkg/tcpip/stack" +) + +// handleICMP handles ICMP packets from the network stack +func (f *Forwarder) handleICMP(id stack.TransportEndpointID, pkt stack.PacketBufferPtr) bool { + ctx, cancel := context.WithTimeout(f.ctx, 5*time.Second) + defer cancel() + + lc := net.ListenConfig{} + // TODO: support non-root + conn, err := lc.ListenPacket(ctx, "ip4:icmp", "0.0.0.0") + if err != nil { + f.logger.Error("Failed to create ICMP socket for %v: %v", id, err) + + // This will make netstack reply on behalf of the original destination, that's ok for now + return false + } + defer func() { + if err := conn.Close(); err != nil { + f.logger.Debug("Failed to close ICMP socket: %v", err) + } + }() + + dstIP := f.determineDialAddr(id.LocalAddress) + dst := &net.IPAddr{IP: dstIP} + + // Get the complete ICMP message (header + data) + fullPacket := stack.PayloadSince(pkt.TransportHeader()) + payload := fullPacket.AsSlice() + + icmpHdr := header.ICMPv4(pkt.TransportHeader().View().AsSlice()) + + // For Echo Requests, send and handle response + switch icmpHdr.Type() { + case header.ICMPv4Echo: + return f.handleEchoResponse(icmpHdr, payload, dst, conn, id) + case header.ICMPv4EchoReply: + // dont process our own replies + return true + default: 
+ } + + // For other ICMP types (Time Exceeded, Destination Unreachable, etc) + _, err = conn.WriteTo(payload, dst) + if err != nil { + f.logger.Error("Failed to write ICMP packet for %v: %v", id, err) + return true + } + + f.logger.Trace("Forwarded ICMP packet %v type=%v code=%v", + id, icmpHdr.Type(), icmpHdr.Code()) + + return true +} + +func (f *Forwarder) handleEchoResponse(icmpHdr header.ICMPv4, payload []byte, dst *net.IPAddr, conn net.PacketConn, id stack.TransportEndpointID) bool { + if _, err := conn.WriteTo(payload, dst); err != nil { + f.logger.Error("Failed to write ICMP packet for %v: %v", id, err) + return true + } + + f.logger.Trace("Forwarded ICMP packet %v type=%v code=%v", + id, icmpHdr.Type(), icmpHdr.Code()) + + if err := conn.SetReadDeadline(time.Now().Add(5 * time.Second)); err != nil { + f.logger.Error("Failed to set read deadline for ICMP response: %v", err) + return true + } + + response := make([]byte, f.endpoint.mtu) + n, _, err := conn.ReadFrom(response) + if err != nil { + if !isTimeout(err) { + f.logger.Error("Failed to read ICMP response: %v", err) + } + return true + } + + ipHdr := make([]byte, header.IPv4MinimumSize) + ip := header.IPv4(ipHdr) + ip.Encode(&header.IPv4Fields{ + TotalLength: uint16(header.IPv4MinimumSize + n), + TTL: 64, + Protocol: uint8(header.ICMPv4ProtocolNumber), + SrcAddr: id.LocalAddress, + DstAddr: id.RemoteAddress, + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + fullPacket := make([]byte, 0, len(ipHdr)+n) + fullPacket = append(fullPacket, ipHdr...) + fullPacket = append(fullPacket, response[:n]...) + + if err := f.InjectIncomingPacket(fullPacket); err != nil { + f.logger.Error("Failed to inject ICMP response: %v", err) + return true + } + + f.logger.Trace("Forwarded ICMP echo reply for %v", id) + return true +} diff --git a/client/firewall/uspfilter/forwarder/tcp.go b/client/firewall/uspfilter/forwarder/tcp.go new file mode 100644 index 000000000..6d7cf3b6a --- /dev/null +++ b/client/firewall/uspfilter/forwarder/tcp.go @@ -0,0 +1,90 @@ +package forwarder + +import ( + "context" + "fmt" + "io" + "net" + + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" + "gvisor.dev/gvisor/pkg/waiter" +) + +// handleTCP is called by the TCP forwarder for new connections. 
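+// It dials the original destination on the host network, completes the gVisor handshake, and proxies data between the two connections.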
+func (f *Forwarder) handleTCP(r *tcp.ForwarderRequest) { + id := r.ID() + + dialAddr := fmt.Sprintf("%s:%d", f.determineDialAddr(id.LocalAddress), id.LocalPort) + + outConn, err := (&net.Dialer{}).DialContext(f.ctx, "tcp", dialAddr) + if err != nil { + r.Complete(true) + f.logger.Trace("forwarder: dial error for %v: %v", id, err) + return + } + + // Create wait queue for blocking syscalls + wq := waiter.Queue{} + + ep, epErr := r.CreateEndpoint(&wq) + if epErr != nil { + f.logger.Error("forwarder: failed to create TCP endpoint: %v", epErr) + if err := outConn.Close(); err != nil { + f.logger.Debug("forwarder: outConn close error: %v", err) + } + r.Complete(true) + return + } + + // Complete the handshake + r.Complete(false) + + inConn := gonet.NewTCPConn(&wq, ep) + + f.logger.Trace("forwarder: established TCP connection %v", id) + + go f.proxyTCP(id, inConn, outConn, ep) +} + +func (f *Forwarder) proxyTCP(id stack.TransportEndpointID, inConn *gonet.TCPConn, outConn net.Conn, ep tcpip.Endpoint) { + defer func() { + if err := inConn.Close(); err != nil { + f.logger.Debug("forwarder: inConn close error: %v", err) + } + if err := outConn.Close(); err != nil { + f.logger.Debug("forwarder: outConn close error: %v", err) + } + ep.Close() + }() + + // Create context for managing the proxy goroutines + ctx, cancel := context.WithCancel(f.ctx) + defer cancel() + + errChan := make(chan error, 2) + + go func() { + _, err := io.Copy(outConn, inConn) + errChan <- err + }() + + go func() { + _, err := io.Copy(inConn, outConn) + errChan <- err + }() + + select { + case <-ctx.Done(): + f.logger.Trace("forwarder: tearing down TCP connection %v due to context done", id) + return + case err := <-errChan: + if err != nil && !isClosedError(err) { + f.logger.Error("proxyTCP: copy error: %v", err) + } + f.logger.Trace("forwarder: tearing down TCP connection %v", id) + return + } +} diff --git a/client/firewall/uspfilter/forwarder/udp.go b/client/firewall/uspfilter/forwarder/udp.go new file mode 100644 index 000000000..97e4662fd --- /dev/null +++ b/client/firewall/uspfilter/forwarder/udp.go @@ -0,0 +1,288 @@ +package forwarder + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "sync/atomic" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "gvisor.dev/gvisor/pkg/tcpip/transport/udp" + "gvisor.dev/gvisor/pkg/waiter" + + nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" +) + +const ( + udpTimeout = 30 * time.Second +) + +type udpPacketConn struct { + conn *gonet.UDPConn + outConn net.Conn + lastSeen atomic.Int64 + cancel context.CancelFunc + ep tcpip.Endpoint +} + +type udpForwarder struct { + sync.RWMutex + logger *nblog.Logger + conns map[stack.TransportEndpointID]*udpPacketConn + bufPool sync.Pool + ctx context.Context + cancel context.CancelFunc +} + +type idleConn struct { + id stack.TransportEndpointID + conn *udpPacketConn +} + +func newUDPForwarder(mtu int, logger *nblog.Logger) *udpForwarder { + ctx, cancel := context.WithCancel(context.Background()) + f := &udpForwarder{ + logger: logger, + conns: make(map[stack.TransportEndpointID]*udpPacketConn), + ctx: ctx, + cancel: cancel, + bufPool: sync.Pool{ + New: func() any { + b := make([]byte, mtu) + return &b + }, + }, + } + go f.cleanup() + return f +} + +// Stop stops the UDP forwarder and all active connections +func (f *udpForwarder) Stop() { + f.cancel() + + f.Lock() + defer f.Unlock() + + for id, conn := range f.conns { + conn.cancel() + if err := 
conn.conn.Close(); err != nil { + f.logger.Debug("forwarder: UDP conn close error for %v: %v", id, err) + } + if err := conn.outConn.Close(); err != nil { + f.logger.Debug("forwarder: UDP outConn close error for %v: %v", id, err) + } + + conn.ep.Close() + delete(f.conns, id) + } +} + +// cleanup periodically removes idle UDP connections +func (f *udpForwarder) cleanup() { + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + for { + select { + case <-f.ctx.Done(): + return + case <-ticker.C: + var idleConns []idleConn + + f.RLock() + for id, conn := range f.conns { + if conn.getIdleDuration() > udpTimeout { + idleConns = append(idleConns, idleConn{id, conn}) + } + } + f.RUnlock() + + for _, idle := range idleConns { + idle.conn.cancel() + if err := idle.conn.conn.Close(); err != nil { + f.logger.Debug("forwarder: UDP conn close error for %v: %v", idle.id, err) + } + if err := idle.conn.outConn.Close(); err != nil { + f.logger.Debug("forwarder: UDP outConn close error for %v: %v", idle.id, err) + } + + idle.conn.ep.Close() + + f.Lock() + delete(f.conns, idle.id) + f.Unlock() + + f.logger.Trace("forwarder: cleaned up idle UDP connection %v", idle.id) + } + } + } +} + +// handleUDP is called by the UDP forwarder for new packets +func (f *Forwarder) handleUDP(r *udp.ForwarderRequest) { + if f.ctx.Err() != nil { + f.logger.Trace("forwarder: context done, dropping UDP packet") + return + } + + id := r.ID() + + f.udpForwarder.RLock() + _, exists := f.udpForwarder.conns[id] + f.udpForwarder.RUnlock() + if exists { + f.logger.Trace("forwarder: existing UDP connection for %v", id) + return + } + + dstAddr := fmt.Sprintf("%s:%d", f.determineDialAddr(id.LocalAddress), id.LocalPort) + outConn, err := (&net.Dialer{}).DialContext(f.ctx, "udp", dstAddr) + if err != nil { + f.logger.Debug("forwarder: UDP dial error for %v: %v", id, err) + // TODO: Send ICMP error message + return + } + + // Create wait queue for blocking syscalls + wq := waiter.Queue{} + ep, epErr := r.CreateEndpoint(&wq) + if epErr != nil { + f.logger.Debug("forwarder: failed to create UDP endpoint: %v", epErr) + if err := outConn.Close(); err != nil { + f.logger.Debug("forwarder: UDP outConn close error for %v: %v", id, err) + } + return + } + + inConn := gonet.NewUDPConn(f.stack, &wq, ep) + connCtx, connCancel := context.WithCancel(f.ctx) + + pConn := &udpPacketConn{ + conn: inConn, + outConn: outConn, + cancel: connCancel, + ep: ep, + } + pConn.updateLastSeen() + + f.udpForwarder.Lock() + // Double-check no connection was created while we were setting up + if _, exists := f.udpForwarder.conns[id]; exists { + f.udpForwarder.Unlock() + pConn.cancel() + if err := inConn.Close(); err != nil { + f.logger.Debug("forwarder: UDP inConn close error for %v: %v", id, err) + } + if err := outConn.Close(); err != nil { + f.logger.Debug("forwarder: UDP outConn close error for %v: %v", id, err) + } + return + } + f.udpForwarder.conns[id] = pConn + f.udpForwarder.Unlock() + + f.logger.Trace("forwarder: established UDP connection to %v", id) + go f.proxyUDP(connCtx, pConn, id, ep) +} + +func (f *Forwarder) proxyUDP(ctx context.Context, pConn *udpPacketConn, id stack.TransportEndpointID, ep tcpip.Endpoint) { + defer func() { + pConn.cancel() + if err := pConn.conn.Close(); err != nil { + f.logger.Debug("forwarder: UDP inConn close error for %v: %v", id, err) + } + if err := pConn.outConn.Close(); err != nil { + f.logger.Debug("forwarder: UDP outConn close error for %v: %v", id, err) + } + + ep.Close() + + f.udpForwarder.Lock() + 
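+ // remove the flow from the tracking map so a later packet can re-establish it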
delete(f.udpForwarder.conns, id) + f.udpForwarder.Unlock() + }() + + errChan := make(chan error, 2) + + go func() { + errChan <- pConn.copy(ctx, pConn.conn, pConn.outConn, &f.udpForwarder.bufPool, "outbound->inbound") + }() + + go func() { + errChan <- pConn.copy(ctx, pConn.outConn, pConn.conn, &f.udpForwarder.bufPool, "inbound->outbound") + }() + + select { + case <-ctx.Done(): + f.logger.Trace("forwarder: tearing down UDP connection %v due to context done", id) + return + case err := <-errChan: + if err != nil && !isClosedError(err) { + f.logger.Error("proxyUDP: copy error: %v", err) + } + f.logger.Trace("forwarder: tearing down UDP connection %v", id) + return + } +} + +func (c *udpPacketConn) updateLastSeen() { + c.lastSeen.Store(time.Now().UnixNano()) +} + +func (c *udpPacketConn) getIdleDuration() time.Duration { + lastSeen := time.Unix(0, c.lastSeen.Load()) + return time.Since(lastSeen) +} + +func (c *udpPacketConn) copy(ctx context.Context, dst net.Conn, src net.Conn, bufPool *sync.Pool, direction string) error { + bufp := bufPool.Get().(*[]byte) + defer bufPool.Put(bufp) + buffer := *bufp + + if err := src.SetReadDeadline(time.Now().Add(udpTimeout)); err != nil { + return fmt.Errorf("set read deadline: %w", err) + } + if err := src.SetWriteDeadline(time.Now().Add(udpTimeout)); err != nil { + return fmt.Errorf("set write deadline: %w", err) + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + n, err := src.Read(buffer) + if err != nil { + if isTimeout(err) { + continue + } + return fmt.Errorf("read from %s: %w", direction, err) + } + + _, err = dst.Write(buffer[:n]) + if err != nil { + return fmt.Errorf("write to %s: %w", direction, err) + } + + c.updateLastSeen() + } + } +} + +func isClosedError(err error) bool { + return errors.Is(err, net.ErrClosed) || errors.Is(err, context.Canceled) +} + +func isTimeout(err error) bool { + var netErr net.Error + if errors.As(err, &netErr) { + return netErr.Timeout() + } + return false +} diff --git a/client/firewall/uspfilter/localip.go b/client/firewall/uspfilter/localip.go new file mode 100644 index 000000000..7664b65d5 --- /dev/null +++ b/client/firewall/uspfilter/localip.go @@ -0,0 +1,134 @@ +package uspfilter + +import ( + "fmt" + "net" + "sync" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/firewall/uspfilter/common" +) + +type localIPManager struct { + mu sync.RWMutex + + // Use bitmap for IPv4 (32 bits * 2^16 = 256KB memory) + ipv4Bitmap [1 << 16]uint32 +} + +func newLocalIPManager() *localIPManager { + return &localIPManager{} +} + +func (m *localIPManager) setBitmapBit(ip net.IP) { + ipv4 := ip.To4() + if ipv4 == nil { + return + } + high := (uint16(ipv4[0]) << 8) | uint16(ipv4[1]) + low := (uint16(ipv4[2]) << 8) | uint16(ipv4[3]) + m.ipv4Bitmap[high] |= 1 << (low % 32) +} + +func (m *localIPManager) checkBitmapBit(ip net.IP) bool { + ipv4 := ip.To4() + if ipv4 == nil { + return false + } + high := (uint16(ipv4[0]) << 8) | uint16(ipv4[1]) + low := (uint16(ipv4[2]) << 8) | uint16(ipv4[3]) + return (m.ipv4Bitmap[high] & (1 << (low % 32))) != 0 +} + +func (m *localIPManager) processIP(ip net.IP, newIPv4Bitmap *[1 << 16]uint32, ipv4Set map[string]struct{}, ipv4Addresses *[]string) error { + if ipv4 := ip.To4(); ipv4 != nil { + high := (uint16(ipv4[0]) << 8) | uint16(ipv4[1]) + low := (uint16(ipv4[2]) << 8) | uint16(ipv4[3]) + if int(high) >= len(*newIPv4Bitmap) { + return fmt.Errorf("invalid IPv4 address: %s", ip) + } + ipStr := ip.String() + if _, exists := ipv4Set[ipStr]; !exists 
{ + ipv4Set[ipStr] = struct{}{} + *ipv4Addresses = append(*ipv4Addresses, ipStr) + newIPv4Bitmap[high] |= 1 << (low % 32) + } + } + return nil +} + +func (m *localIPManager) processInterface(iface net.Interface, newIPv4Bitmap *[1 << 16]uint32, ipv4Set map[string]struct{}, ipv4Addresses *[]string) { + addrs, err := iface.Addrs() + if err != nil { + log.Debugf("get addresses for interface %s failed: %v", iface.Name, err) + return + } + + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + default: + continue + } + + if err := m.processIP(ip, newIPv4Bitmap, ipv4Set, ipv4Addresses); err != nil { + log.Debugf("process IP failed: %v", err) + } + } +} + +func (m *localIPManager) UpdateLocalIPs(iface common.IFaceMapper) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic: %v", r) + } + }() + + var newIPv4Bitmap [1 << 16]uint32 + ipv4Set := make(map[string]struct{}) + var ipv4Addresses []string + + // 127.0.0.0/8 + high := uint16(127) << 8 + for i := uint16(0); i < 256; i++ { + newIPv4Bitmap[high|i] = 0xffffffff + } + + if iface != nil { + if err := m.processIP(iface.Address().IP, &newIPv4Bitmap, ipv4Set, &ipv4Addresses); err != nil { + return err + } + } + + interfaces, err := net.Interfaces() + if err != nil { + log.Warnf("failed to get interfaces: %v", err) + } else { + for _, intf := range interfaces { + m.processInterface(intf, &newIPv4Bitmap, ipv4Set, &ipv4Addresses) + } + } + + m.mu.Lock() + m.ipv4Bitmap = newIPv4Bitmap + m.mu.Unlock() + + log.Debugf("Local IPv4 addresses: %v", ipv4Addresses) + return nil +} + +func (m *localIPManager) IsLocalIP(ip net.IP) bool { + m.mu.RLock() + defer m.mu.RUnlock() + + if ipv4 := ip.To4(); ipv4 != nil { + return m.checkBitmapBit(ipv4) + } + + return false +} diff --git a/client/firewall/uspfilter/localip_test.go b/client/firewall/uspfilter/localip_test.go new file mode 100644 index 000000000..02f41bf4f --- /dev/null +++ b/client/firewall/uspfilter/localip_test.go @@ -0,0 +1,270 @@ +package uspfilter + +import ( + "net" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/client/iface" +) + +func TestLocalIPManager(t *testing.T) { + tests := []struct { + name string + setupAddr iface.WGAddress + testIP net.IP + expected bool + }{ + { + name: "Localhost range", + setupAddr: iface.WGAddress{ + IP: net.ParseIP("192.168.1.1"), + Network: &net.IPNet{ + IP: net.ParseIP("192.168.1.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + testIP: net.ParseIP("127.0.0.2"), + expected: true, + }, + { + name: "Localhost standard address", + setupAddr: iface.WGAddress{ + IP: net.ParseIP("192.168.1.1"), + Network: &net.IPNet{ + IP: net.ParseIP("192.168.1.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + testIP: net.ParseIP("127.0.0.1"), + expected: true, + }, + { + name: "Localhost range edge", + setupAddr: iface.WGAddress{ + IP: net.ParseIP("192.168.1.1"), + Network: &net.IPNet{ + IP: net.ParseIP("192.168.1.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + testIP: net.ParseIP("127.255.255.255"), + expected: true, + }, + { + name: "Local IP matches", + setupAddr: iface.WGAddress{ + IP: net.ParseIP("192.168.1.1"), + Network: &net.IPNet{ + IP: net.ParseIP("192.168.1.0"), + Mask: net.CIDRMask(24, 32), + }, + }, + testIP: net.ParseIP("192.168.1.1"), + expected: true, + }, + { + name: "Local IP doesn't match", + setupAddr: iface.WGAddress{ + IP: net.ParseIP("192.168.1.1"), + Network: &net.IPNet{ + IP: net.ParseIP("192.168.1.0"), + Mask: 
net.CIDRMask(24, 32), + }, + }, + testIP: net.ParseIP("192.168.1.2"), + expected: false, + }, + { + name: "IPv6 address", + setupAddr: iface.WGAddress{ + IP: net.ParseIP("fe80::1"), + Network: &net.IPNet{ + IP: net.ParseIP("fe80::"), + Mask: net.CIDRMask(64, 128), + }, + }, + testIP: net.ParseIP("fe80::1"), + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + manager := newLocalIPManager() + + mock := &IFaceMock{ + AddressFunc: func() iface.WGAddress { + return tt.setupAddr + }, + } + + err := manager.UpdateLocalIPs(mock) + require.NoError(t, err) + + result := manager.IsLocalIP(tt.testIP) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestLocalIPManager_AllInterfaces(t *testing.T) { + manager := newLocalIPManager() + mock := &IFaceMock{} + + // Get actual local interfaces + interfaces, err := net.Interfaces() + require.NoError(t, err) + + var tests []struct { + ip string + expected bool + } + + // Add all local interface IPs to test cases + for _, iface := range interfaces { + addrs, err := iface.Addrs() + require.NoError(t, err) + + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + default: + continue + } + + if ip4 := ip.To4(); ip4 != nil { + tests = append(tests, struct { + ip string + expected bool + }{ + ip: ip4.String(), + expected: true, + }) + } + } + } + + // Add some external IPs as negative test cases + externalIPs := []string{ + "8.8.8.8", + "1.1.1.1", + "208.67.222.222", + } + for _, ip := range externalIPs { + tests = append(tests, struct { + ip string + expected bool + }{ + ip: ip, + expected: false, + }) + } + + require.NotEmpty(t, tests, "No test cases generated") + + err = manager.UpdateLocalIPs(mock) + require.NoError(t, err) + + t.Logf("Testing %d IPs", len(tests)) + for _, tt := range tests { + t.Run(tt.ip, func(t *testing.T) { + result := manager.IsLocalIP(net.ParseIP(tt.ip)) + require.Equal(t, tt.expected, result, "IP: %s", tt.ip) + }) + } +} + +// MapImplementation is a version using map[string]struct{} +type MapImplementation struct { + localIPs map[string]struct{} +} + +func BenchmarkIPChecks(b *testing.B) { + interfaces := make([]net.IP, 16) + for i := range interfaces { + interfaces[i] = net.IPv4(10, 0, byte(i>>8), byte(i)) + } + + // Setup bitmap version + bitmapManager := &localIPManager{ + ipv4Bitmap: [1 << 16]uint32{}, + } + for _, ip := range interfaces[:8] { // Add half of IPs + bitmapManager.setBitmapBit(ip) + } + + // Setup map version + mapManager := &MapImplementation{ + localIPs: make(map[string]struct{}), + } + for _, ip := range interfaces[:8] { + mapManager.localIPs[ip.String()] = struct{}{} + } + + b.Run("Bitmap_Hit", func(b *testing.B) { + ip := interfaces[4] + b.ResetTimer() + for i := 0; i < b.N; i++ { + bitmapManager.checkBitmapBit(ip) + } + }) + + b.Run("Bitmap_Miss", func(b *testing.B) { + ip := interfaces[12] + b.ResetTimer() + for i := 0; i < b.N; i++ { + bitmapManager.checkBitmapBit(ip) + } + }) + + b.Run("Map_Hit", func(b *testing.B) { + ip := interfaces[4] + b.ResetTimer() + for i := 0; i < b.N; i++ { + // nolint:gosimple + _, _ = mapManager.localIPs[ip.String()] + } + }) + + b.Run("Map_Miss", func(b *testing.B) { + ip := interfaces[12] + b.ResetTimer() + for i := 0; i < b.N; i++ { + // nolint:gosimple + _, _ = mapManager.localIPs[ip.String()] + } + }) +} + +func BenchmarkWGPosition(b *testing.B) { + wgIP := net.ParseIP("10.10.0.1") + + // Create two managers - one checks WG IP first, other 
checks it last + b.Run("WG_First", func(b *testing.B) { + bm := &localIPManager{ipv4Bitmap: [1 << 16]uint32{}} + bm.setBitmapBit(wgIP) + b.ResetTimer() + for i := 0; i < b.N; i++ { + bm.checkBitmapBit(wgIP) + } + }) + + b.Run("WG_Last", func(b *testing.B) { + bm := &localIPManager{ipv4Bitmap: [1 << 16]uint32{}} + // Fill with other IPs first + for i := 0; i < 15; i++ { + bm.setBitmapBit(net.IPv4(10, 0, byte(i>>8), byte(i))) + } + bm.setBitmapBit(wgIP) // Add WG IP last + b.ResetTimer() + for i := 0; i < b.N; i++ { + bm.checkBitmapBit(wgIP) + } + }) +} diff --git a/client/firewall/uspfilter/log/log.go b/client/firewall/uspfilter/log/log.go new file mode 100644 index 000000000..984b6ad08 --- /dev/null +++ b/client/firewall/uspfilter/log/log.go @@ -0,0 +1,196 @@ +// Package logger provides a high-performance, non-blocking logger for userspace networking +package log + +import ( + "context" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + log "github.com/sirupsen/logrus" +) + +const ( + maxBatchSize = 1024 * 16 // 16KB max batch size + maxMessageSize = 1024 * 2 // 2KB per message + bufferSize = 1024 * 256 // 256KB ring buffer + defaultFlushInterval = 2 * time.Second +) + +// Level represents log severity +type Level uint32 + +const ( + LevelPanic Level = iota + LevelFatal + LevelError + LevelWarn + LevelInfo + LevelDebug + LevelTrace +) + +var levelStrings = map[Level]string{ + LevelPanic: "PANC", + LevelFatal: "FATL", + LevelError: "ERRO", + LevelWarn: "WARN", + LevelInfo: "INFO", + LevelDebug: "DEBG", + LevelTrace: "TRAC", +} + +// Logger is a high-performance, non-blocking logger +type Logger struct { + output io.Writer + level atomic.Uint32 + buffer *ringBuffer + shutdown chan struct{} + closeOnce sync.Once + wg sync.WaitGroup + + // Reusable buffer pool for formatting messages + bufPool sync.Pool +} + +func NewFromLogrus(logrusLogger *log.Logger) *Logger { + l := &Logger{ + output: logrusLogger.Out, + buffer: newRingBuffer(bufferSize), + shutdown: make(chan struct{}), + bufPool: sync.Pool{ + New: func() interface{} { + // Pre-allocate buffer for message formatting + b := make([]byte, 0, maxMessageSize) + return &b + }, + }, + } + logrusLevel := logrusLogger.GetLevel() + l.level.Store(uint32(logrusLevel)) + level := levelStrings[Level(logrusLevel)] + log.Debugf("New uspfilter logger created with loglevel %v", level) + + l.wg.Add(1) + go l.worker() + + return l +} + +func (l *Logger) SetLevel(level Level) { + l.level.Store(uint32(level)) + + log.Debugf("Set uspfilter logger loglevel to %v", levelStrings[level]) +} + +func (l *Logger) formatMessage(buf *[]byte, level Level, format string, args ...interface{}) { + *buf = (*buf)[:0] + + // Timestamp + *buf = time.Now().AppendFormat(*buf, "2006-01-02T15:04:05-07:00") + *buf = append(*buf, ' ') + + // Level + *buf = append(*buf, levelStrings[level]...) + *buf = append(*buf, ' ') + + // Message + if len(args) > 0 { + *buf = append(*buf, fmt.Sprintf(format, args...)...) + } else { + *buf = append(*buf, format...) + } + + *buf = append(*buf, '\n') +} + +func (l *Logger) log(level Level, format string, args ...interface{}) { + bufp := l.bufPool.Get().(*[]byte) + l.formatMessage(bufp, level, format, args...) + + if len(*bufp) > maxMessageSize { + *bufp = (*bufp)[:maxMessageSize] + } + _, _ = l.buffer.Write(*bufp) + + l.bufPool.Put(bufp) +} + +func (l *Logger) Error(format string, args ...interface{}) { + if l.level.Load() >= uint32(LevelError) { + l.log(LevelError, format, args...) 
+ } +} + +func (l *Logger) Warn(format string, args ...interface{}) { + if l.level.Load() >= uint32(LevelWarn) { + l.log(LevelWarn, format, args...) + } +} + +func (l *Logger) Info(format string, args ...interface{}) { + if l.level.Load() >= uint32(LevelInfo) { + l.log(LevelInfo, format, args...) + } +} + +func (l *Logger) Debug(format string, args ...interface{}) { + if l.level.Load() >= uint32(LevelDebug) { + l.log(LevelDebug, format, args...) + } +} + +func (l *Logger) Trace(format string, args ...interface{}) { + if l.level.Load() >= uint32(LevelTrace) { + l.log(LevelTrace, format, args...) + } +} + +// worker periodically flushes the buffer +func (l *Logger) worker() { + defer l.wg.Done() + + ticker := time.NewTicker(defaultFlushInterval) + defer ticker.Stop() + + buf := make([]byte, 0, maxBatchSize) + + for { + select { + case <-l.shutdown: + return + case <-ticker.C: + // Read accumulated messages + n, _ := l.buffer.Read(buf[:cap(buf)]) + if n == 0 { + continue + } + + // Write batch + _, _ = l.output.Write(buf[:n]) + } + } +} + +// Stop gracefully shuts down the logger +func (l *Logger) Stop(ctx context.Context) error { + done := make(chan struct{}) + + l.closeOnce.Do(func() { + close(l.shutdown) + }) + + go func() { + l.wg.Wait() + close(done) + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} diff --git a/client/firewall/uspfilter/log/ringbuffer.go b/client/firewall/uspfilter/log/ringbuffer.go new file mode 100644 index 000000000..dbc8f1289 --- /dev/null +++ b/client/firewall/uspfilter/log/ringbuffer.go @@ -0,0 +1,85 @@ +package log + +import "sync" + +// ringBuffer is a simple ring buffer implementation +type ringBuffer struct { + buf []byte + size int + r, w int64 // Read and write positions + mu sync.Mutex +} + +func newRingBuffer(size int) *ringBuffer { + return &ringBuffer{ + buf: make([]byte, size), + size: size, + } +} + +func (r *ringBuffer) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + r.mu.Lock() + defer r.mu.Unlock() + + if len(p) > r.size { + p = p[:r.size] + } + + n = len(p) + + // Write data, handling wrap-around + pos := int(r.w % int64(r.size)) + writeLen := min(len(p), r.size-pos) + copy(r.buf[pos:], p[:writeLen]) + + // If we have more data and need to wrap around + if writeLen < len(p) { + copy(r.buf, p[writeLen:]) + } + + // Update write position + r.w += int64(n) + + return n, nil +} + +func (r *ringBuffer) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.w == r.r { + return 0, nil + } + + // Calculate available data accounting for wraparound + available := int(r.w - r.r) + if available < 0 { + available += r.size + } + available = min(available, r.size) + + // Limit read to buffer size + toRead := min(available, len(p)) + if toRead == 0 { + return 0, nil + } + + // Read data, handling wrap-around + pos := int(r.r % int64(r.size)) + readLen := min(toRead, r.size-pos) + n = copy(p, r.buf[pos:pos+readLen]) + + // If we need more data and need to wrap around + if readLen < toRead { + n += copy(p[readLen:toRead], r.buf[:toRead-readLen]) + } + + // Update read position + r.r += int64(n) + + return n, nil +} diff --git a/client/firewall/uspfilter/rule.go b/client/firewall/uspfilter/rule.go index aa346dea6..100c35c0a 100644 --- a/client/firewall/uspfilter/rule.go +++ b/client/firewall/uspfilter/rule.go @@ -2,14 +2,15 @@ package uspfilter import ( "net" + "net/netip" "github.com/google/gopacket" firewall "github.com/netbirdio/netbird/client/firewall/manager" ) 
-// Rule to handle management of rules -type Rule struct { +// PeerRule to handle management of rules +type PeerRule struct { id string ip net.IP ipLayer gopacket.LayerType @@ -23,7 +24,22 @@ type Rule struct { udpHook func([]byte) bool } -// GetRuleID returns the rule id -func (r *Rule) ID() string { +// ID returns the rule id +func (r *PeerRule) ID() string { + return r.id +} + +type RouteRule struct { + id string + sources []netip.Prefix + destination netip.Prefix + proto firewall.Protocol + srcPort *firewall.Port + dstPort *firewall.Port + action firewall.Action +} + +// ID returns the rule id +func (r *RouteRule) ID() string { return r.id } diff --git a/client/firewall/uspfilter/tracer.go b/client/firewall/uspfilter/tracer.go new file mode 100644 index 000000000..a4c653b3b --- /dev/null +++ b/client/firewall/uspfilter/tracer.go @@ -0,0 +1,390 @@ +package uspfilter + +import ( + "fmt" + "net" + "time" + + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + + fw "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/firewall/uspfilter/conntrack" +) + +type PacketStage int + +const ( + StageReceived PacketStage = iota + StageConntrack + StagePeerACL + StageRouting + StageRouteACL + StageForwarding + StageCompleted +) + +const msgProcessingCompleted = "Processing completed" + +func (s PacketStage) String() string { + return map[PacketStage]string{ + StageReceived: "Received", + StageConntrack: "Connection Tracking", + StagePeerACL: "Peer ACL", + StageRouting: "Routing", + StageRouteACL: "Route ACL", + StageForwarding: "Forwarding", + StageCompleted: "Completed", + }[s] +} + +type ForwarderAction struct { + Action string + RemoteAddr string + Error error +} + +type TraceResult struct { + Timestamp time.Time + Stage PacketStage + Message string + Allowed bool + ForwarderAction *ForwarderAction +} + +type PacketTrace struct { + SourceIP net.IP + DestinationIP net.IP + Protocol string + SourcePort uint16 + DestinationPort uint16 + Direction fw.RuleDirection + Results []TraceResult +} + +type TCPState struct { + SYN bool + ACK bool + FIN bool + RST bool + PSH bool + URG bool +} + +type PacketBuilder struct { + SrcIP net.IP + DstIP net.IP + Protocol fw.Protocol + SrcPort uint16 + DstPort uint16 + ICMPType uint8 + ICMPCode uint8 + Direction fw.RuleDirection + PayloadSize int + TCPState *TCPState +} + +func (t *PacketTrace) AddResult(stage PacketStage, message string, allowed bool) { + t.Results = append(t.Results, TraceResult{ + Timestamp: time.Now(), + Stage: stage, + Message: message, + Allowed: allowed, + }) +} + +func (t *PacketTrace) AddResultWithForwarder(stage PacketStage, message string, allowed bool, action *ForwarderAction) { + t.Results = append(t.Results, TraceResult{ + Timestamp: time.Now(), + Stage: stage, + Message: message, + Allowed: allowed, + ForwarderAction: action, + }) +} + +func (p *PacketBuilder) Build() ([]byte, error) { + ip := p.buildIPLayer() + pktLayers := []gopacket.SerializableLayer{ip} + + transportLayer, err := p.buildTransportLayer(ip) + if err != nil { + return nil, err + } + pktLayers = append(pktLayers, transportLayer...) 
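+ // An optional zero-filled payload is appended next; lengths and checksums are fixed up during serialization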
+ + if p.PayloadSize > 0 { + payload := make([]byte, p.PayloadSize) + pktLayers = append(pktLayers, gopacket.Payload(payload)) + } + + return serializePacket(pktLayers) +} + +func (p *PacketBuilder) buildIPLayer() *layers.IPv4 { + return &layers.IPv4{ + Version: 4, + TTL: 64, + Protocol: layers.IPProtocol(getIPProtocolNumber(p.Protocol)), + SrcIP: p.SrcIP, + DstIP: p.DstIP, + } +} + +func (p *PacketBuilder) buildTransportLayer(ip *layers.IPv4) ([]gopacket.SerializableLayer, error) { + switch p.Protocol { + case "tcp": + return p.buildTCPLayer(ip) + case "udp": + return p.buildUDPLayer(ip) + case "icmp": + return p.buildICMPLayer() + default: + return nil, fmt.Errorf("unsupported protocol: %s", p.Protocol) + } +} + +func (p *PacketBuilder) buildTCPLayer(ip *layers.IPv4) ([]gopacket.SerializableLayer, error) { + tcp := &layers.TCP{ + SrcPort: layers.TCPPort(p.SrcPort), + DstPort: layers.TCPPort(p.DstPort), + Window: 65535, + SYN: p.TCPState != nil && p.TCPState.SYN, + ACK: p.TCPState != nil && p.TCPState.ACK, + FIN: p.TCPState != nil && p.TCPState.FIN, + RST: p.TCPState != nil && p.TCPState.RST, + PSH: p.TCPState != nil && p.TCPState.PSH, + URG: p.TCPState != nil && p.TCPState.URG, + } + if err := tcp.SetNetworkLayerForChecksum(ip); err != nil { + return nil, fmt.Errorf("set network layer for TCP checksum: %w", err) + } + return []gopacket.SerializableLayer{tcp}, nil +} + +func (p *PacketBuilder) buildUDPLayer(ip *layers.IPv4) ([]gopacket.SerializableLayer, error) { + udp := &layers.UDP{ + SrcPort: layers.UDPPort(p.SrcPort), + DstPort: layers.UDPPort(p.DstPort), + } + if err := udp.SetNetworkLayerForChecksum(ip); err != nil { + return nil, fmt.Errorf("set network layer for UDP checksum: %w", err) + } + return []gopacket.SerializableLayer{udp}, nil +} + +func (p *PacketBuilder) buildICMPLayer() ([]gopacket.SerializableLayer, error) { + icmp := &layers.ICMPv4{ + TypeCode: layers.CreateICMPv4TypeCode(p.ICMPType, p.ICMPCode), + } + if p.ICMPType == layers.ICMPv4TypeEchoRequest || p.ICMPType == layers.ICMPv4TypeEchoReply { + icmp.Id = uint16(1) + icmp.Seq = uint16(1) + } + return []gopacket.SerializableLayer{icmp}, nil +} + +func serializePacket(layers []gopacket.SerializableLayer) ([]byte, error) { + buf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{ + ComputeChecksums: true, + FixLengths: true, + } + if err := gopacket.SerializeLayers(buf, opts, layers...); err != nil { + return nil, fmt.Errorf("serialize packet: %w", err) + } + return buf.Bytes(), nil +} + +func getIPProtocolNumber(protocol fw.Protocol) int { + switch protocol { + case fw.ProtocolTCP: + return int(layers.IPProtocolTCP) + case fw.ProtocolUDP: + return int(layers.IPProtocolUDP) + case fw.ProtocolICMP: + return int(layers.IPProtocolICMPv4) + default: + return 0 + } +} + +func (m *Manager) TracePacketFromBuilder(builder *PacketBuilder) (*PacketTrace, error) { + packetData, err := builder.Build() + if err != nil { + return nil, fmt.Errorf("build packet: %w", err) + } + + return m.TracePacket(packetData, builder.Direction), nil +} + +func (m *Manager) TracePacket(packetData []byte, direction fw.RuleDirection) *PacketTrace { + + d := m.decoders.Get().(*decoder) + defer m.decoders.Put(d) + + trace := &PacketTrace{Direction: direction} + + // Initial packet decoding + if err := d.parser.DecodeLayers(packetData, &d.decoded); err != nil { + trace.AddResult(StageReceived, fmt.Sprintf("Failed to decode packet: %v", err), false) + return trace + } + + // Extract base packet info + srcIP, dstIP := m.extractIPs(d) + 
trace.SourceIP = srcIP + trace.DestinationIP = dstIP + + // Determine protocol and ports + switch d.decoded[1] { + case layers.LayerTypeTCP: + trace.Protocol = "TCP" + trace.SourcePort = uint16(d.tcp.SrcPort) + trace.DestinationPort = uint16(d.tcp.DstPort) + case layers.LayerTypeUDP: + trace.Protocol = "UDP" + trace.SourcePort = uint16(d.udp.SrcPort) + trace.DestinationPort = uint16(d.udp.DstPort) + case layers.LayerTypeICMPv4: + trace.Protocol = "ICMP" + } + + trace.AddResult(StageReceived, fmt.Sprintf("Received %s packet: %s:%d -> %s:%d", + trace.Protocol, srcIP, trace.SourcePort, dstIP, trace.DestinationPort), true) + + if direction == fw.RuleDirectionOUT { + return m.traceOutbound(packetData, trace) + } + + return m.traceInbound(packetData, trace, d, srcIP, dstIP) +} + +func (m *Manager) traceInbound(packetData []byte, trace *PacketTrace, d *decoder, srcIP net.IP, dstIP net.IP) *PacketTrace { + if m.stateful && m.handleConntrackState(trace, d, srcIP, dstIP) { + return trace + } + + if m.handleLocalDelivery(trace, packetData, d, srcIP, dstIP) { + return trace + } + + if !m.handleRouting(trace) { + return trace + } + + if m.nativeRouter { + return m.handleNativeRouter(trace) + } + + return m.handleRouteACLs(trace, d, srcIP, dstIP) +} + +func (m *Manager) handleConntrackState(trace *PacketTrace, d *decoder, srcIP, dstIP net.IP) bool { + allowed := m.isValidTrackedConnection(d, srcIP, dstIP) + msg := "No existing connection found" + if allowed { + msg = m.buildConntrackStateMessage(d) + trace.AddResult(StageConntrack, msg, true) + trace.AddResult(StageCompleted, "Packet allowed by connection tracking", true) + return true + } + trace.AddResult(StageConntrack, msg, false) + return false +} + +func (m *Manager) buildConntrackStateMessage(d *decoder) string { + msg := "Matched existing connection state" + switch d.decoded[1] { + case layers.LayerTypeTCP: + flags := getTCPFlags(&d.tcp) + msg += fmt.Sprintf(" (TCP Flags: SYN=%v ACK=%v RST=%v FIN=%v)", + flags&conntrack.TCPSyn != 0, + flags&conntrack.TCPAck != 0, + flags&conntrack.TCPRst != 0, + flags&conntrack.TCPFin != 0) + case layers.LayerTypeICMPv4: + msg += fmt.Sprintf(" (ICMP ID=%d, Seq=%d)", d.icmp4.Id, d.icmp4.Seq) + } + return msg +} + +func (m *Manager) handleLocalDelivery(trace *PacketTrace, packetData []byte, d *decoder, srcIP, dstIP net.IP) bool { + if !m.localForwarding { + trace.AddResult(StageRouting, "Local forwarding disabled", false) + trace.AddResult(StageCompleted, "Packet dropped - local forwarding disabled", false) + return true + } + + trace.AddResult(StageRouting, "Packet destined for local delivery", true) + blocked := m.peerACLsBlock(srcIP, packetData, m.incomingRules, d) + + msg := "Allowed by peer ACL rules" + if blocked { + msg = "Blocked by peer ACL rules" + } + trace.AddResult(StagePeerACL, msg, !blocked) + + if m.netstack { + m.addForwardingResult(trace, "proxy-local", "127.0.0.1", !blocked) + } + + trace.AddResult(StageCompleted, msgProcessingCompleted, !blocked) + return true +} + +func (m *Manager) handleRouting(trace *PacketTrace) bool { + if !m.routingEnabled { + trace.AddResult(StageRouting, "Routing disabled", false) + trace.AddResult(StageCompleted, "Packet dropped - routing disabled", false) + return false + } + trace.AddResult(StageRouting, "Routing enabled, checking ACLs", true) + return true +} + +func (m *Manager) handleNativeRouter(trace *PacketTrace) *PacketTrace { + trace.AddResult(StageRouteACL, "Using native router, skipping ACL checks", true) + trace.AddResult(StageForwarding, "Forwarding via 
native router", true) + trace.AddResult(StageCompleted, msgProcessingCompleted, true) + return trace +} + +func (m *Manager) handleRouteACLs(trace *PacketTrace, d *decoder, srcIP, dstIP net.IP) *PacketTrace { + proto := getProtocolFromPacket(d) + srcPort, dstPort := getPortsFromPacket(d) + allowed := m.routeACLsPass(srcIP, dstIP, proto, srcPort, dstPort) + + msg := "Allowed by route ACLs" + if !allowed { + msg = "Blocked by route ACLs" + } + trace.AddResult(StageRouteACL, msg, allowed) + + if allowed && m.forwarder != nil { + m.addForwardingResult(trace, "proxy-remote", fmt.Sprintf("%s:%d", dstIP, dstPort), true) + } + + trace.AddResult(StageCompleted, msgProcessingCompleted, allowed) + return trace +} + +func (m *Manager) addForwardingResult(trace *PacketTrace, action, remoteAddr string, allowed bool) { + fwdAction := &ForwarderAction{ + Action: action, + RemoteAddr: remoteAddr, + } + trace.AddResultWithForwarder(StageForwarding, + fmt.Sprintf("Forwarding to %s", fwdAction.Action), allowed, fwdAction) +} + +func (m *Manager) traceOutbound(packetData []byte, trace *PacketTrace) *PacketTrace { + // will create or update the connection state + dropped := m.processOutgoingHooks(packetData) + if dropped { + trace.AddResult(StageCompleted, "Packet dropped by outgoing hook", false) + } else { + trace.AddResult(StageCompleted, "Packet allowed (outgoing)", true) + } + return trace +} diff --git a/client/firewall/uspfilter/uspfilter.go b/client/firewall/uspfilter/uspfilter.go index 049011f73..b3d189261 100644 --- a/client/firewall/uspfilter/uspfilter.go +++ b/client/firewall/uspfilter/uspfilter.go @@ -6,7 +6,9 @@ import ( "net" "net/netip" "os" + "slices" "strconv" + "strings" "sync" "github.com/google/gopacket" @@ -15,29 +17,53 @@ import ( log "github.com/sirupsen/logrus" firewall "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/firewall/uspfilter/common" "github.com/netbirdio/netbird/client/firewall/uspfilter/conntrack" - "github.com/netbirdio/netbird/client/iface" - "github.com/netbirdio/netbird/client/iface/device" + "github.com/netbirdio/netbird/client/firewall/uspfilter/forwarder" + nblog "github.com/netbirdio/netbird/client/firewall/uspfilter/log" + "github.com/netbirdio/netbird/client/iface/netstack" "github.com/netbirdio/netbird/client/internal/statemanager" ) const layerTypeAll = 0 -const EnvDisableConntrack = "NB_DISABLE_CONNTRACK" +const ( + // EnvDisableConntrack disables the stateful filter, replies to outbound traffic won't be allowed. + EnvDisableConntrack = "NB_DISABLE_CONNTRACK" + + // EnvDisableUserspaceRouting disables userspace routing, to-be-routed packets will be dropped. + EnvDisableUserspaceRouting = "NB_DISABLE_USERSPACE_ROUTING" + + // EnvForceUserspaceRouter forces userspace routing even if native routing is available. 
+ EnvForceUserspaceRouter = "NB_FORCE_USERSPACE_ROUTER" + + // EnvEnableNetstackLocalForwarding enables forwarding of local traffic to the native stack when running netstack + // Leaving this on by default introduces a security risk as sockets on listening on localhost only will be accessible + EnvEnableNetstackLocalForwarding = "NB_ENABLE_NETSTACK_LOCAL_FORWARDING" +) var ( errRouteNotSupported = errors.New("route not supported with userspace firewall") errNatNotSupported = errors.New("nat not supported with userspace firewall") ) -// IFaceMapper defines subset methods of interface required for manager -type IFaceMapper interface { - SetFilter(device.PacketFilter) error - Address() iface.WGAddress -} - // RuleSet is a set of rules grouped by a string key -type RuleSet map[string]Rule +type RuleSet map[string]PeerRule + +type RouteRules []RouteRule + +func (r RouteRules) Sort() { + slices.SortStableFunc(r, func(a, b RouteRule) int { + // Deny rules come first + if a.action == firewall.ActionDrop && b.action != firewall.ActionDrop { + return -1 + } + if a.action != firewall.ActionDrop && b.action == firewall.ActionDrop { + return 1 + } + return strings.Compare(a.id, b.id) + }) +} // Manager userspace firewall manager type Manager struct { @@ -45,17 +71,34 @@ type Manager struct { outgoingRules map[string]RuleSet // incomingRules is used for filtering and hooks incomingRules map[string]RuleSet + routeRules RouteRules wgNetwork *net.IPNet decoders sync.Pool - wgIface IFaceMapper + wgIface common.IFaceMapper nativeFirewall firewall.Manager mutex sync.RWMutex - stateful bool + // indicates whether server routes are disabled + disableServerRoutes bool + // indicates whether we forward packets not destined for ourselves + routingEnabled bool + // indicates whether we leave forwarding and filtering to the native firewall + nativeRouter bool + // indicates whether we track outbound connections + stateful bool + // indicates whether wireguards runs in netstack mode + netstack bool + // indicates whether we forward local traffic to the native stack + localForwarding bool + + localipmanager *localIPManager + udpTracker *conntrack.UDPTracker icmpTracker *conntrack.ICMPTracker tcpTracker *conntrack.TCPTracker + forwarder *forwarder.Forwarder + logger *nblog.Logger } // decoder for packages @@ -72,22 +115,44 @@ type decoder struct { } // Create userspace firewall manager constructor -func Create(iface IFaceMapper) (*Manager, error) { - return create(iface) +func Create(iface common.IFaceMapper, disableServerRoutes bool) (*Manager, error) { + return create(iface, nil, disableServerRoutes) } -func CreateWithNativeFirewall(iface IFaceMapper, nativeFirewall firewall.Manager) (*Manager, error) { - mgr, err := create(iface) +func CreateWithNativeFirewall(iface common.IFaceMapper, nativeFirewall firewall.Manager, disableServerRoutes bool) (*Manager, error) { + if nativeFirewall == nil { + return nil, errors.New("native firewall is nil") + } + + mgr, err := create(iface, nativeFirewall, disableServerRoutes) if err != nil { return nil, err } - mgr.nativeFirewall = nativeFirewall return mgr, nil } -func create(iface IFaceMapper) (*Manager, error) { - disableConntrack, _ := strconv.ParseBool(os.Getenv(EnvDisableConntrack)) +func parseCreateEnv() (bool, bool) { + var disableConntrack, enableLocalForwarding bool + var err error + if val := os.Getenv(EnvDisableConntrack); val != "" { + disableConntrack, err = strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", EnvDisableConntrack, err) + } + 
} + if val := os.Getenv(EnvEnableNetstackLocalForwarding); val != "" { + enableLocalForwarding, err = strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", EnvEnableNetstackLocalForwarding, err) + } + } + + return disableConntrack, enableLocalForwarding +} + +func create(iface common.IFaceMapper, nativeFirewall firewall.Manager, disableServerRoutes bool) (*Manager, error) { + disableConntrack, enableLocalForwarding := parseCreateEnv() m := &Manager{ decoders: sync.Pool{ @@ -103,52 +168,183 @@ func create(iface IFaceMapper) (*Manager, error) { return d }, }, - outgoingRules: make(map[string]RuleSet), - incomingRules: make(map[string]RuleSet), - wgIface: iface, - stateful: !disableConntrack, + nativeFirewall: nativeFirewall, + outgoingRules: make(map[string]RuleSet), + incomingRules: make(map[string]RuleSet), + wgIface: iface, + localipmanager: newLocalIPManager(), + disableServerRoutes: disableServerRoutes, + routingEnabled: false, + stateful: !disableConntrack, + logger: nblog.NewFromLogrus(log.StandardLogger()), + netstack: netstack.IsEnabled(), + // default true for non-netstack, for netstack only if explicitly enabled + localForwarding: !netstack.IsEnabled() || enableLocalForwarding, + } + + if err := m.localipmanager.UpdateLocalIPs(iface); err != nil { + return nil, fmt.Errorf("update local IPs: %w", err) } - // Only initialize trackers if stateful mode is enabled if disableConntrack { log.Info("conntrack is disabled") } else { - m.udpTracker = conntrack.NewUDPTracker(conntrack.DefaultUDPTimeout) - m.icmpTracker = conntrack.NewICMPTracker(conntrack.DefaultICMPTimeout) - m.tcpTracker = conntrack.NewTCPTracker(conntrack.DefaultTCPTimeout) + m.udpTracker = conntrack.NewUDPTracker(conntrack.DefaultUDPTimeout, m.logger) + m.icmpTracker = conntrack.NewICMPTracker(conntrack.DefaultICMPTimeout, m.logger) + m.tcpTracker = conntrack.NewTCPTracker(conntrack.DefaultTCPTimeout, m.logger) + } + + // netstack needs the forwarder for local traffic + if m.netstack && m.localForwarding { + if err := m.initForwarder(); err != nil { + log.Errorf("failed to initialize forwarder: %v", err) + } + } + + if err := m.blockInvalidRouted(iface); err != nil { + log.Errorf("failed to block invalid routed traffic: %v", err) } if err := iface.SetFilter(m); err != nil { - return nil, err + return nil, fmt.Errorf("set filter: %w", err) } return m, nil } +func (m *Manager) blockInvalidRouted(iface common.IFaceMapper) error { + if m.forwarder == nil { + return nil + } + wgPrefix, err := netip.ParsePrefix(iface.Address().Network.String()) + if err != nil { + return fmt.Errorf("parse wireguard network: %w", err) + } + log.Debugf("blocking invalid routed traffic for %s", wgPrefix) + + if _, err := m.AddRouteFiltering( + []netip.Prefix{netip.PrefixFrom(netip.IPv4Unspecified(), 0)}, + wgPrefix, + firewall.ProtocolALL, + nil, + nil, + firewall.ActionDrop, + ); err != nil { + return fmt.Errorf("block wg nte : %w", err) + } + + // TODO: Block networks that we're a client of + + return nil +} + +func (m *Manager) determineRouting() error { + var disableUspRouting, forceUserspaceRouter bool + var err error + if val := os.Getenv(EnvDisableUserspaceRouting); val != "" { + disableUspRouting, err = strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", EnvDisableUserspaceRouting, err) + } + } + if val := os.Getenv(EnvForceUserspaceRouter); val != "" { + forceUserspaceRouter, err = strconv.ParseBool(val) + if err != nil { + log.Warnf("failed to parse %s: %v", EnvForceUserspaceRouter, 
err) + } + } + + switch { + case disableUspRouting: + m.routingEnabled = false + m.nativeRouter = false + log.Info("userspace routing is disabled") + + case m.disableServerRoutes: + // if server routes are disabled we will let packets pass to the native stack + m.routingEnabled = true + m.nativeRouter = true + + log.Info("server routes are disabled") + + case forceUserspaceRouter: + m.routingEnabled = true + m.nativeRouter = false + + log.Info("userspace routing is forced") + + case !m.netstack && m.nativeFirewall != nil && m.nativeFirewall.IsServerRouteSupported(): + // if the OS supports routing natively, then we don't need to filter/route ourselves + // netstack mode won't support native routing as there is no interface + + m.routingEnabled = true + m.nativeRouter = true + + log.Info("native routing is enabled") + + default: + m.routingEnabled = true + m.nativeRouter = false + + log.Info("userspace routing enabled by default") + } + + if m.routingEnabled && !m.nativeRouter { + return m.initForwarder() + } + + return nil +} + +// initForwarder initializes the forwarder, it disables routing on errors +func (m *Manager) initForwarder() error { + if m.forwarder != nil { + return nil + } + + // Only supported in userspace mode as we need to inject packets back into wireguard directly + intf := m.wgIface.GetWGDevice() + if intf == nil { + m.routingEnabled = false + return errors.New("forwarding not supported") + } + + forwarder, err := forwarder.New(m.wgIface, m.logger, m.netstack) + if err != nil { + m.routingEnabled = false + return fmt.Errorf("create forwarder: %w", err) + } + + m.forwarder = forwarder + + log.Debug("forwarder initialized") + + return nil +} + func (m *Manager) Init(*statemanager.Manager) error { return nil } func (m *Manager) IsServerRouteSupported() bool { - if m.nativeFirewall == nil { - return false - } else { - return true - } + return true } func (m *Manager) AddNatRule(pair firewall.RouterPair) error { - if m.nativeFirewall == nil { - return errRouteNotSupported + if m.nativeRouter && m.nativeFirewall != nil { + return m.nativeFirewall.AddNatRule(pair) } - return m.nativeFirewall.AddNatRule(pair) + + // userspace routed packets are always SNATed to the inbound direction + // TODO: implement outbound SNAT + return nil } // RemoveNatRule removes a routing firewall rule func (m *Manager) RemoveNatRule(pair firewall.RouterPair) error { - if m.nativeFirewall == nil { - return errRouteNotSupported + if m.nativeRouter && m.nativeFirewall != nil { + return m.nativeFirewall.RemoveNatRule(pair) } - return m.nativeFirewall.RemoveNatRule(pair) + return nil } // AddPeerFiltering rule to the firewall @@ -164,7 +360,7 @@ func (m *Manager) AddPeerFiltering( _ string, comment string, ) ([]firewall.Rule, error) { - r := Rule{ + r := PeerRule{ id: uuid.New().String(), ip: ip, ipLayer: layers.LayerTypeIPv6, @@ -207,18 +403,56 @@ func (m *Manager) AddPeerFiltering( return []firewall.Rule{&r}, nil } -func (m *Manager) AddRouteFiltering(sources []netip.Prefix, destination netip.Prefix, proto firewall.Protocol, sPort *firewall.Port, dPort *firewall.Port, action firewall.Action) (firewall.Rule, error) { - if m.nativeFirewall == nil { - return nil, errRouteNotSupported +func (m *Manager) AddRouteFiltering( + sources []netip.Prefix, + destination netip.Prefix, + proto firewall.Protocol, + sPort *firewall.Port, + dPort *firewall.Port, + action firewall.Action, +) (firewall.Rule, error) { + if m.nativeRouter && m.nativeFirewall != nil { + return m.nativeFirewall.AddRouteFiltering(sources, 
destination, proto, sPort, dPort, action) } - return m.nativeFirewall.AddRouteFiltering(sources, destination, proto, sPort, dPort, action) + + m.mutex.Lock() + defer m.mutex.Unlock() + + ruleID := uuid.New().String() + rule := RouteRule{ + id: ruleID, + sources: sources, + destination: destination, + proto: proto, + srcPort: sPort, + dstPort: dPort, + action: action, + } + + m.routeRules = append(m.routeRules, rule) + m.routeRules.Sort() + + return &rule, nil } func (m *Manager) DeleteRouteRule(rule firewall.Rule) error { - if m.nativeFirewall == nil { - return errRouteNotSupported + if m.nativeRouter && m.nativeFirewall != nil { + return m.nativeFirewall.DeleteRouteRule(rule) } - return m.nativeFirewall.DeleteRouteRule(rule) + + m.mutex.Lock() + defer m.mutex.Unlock() + + ruleID := rule.ID() + idx := slices.IndexFunc(m.routeRules, func(r RouteRule) bool { + return r.id == ruleID + }) + if idx < 0 { + return fmt.Errorf("route rule not found: %s", ruleID) + } + + m.routeRules = slices.Delete(m.routeRules, idx, idx+1) + return nil } // DeletePeerRule from the firewall by rule definition @@ -226,7 +460,7 @@ func (m *Manager) DeletePeerRule(rule firewall.Rule) error { m.mutex.Lock() defer m.mutex.Unlock() - r, ok := rule.(*Rule) + r, ok := rule.(*PeerRule) if !ok { return fmt.Errorf("delete rule: invalid rule type: %T", rule) } @@ -273,10 +507,14 @@ func (m *Manager) DropOutgoing(packetData []byte) bool { // DropIncoming filter incoming packets func (m *Manager) DropIncoming(packetData []byte) bool { - return m.dropFilter(packetData, m.incomingRules) + return m.dropFilter(packetData) +} + +// UpdateLocalIPs updates the list of local IPs +func (m *Manager) UpdateLocalIPs() error { + return m.localipmanager.UpdateLocalIPs(m.wgIface) } -// processOutgoingHooks processes UDP hooks for outgoing packets and tracks TCP/UDP/ICMP func (m *Manager) processOutgoingHooks(packetData []byte) bool { m.mutex.RLock() defer m.mutex.RUnlock() @@ -297,18 +535,11 @@ func (m *Manager) processOutgoingHooks(packetData []byte) bool { return false } - // Always process UDP hooks - if d.decoded[1] == layers.LayerTypeUDP { - // Track UDP state only if enabled - if m.stateful { - m.trackUDPOutbound(d, srcIP, dstIP) - } - return m.checkUDPHooks(d, dstIP, packetData) - } - - // Track other protocols only if stateful mode is enabled + // Track all protocols if stateful mode is enabled if m.stateful { switch d.decoded[1] { + case layers.LayerTypeUDP: + m.trackUDPOutbound(d, srcIP, dstIP) case layers.LayerTypeTCP: m.trackTCPOutbound(d, srcIP, dstIP) case layers.LayerTypeICMPv4: @@ -316,6 +547,11 @@ func (m *Manager) processOutgoingHooks(packetData []byte) bool { } } + // Process UDP hooks even if stateful mode is disabled + if d.decoded[1] == layers.LayerTypeUDP { + return m.checkUDPHooks(d, dstIP, packetData) + } + return false } @@ -397,10 +633,9 @@ func (m *Manager) trackICMPOutbound(d *decoder, srcIP, dstIP net.IP) { } } -// dropFilter implements filtering logic for incoming packets -func (m *Manager) dropFilter(packetData []byte, rules map[string]RuleSet) bool { - // TODO: Disable router if --disable-server-router is set - +// dropFilter implements filtering logic for incoming packets. +// If it returns true, the packet should be dropped. 
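+// Order: stateful conntrack lookup first, then local-delivery vs. routed-traffic handling.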
+func (m *Manager) dropFilter(packetData []byte) bool { m.mutex.RLock() defer m.mutex.RUnlock() @@ -413,39 +648,127 @@ func (m *Manager) dropFilter(packetData []byte, rules map[string]RuleSet) bool { srcIP, dstIP := m.extractIPs(d) if srcIP == nil { - log.Errorf("unknown layer: %v", d.decoded[0]) + m.logger.Error("Unknown network layer: %v", d.decoded[0]) return true } - if !m.isWireguardTraffic(srcIP, dstIP) { - return false - } - - // Check connection state only if enabled + // For all inbound traffic, first check if it matches a tracked connection. + // This must happen before any other filtering because the packets are statefully tracked. if m.stateful && m.isValidTrackedConnection(d, srcIP, dstIP) { return false } - return m.applyRules(srcIP, packetData, rules, d) + if m.localipmanager.IsLocalIP(dstIP) { + return m.handleLocalTraffic(d, srcIP, dstIP, packetData) + } + + return m.handleRoutedTraffic(d, srcIP, dstIP, packetData) +} + +// handleLocalTraffic handles local traffic. +// If it returns true, the packet should be dropped. +func (m *Manager) handleLocalTraffic(d *decoder, srcIP, dstIP net.IP, packetData []byte) bool { + if !m.localForwarding { + m.logger.Trace("Dropping local packet (local forwarding disabled): src=%s dst=%s", srcIP, dstIP) + return true + } + + if m.peerACLsBlock(srcIP, packetData, m.incomingRules, d) { + m.logger.Trace("Dropping local packet (ACL denied): src=%s dst=%s", + srcIP, dstIP) + return true + } + + // if running in netstack mode we need to pass this to the forwarder + if m.netstack { + m.handleNetstackLocalTraffic(packetData) + + // don't process this packet further + return true + } + + return false +} +func (m *Manager) handleNetstackLocalTraffic(packetData []byte) { + if m.forwarder == nil { + return + } + + if err := m.forwarder.InjectIncomingPacket(packetData); err != nil { + m.logger.Error("Failed to inject local packet: %v", err) + } +} + +// handleRoutedTraffic handles routed traffic. +// If it returns true, the packet should be dropped. 
+func (m *Manager) handleRoutedTraffic(d *decoder, srcIP, dstIP net.IP, packetData []byte) bool { + // Drop if routing is disabled + if !m.routingEnabled { + m.logger.Trace("Dropping routed packet (routing disabled): src=%s dst=%s", + srcIP, dstIP) + return true + } + + // Pass to native stack if native router is enabled or forced + if m.nativeRouter { + return false + } + + proto := getProtocolFromPacket(d) + srcPort, dstPort := getPortsFromPacket(d) + + if !m.routeACLsPass(srcIP, dstIP, proto, srcPort, dstPort) { + m.logger.Trace("Dropping routed packet (ACL denied): src=%s:%d dst=%s:%d proto=%v", + srcIP, srcPort, dstIP, dstPort, proto) + return true + } + + // Let forwarder handle the packet if it passed route ACLs + if err := m.forwarder.InjectIncomingPacket(packetData); err != nil { + m.logger.Error("Failed to inject incoming packet: %v", err) + } + + // Forwarded packets shouldn't reach the native stack, hence they won't be visible in a packet capture + return true +} + +func getProtocolFromPacket(d *decoder) firewall.Protocol { + switch d.decoded[1] { + case layers.LayerTypeTCP: + return firewall.ProtocolTCP + case layers.LayerTypeUDP: + return firewall.ProtocolUDP + case layers.LayerTypeICMPv4, layers.LayerTypeICMPv6: + return firewall.ProtocolICMP + default: + return firewall.ProtocolALL + } +} + +func getPortsFromPacket(d *decoder) (srcPort, dstPort uint16) { + switch d.decoded[1] { + case layers.LayerTypeTCP: + return uint16(d.tcp.SrcPort), uint16(d.tcp.DstPort) + case layers.LayerTypeUDP: + return uint16(d.udp.SrcPort), uint16(d.udp.DstPort) + default: + return 0, 0 + } } func (m *Manager) isValidPacket(d *decoder, packetData []byte) bool { if err := d.parser.DecodeLayers(packetData, &d.decoded); err != nil { - log.Tracef("couldn't decode layer, err: %s", err) + m.logger.Trace("couldn't decode packet, err: %s", err) return false } if len(d.decoded) < 2 { - log.Tracef("not enough levels in network packet") + m.logger.Trace("packet doesn't have network and transport layers") return false } return true } -func (m *Manager) isWireguardTraffic(srcIP, dstIP net.IP) bool { - return m.wgNetwork.Contains(srcIP) && m.wgNetwork.Contains(dstIP) -} - func (m *Manager) isValidTrackedConnection(d *decoder, srcIP, dstIP net.IP) bool { switch d.decoded[1] { case layers.LayerTypeTCP: @@ -480,7 +803,22 @@ func (m *Manager) isValidTrackedConnection(d *decoder, srcIP, dstIP net.IP) bool return false } -func (m *Manager) applyRules(srcIP net.IP, packetData []byte, rules map[string]RuleSet, d *decoder) bool { +// isSpecialICMP returns true if the packet is a special ICMP packet that should be allowed +func (m *Manager) isSpecialICMP(d *decoder) bool { + if d.decoded[1] != layers.LayerTypeICMPv4 { + return false + } + + icmpType := d.icmp4.TypeCode.Type() + return icmpType == layers.ICMPv4TypeDestinationUnreachable || + icmpType == layers.ICMPv4TypeTimeExceeded +} + +func (m *Manager) peerACLsBlock(srcIP net.IP, packetData []byte, rules map[string]RuleSet, d *decoder) bool { + if m.isSpecialICMP(d) { + return false + } + if filter, ok := validateRule(srcIP, packetData, rules[srcIP.String()], d); ok { return filter } @@ -514,7 +852,7 @@ func portsMatch(rulePort *firewall.Port, packetPort uint16) bool { return false } -func validateRule(ip net.IP, packetData []byte, rules map[string]Rule, d *decoder) (bool, bool) { +func validateRule(ip net.IP, packetData []byte, rules map[string]PeerRule, d *decoder) (bool, bool) { payloadLayer := d.decoded[1] for _, rule := range rules { if rule.matchByIP && 
!ip.Equal(rule.ip) { @@ -551,6 +889,51 @@ func validateRule(ip net.IP, packetData []byte, rules map[string]Rule, d *decode return false, false } +// routeACLsPass returns true if the packet is allowed by the route ACLs +func (m *Manager) routeACLsPass(srcIP, dstIP net.IP, proto firewall.Protocol, srcPort, dstPort uint16) bool { + m.mutex.RLock() + defer m.mutex.RUnlock() + + srcAddr := netip.AddrFrom4([4]byte(srcIP.To4())) + dstAddr := netip.AddrFrom4([4]byte(dstIP.To4())) + + for _, rule := range m.routeRules { + if m.ruleMatches(rule, srcAddr, dstAddr, proto, srcPort, dstPort) { + return rule.action == firewall.ActionAccept + } + } + return false +} + +func (m *Manager) ruleMatches(rule RouteRule, srcAddr, dstAddr netip.Addr, proto firewall.Protocol, srcPort, dstPort uint16) bool { + if !rule.destination.Contains(dstAddr) { + return false + } + + sourceMatched := false + for _, src := range rule.sources { + if src.Contains(srcAddr) { + sourceMatched = true + break + } + } + if !sourceMatched { + return false + } + + if rule.proto != firewall.ProtocolALL && rule.proto != proto { + return false + } + + if proto == firewall.ProtocolTCP || proto == firewall.ProtocolUDP { + if !portsMatch(rule.srcPort, srcPort) || !portsMatch(rule.dstPort, dstPort) { + return false + } + } + + return true +} + // SetNetwork of the wireguard interface to which filtering applied func (m *Manager) SetNetwork(network *net.IPNet) { m.wgNetwork = network @@ -562,7 +945,7 @@ func (m *Manager) SetNetwork(network *net.IPNet) { func (m *Manager) AddUDPPacketHook( in bool, ip net.IP, dPort uint16, hook func([]byte) bool, ) string { - r := Rule{ + r := PeerRule{ id: uuid.New().String(), ip: ip, protoLayer: layers.LayerTypeUDP, @@ -579,12 +962,12 @@ func (m *Manager) AddUDPPacketHook( m.mutex.Lock() if in { if _, ok := m.incomingRules[r.ip.String()]; !ok { - m.incomingRules[r.ip.String()] = make(map[string]Rule) + m.incomingRules[r.ip.String()] = make(map[string]PeerRule) } m.incomingRules[r.ip.String()][r.id] = r } else { if _, ok := m.outgoingRules[r.ip.String()]; !ok { - m.outgoingRules[r.ip.String()] = make(map[string]Rule) + m.outgoingRules[r.ip.String()] = make(map[string]PeerRule) } m.outgoingRules[r.ip.String()][r.id] = r } @@ -617,3 +1000,41 @@ func (m *Manager) RemovePacketHook(hookID string) error { } return fmt.Errorf("hook with given id not found") } + +// SetLogLevel sets the log level for the firewall manager +func (m *Manager) SetLogLevel(level log.Level) { + if m.logger != nil { + m.logger.SetLevel(nblog.Level(level)) + } +} + +func (m *Manager) EnableRouting() error { + m.mutex.Lock() + defer m.mutex.Unlock() + + return m.determineRouting() +} + +func (m *Manager) DisableRouting() error { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.forwarder == nil { + return nil + } + + m.routingEnabled = false + m.nativeRouter = false + + // don't stop forwarder if in use by netstack + if m.netstack && m.localForwarding { + return nil + } + + m.forwarder.Stop() + m.forwarder = nil + + log.Debug("forwarder stopped") + + return nil +} diff --git a/client/firewall/uspfilter/uspfilter_bench_test.go b/client/firewall/uspfilter/uspfilter_bench_test.go index 46bc4439d..875bb2425 100644 --- a/client/firewall/uspfilter/uspfilter_bench_test.go +++ b/client/firewall/uspfilter/uspfilter_bench_test.go @@ -1,9 +1,12 @@ +//go:build uspbench + package uspfilter import ( "fmt" "math/rand" "net" + "net/netip" "os" "strings" "testing" @@ -155,7 +158,7 @@ func BenchmarkCoreFiltering(b *testing.B) { // Create manager and basic setup
manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) defer b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -185,7 +188,7 @@ func BenchmarkCoreFiltering(b *testing.B) { // Measure inbound packet processing b.ResetTimer() for i := 0; i < b.N; i++ { - manager.dropFilter(inbound, manager.incomingRules) + manager.dropFilter(inbound) } }) } @@ -200,7 +203,7 @@ func BenchmarkStateScaling(b *testing.B) { b.Run(fmt.Sprintf("conns_%d", count), func(b *testing.B) { manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -228,7 +231,7 @@ func BenchmarkStateScaling(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - manager.dropFilter(testIn, manager.incomingRules) + manager.dropFilter(testIn) } }) } @@ -248,7 +251,7 @@ func BenchmarkEstablishmentOverhead(b *testing.B) { b.Run(sc.name, func(b *testing.B) { manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -269,7 +272,7 @@ func BenchmarkEstablishmentOverhead(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - manager.dropFilter(inbound, manager.incomingRules) + manager.dropFilter(inbound) } }) } @@ -447,7 +450,7 @@ func BenchmarkRoutedNetworkReturn(b *testing.B) { b.Run(sc.name, func(b *testing.B) { manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -472,7 +475,7 @@ func BenchmarkRoutedNetworkReturn(b *testing.B) { manager.processOutgoingHooks(syn) // SYN-ACK synack := generateTCPPacketWithFlags(b, dstIP, srcIP, 80, 1024, uint16(conntrack.TCPSyn|conntrack.TCPAck)) - manager.dropFilter(synack, manager.incomingRules) + manager.dropFilter(synack) // ACK ack := generateTCPPacketWithFlags(b, srcIP, dstIP, 1024, 80, uint16(conntrack.TCPAck)) manager.processOutgoingHooks(ack) @@ -481,7 +484,7 @@ func BenchmarkRoutedNetworkReturn(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - manager.dropFilter(inbound, manager.incomingRules) + manager.dropFilter(inbound) } }) } @@ -574,7 +577,7 @@ func BenchmarkLongLivedConnections(b *testing.B) { manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) defer b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -618,7 +621,7 @@ func BenchmarkLongLivedConnections(b *testing.B) { // SYN-ACK synack := generateTCPPacketWithFlags(b, dstIPs[i], srcIPs[i], 80, uint16(1024+i), uint16(conntrack.TCPSyn|conntrack.TCPAck)) - manager.dropFilter(synack, manager.incomingRules) + manager.dropFilter(synack) // ACK ack := generateTCPPacketWithFlags(b, srcIPs[i], dstIPs[i], @@ -646,7 +649,7 @@ func BenchmarkLongLivedConnections(b *testing.B) { // First outbound data manager.processOutgoingHooks(outPackets[connIdx]) // Then inbound response - this is what we're actually measuring - manager.dropFilter(inPackets[connIdx], manager.incomingRules) + manager.dropFilter(inPackets[connIdx]) } }) } @@ -665,7 +668,7 @@ func BenchmarkShortLivedConnections(b *testing.B) { manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) defer b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -754,17 +757,17 @@ func BenchmarkShortLivedConnections(b *testing.B) { 
// Connection establishment manager.processOutgoingHooks(p.syn) - manager.dropFilter(p.synAck, manager.incomingRules) + manager.dropFilter(p.synAck) manager.processOutgoingHooks(p.ack) // Data transfer manager.processOutgoingHooks(p.request) - manager.dropFilter(p.response, manager.incomingRules) + manager.dropFilter(p.response) // Connection teardown manager.processOutgoingHooks(p.finClient) - manager.dropFilter(p.ackServer, manager.incomingRules) - manager.dropFilter(p.finServer, manager.incomingRules) + manager.dropFilter(p.ackServer) + manager.dropFilter(p.finServer) manager.processOutgoingHooks(p.ackClient) } }) @@ -784,7 +787,7 @@ func BenchmarkParallelLongLivedConnections(b *testing.B) { manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) defer b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -825,7 +828,7 @@ func BenchmarkParallelLongLivedConnections(b *testing.B) { synack := generateTCPPacketWithFlags(b, dstIPs[i], srcIPs[i], 80, uint16(1024+i), uint16(conntrack.TCPSyn|conntrack.TCPAck)) - manager.dropFilter(synack, manager.incomingRules) + manager.dropFilter(synack) ack := generateTCPPacketWithFlags(b, srcIPs[i], dstIPs[i], uint16(1024+i), 80, uint16(conntrack.TCPAck)) @@ -852,7 +855,7 @@ func BenchmarkParallelLongLivedConnections(b *testing.B) { // Simulate bidirectional traffic manager.processOutgoingHooks(outPackets[connIdx]) - manager.dropFilter(inPackets[connIdx], manager.incomingRules) + manager.dropFilter(inPackets[connIdx]) } }) }) @@ -872,7 +875,7 @@ func BenchmarkParallelShortLivedConnections(b *testing.B) { manager, _ := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) defer b.Cleanup(func() { require.NoError(b, manager.Reset(nil)) }) @@ -949,15 +952,15 @@ func BenchmarkParallelShortLivedConnections(b *testing.B) { // Full connection lifecycle manager.processOutgoingHooks(p.syn) - manager.dropFilter(p.synAck, manager.incomingRules) + manager.dropFilter(p.synAck) manager.processOutgoingHooks(p.ack) manager.processOutgoingHooks(p.request) - manager.dropFilter(p.response, manager.incomingRules) + manager.dropFilter(p.response) manager.processOutgoingHooks(p.finClient) - manager.dropFilter(p.ackServer, manager.incomingRules) - manager.dropFilter(p.finServer, manager.incomingRules) + manager.dropFilter(p.ackServer) + manager.dropFilter(p.finServer) manager.processOutgoingHooks(p.ackClient) } }) @@ -996,3 +999,72 @@ func generateTCPPacketWithFlags(b *testing.B, srcIP, dstIP net.IP, srcPort, dstP require.NoError(b, gopacket.SerializeLayers(buf, opts, ipv4, tcp, gopacket.Payload("test"))) return buf.Bytes() } + +func BenchmarkRouteACLs(b *testing.B) { + manager := setupRoutedManager(b, "10.10.0.100/16") + + // Add several route rules to simulate real-world scenario + rules := []struct { + sources []netip.Prefix + dest netip.Prefix + proto fw.Protocol + port *fw.Port + }{ + { + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + port: &fw.Port{Values: []uint16{80, 443}}, + }, + { + sources: []netip.Prefix{ + netip.MustParsePrefix("172.16.0.0/12"), + netip.MustParsePrefix("10.0.0.0/8"), + }, + dest: netip.MustParsePrefix("0.0.0.0/0"), + proto: fw.ProtocolICMP, + }, + { + sources: []netip.Prefix{netip.MustParsePrefix("0.0.0.0/0")}, + dest: netip.MustParsePrefix("192.168.0.0/16"), + proto: fw.ProtocolUDP, + port: &fw.Port{Values: []uint16{53}}, + }, + } + + for _, r := 
range rules { + _, err := manager.AddRouteFiltering( + r.sources, + r.dest, + r.proto, + nil, + r.port, + fw.ActionAccept, + ) + if err != nil { + b.Fatal(err) + } + } + + // Test cases that exercise different matching scenarios + cases := []struct { + srcIP string + dstIP string + proto fw.Protocol + dstPort uint16 + }{ + {"100.10.0.1", "192.168.1.100", fw.ProtocolTCP, 443}, // Match first rule + {"172.16.0.1", "8.8.8.8", fw.ProtocolICMP, 0}, // Match second rule + {"1.1.1.1", "192.168.1.53", fw.ProtocolUDP, 53}, // Match third rule + {"192.168.1.1", "10.0.0.1", fw.ProtocolTCP, 8080}, // No match + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, tc := range cases { + srcIP := net.ParseIP(tc.srcIP) + dstIP := net.ParseIP(tc.dstIP) + manager.routeACLsPass(srcIP, dstIP, tc.proto, 0, tc.dstPort) + } + } +} diff --git a/client/firewall/uspfilter/uspfilter_filter_test.go b/client/firewall/uspfilter/uspfilter_filter_test.go new file mode 100644 index 000000000..9a1456d00 --- /dev/null +++ b/client/firewall/uspfilter/uspfilter_filter_test.go @@ -0,0 +1,1015 @@ +package uspfilter + +import ( + "net" + "net/netip" + "testing" + + "github.com/golang/mock/gomock" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "github.com/stretchr/testify/require" + wgdevice "golang.zx2c4.com/wireguard/device" + + fw "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/iface" + "github.com/netbirdio/netbird/client/iface/device" + "github.com/netbirdio/netbird/client/iface/mocks" +) + +func TestPeerACLFiltering(t *testing.T) { + localIP := net.ParseIP("100.10.0.100") + wgNet := &net.IPNet{ + IP: net.ParseIP("100.10.0.0"), + Mask: net.CIDRMask(16, 32), + } + + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + AddressFunc: func() iface.WGAddress { + return iface.WGAddress{ + IP: localIP, + Network: wgNet, + } + }, + } + + manager, err := Create(ifaceMock, false) + require.NoError(t, err) + require.NotNil(t, manager) + + t.Cleanup(func() { + require.NoError(t, manager.Reset(nil)) + }) + + manager.wgNetwork = wgNet + + err = manager.UpdateLocalIPs() + require.NoError(t, err) + + testCases := []struct { + name string + srcIP string + dstIP string + proto fw.Protocol + srcPort uint16 + dstPort uint16 + ruleIP string + ruleProto fw.Protocol + ruleSrcPort *fw.Port + ruleDstPort *fw.Port + ruleAction fw.Action + shouldBeBlocked bool + }{ + { + name: "Allow TCP traffic from WG peer", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolTCP, + ruleDstPort: &fw.Port{Values: []uint16{443}}, + ruleAction: fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Allow UDP traffic from WG peer", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolUDP, + srcPort: 12345, + dstPort: 53, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolUDP, + ruleDstPort: &fw.Port{Values: []uint16{53}}, + ruleAction: fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Allow ICMP traffic from WG peer", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolICMP, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolICMP, + ruleAction: fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Allow all traffic from WG peer", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolALL, + ruleAction: 
fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Allow traffic from non-WG source", + srcIP: "192.168.1.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + ruleIP: "192.168.1.1", + ruleProto: fw.ProtocolTCP, + ruleDstPort: &fw.Port{Values: []uint16{443}}, + ruleAction: fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Allow all traffic with 0.0.0.0 rule", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + ruleIP: "0.0.0.0", + ruleProto: fw.ProtocolALL, + ruleAction: fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Allow TCP traffic within port range", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 8080, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolTCP, + ruleDstPort: &fw.Port{IsRange: true, Values: []uint16{8000, 8100}}, + ruleAction: fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Block TCP traffic outside port range", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 7999, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolTCP, + ruleDstPort: &fw.Port{IsRange: true, Values: []uint16{8000, 8100}}, + ruleAction: fw.ActionAccept, + shouldBeBlocked: true, + }, + { + name: "Allow TCP traffic with source port range", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 32100, + dstPort: 443, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolTCP, + ruleSrcPort: &fw.Port{IsRange: true, Values: []uint16{32000, 33000}}, + ruleDstPort: &fw.Port{Values: []uint16{443}}, + ruleAction: fw.ActionAccept, + shouldBeBlocked: false, + }, + { + name: "Block TCP traffic outside source port range", + srcIP: "100.10.0.1", + dstIP: "100.10.0.100", + proto: fw.ProtocolTCP, + srcPort: 31999, + dstPort: 443, + ruleIP: "100.10.0.1", + ruleProto: fw.ProtocolTCP, + ruleSrcPort: &fw.Port{IsRange: true, Values: []uint16{32000, 33000}}, + ruleDstPort: &fw.Port{Values: []uint16{443}}, + ruleAction: fw.ActionAccept, + shouldBeBlocked: true, + }, + } + + t.Run("Implicit DROP (no rules)", func(t *testing.T) { + packet := createTestPacket(t, "100.10.0.1", "100.10.0.100", fw.ProtocolTCP, 12345, 443) + isDropped := manager.DropIncoming(packet) + require.True(t, isDropped, "Packet should be dropped when no rules exist") + }) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rules, err := manager.AddPeerFiltering( + net.ParseIP(tc.ruleIP), + tc.ruleProto, + tc.ruleSrcPort, + tc.ruleDstPort, + tc.ruleAction, + "", + tc.name, + ) + require.NoError(t, err) + require.NotEmpty(t, rules) + + t.Cleanup(func() { + for _, rule := range rules { + require.NoError(t, manager.DeletePeerRule(rule)) + } + }) + + packet := createTestPacket(t, tc.srcIP, tc.dstIP, tc.proto, tc.srcPort, tc.dstPort) + isDropped := manager.DropIncoming(packet) + require.Equal(t, tc.shouldBeBlocked, isDropped) + }) + } +} + +func createTestPacket(t *testing.T, srcIP, dstIP string, proto fw.Protocol, srcPort, dstPort uint16) []byte { + t.Helper() + + buf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{ + ComputeChecksums: true, + FixLengths: true, + } + + ipLayer := &layers.IPv4{ + Version: 4, + TTL: 64, + SrcIP: net.ParseIP(srcIP), + DstIP: net.ParseIP(dstIP), + } + + var err error + switch proto { + case fw.ProtocolTCP: + ipLayer.Protocol = layers.IPProtocolTCP + tcp := &layers.TCP{ + SrcPort: layers.TCPPort(srcPort), + DstPort: 
layers.TCPPort(dstPort), + } + err = tcp.SetNetworkLayerForChecksum(ipLayer) + require.NoError(t, err) + err = gopacket.SerializeLayers(buf, opts, ipLayer, tcp) + + case fw.ProtocolUDP: + ipLayer.Protocol = layers.IPProtocolUDP + udp := &layers.UDP{ + SrcPort: layers.UDPPort(srcPort), + DstPort: layers.UDPPort(dstPort), + } + err = udp.SetNetworkLayerForChecksum(ipLayer) + require.NoError(t, err) + err = gopacket.SerializeLayers(buf, opts, ipLayer, udp) + + case fw.ProtocolICMP: + ipLayer.Protocol = layers.IPProtocolICMPv4 + icmp := &layers.ICMPv4{ + TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0), + } + err = gopacket.SerializeLayers(buf, opts, ipLayer, icmp) + + default: + err = gopacket.SerializeLayers(buf, opts, ipLayer) + } + + require.NoError(t, err) + return buf.Bytes() +} + +func setupRoutedManager(tb testing.TB, network string) *Manager { + tb.Helper() + + ctrl := gomock.NewController(tb) + dev := mocks.NewMockDevice(ctrl) + dev.EXPECT().MTU().Return(1500, nil).AnyTimes() + + localIP, wgNet, err := net.ParseCIDR(network) + require.NoError(tb, err) + + ifaceMock := &IFaceMock{ + SetFilterFunc: func(device.PacketFilter) error { return nil }, + AddressFunc: func() iface.WGAddress { + return iface.WGAddress{ + IP: localIP, + Network: wgNet, + } + }, + GetDeviceFunc: func() *device.FilteredDevice { + return &device.FilteredDevice{Device: dev} + }, + GetWGDeviceFunc: func() *wgdevice.Device { + return &wgdevice.Device{} + }, + } + + manager, err := Create(ifaceMock, false) + require.NoError(tb, manager.EnableRouting()) + require.NoError(tb, err) + require.NotNil(tb, manager) + require.True(tb, manager.routingEnabled) + require.False(tb, manager.nativeRouter) + + tb.Cleanup(func() { + require.NoError(tb, manager.Reset(nil)) + }) + + return manager +} + +func TestRouteACLFiltering(t *testing.T) { + manager := setupRoutedManager(t, "10.10.0.100/16") + + type rule struct { + sources []netip.Prefix + dest netip.Prefix + proto fw.Protocol + srcPort *fw.Port + dstPort *fw.Port + action fw.Action + } + + testCases := []struct { + name string + srcIP string + dstIP string + proto fw.Protocol + srcPort uint16 + dstPort uint16 + rule rule + shouldPass bool + }{ + { + name: "Allow TCP with specific source and destination", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{443}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow any source to specific destination", + srcIP: "172.16.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("0.0.0.0/0")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{443}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow any source to any destination", + srcIP: "172.16.0.1", + dstIP: "203.0.113.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("0.0.0.0/0")}, + dest: netip.MustParsePrefix("0.0.0.0/0"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{443}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow UDP DNS traffic", + srcIP: "100.10.0.1", + dstIP: "192.168.1.53", + 
proto: fw.ProtocolUDP, + srcPort: 54321, + dstPort: 53, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolUDP, + dstPort: &fw.Port{Values: []uint16{53}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow ICMP to any destination", + srcIP: "100.10.0.1", + dstIP: "8.8.8.8", + proto: fw.ProtocolICMP, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("0.0.0.0/0"), + proto: fw.ProtocolICMP, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow all protocols but specific port", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolALL, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Implicit deny - wrong destination port", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 8080, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: false, + }, + { + name: "Implicit deny - wrong protocol", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolUDP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: false, + }, + { + name: "Implicit deny - wrong source network", + srcIP: "172.16.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: false, + }, + { + name: "Source port match", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + srcPort: &fw.Port{Values: []uint16{12345}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Multiple source networks", + srcIP: "172.16.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{ + netip.MustParsePrefix("100.10.0.0/16"), + netip.MustParsePrefix("172.16.0.0/16"), + }, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow ALL protocol without ports", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolICMP, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolALL, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow ALL protocol with specific ports", + srcIP: "100.10.0.1", + 
dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolALL, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Multiple source networks with mismatched protocol", + srcIP: "172.16.0.1", + dstIP: "192.168.1.100", + // Should not match TCP rule + proto: fw.ProtocolUDP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{ + netip.MustParsePrefix("100.10.0.0/16"), + netip.MustParsePrefix("172.16.0.0/16"), + }, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: false, + }, + { + name: "Allow multiple destination ports", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 8080, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80, 8080, 443}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow multiple source ports", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + srcPort: &fw.Port{Values: []uint16{12345, 12346, 12347}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Allow ALL protocol with both src and dst ports", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolALL, + srcPort: &fw.Port{Values: []uint16{12345}}, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Port Range - Within Range", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 8080, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{ + IsRange: true, + Values: []uint16{8000, 8100}, + }, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Port Range - Outside Range", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 7999, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{ + IsRange: true, + Values: []uint16{8000, 8100}, + }, + action: fw.ActionAccept, + }, + shouldPass: false, + }, + { + name: "Source Port Range - Within Range", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 32100, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + srcPort: &fw.Port{ + IsRange: true, + Values: []uint16{32000, 33000}, + }, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Mixed Port Specification - Range and Single", + srcIP: 
"100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 32100, + dstPort: 443, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + srcPort: &fw.Port{ + IsRange: true, + Values: []uint16{32000, 33000}, + }, + dstPort: &fw.Port{ + Values: []uint16{443}, + }, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Edge Case - Port at Range Boundary", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 8100, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{ + IsRange: true, + Values: []uint16{8000, 8100}, + }, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "UDP Port Range", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolUDP, + srcPort: 12345, + dstPort: 5060, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolUDP, + dstPort: &fw.Port{ + IsRange: true, + Values: []uint16{5060, 5070}, + }, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "ALL Protocol with Port Range", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 8080, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolALL, + dstPort: &fw.Port{ + IsRange: true, + Values: []uint16{8000, 8100}, + }, + action: fw.ActionAccept, + }, + shouldPass: true, + }, + { + name: "Drop TCP traffic to specific destination", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{443}}, + action: fw.ActionDrop, + }, + shouldPass: false, + }, + { + name: "Drop all traffic to specific destination", + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolALL, + action: fw.ActionDrop, + }, + shouldPass: false, + }, + { + name: "Drop traffic from multiple source networks", + srcIP: "172.16.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + rule: rule{ + sources: []netip.Prefix{ + netip.MustParsePrefix("100.10.0.0/16"), + netip.MustParsePrefix("172.16.0.0/16"), + }, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionDrop, + }, + shouldPass: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rule, err := manager.AddRouteFiltering( + tc.rule.sources, + tc.rule.dest, + tc.rule.proto, + tc.rule.srcPort, + tc.rule.dstPort, + tc.rule.action, + ) + require.NoError(t, err) + require.NotNil(t, rule) + + t.Cleanup(func() { + require.NoError(t, manager.DeleteRouteRule(rule)) + }) + + srcIP := net.ParseIP(tc.srcIP) + dstIP := net.ParseIP(tc.dstIP) + + // testing routeACLsPass only and not DropIncoming, as routed packets are dropped after being passed + // 
to the forwarder + isAllowed := manager.routeACLsPass(srcIP, dstIP, tc.proto, tc.srcPort, tc.dstPort) + require.Equal(t, tc.shouldPass, isAllowed) + }) + } +} + +func TestRouteACLOrder(t *testing.T) { + manager := setupRoutedManager(t, "10.10.0.100/16") + + type testCase struct { + name string + rules []struct { + sources []netip.Prefix + dest netip.Prefix + proto fw.Protocol + srcPort *fw.Port + dstPort *fw.Port + action fw.Action + } + packets []struct { + srcIP string + dstIP string + proto fw.Protocol + srcPort uint16 + dstPort uint16 + shouldPass bool + } + } + + testCases := []testCase{ + { + name: "Drop rules take precedence over accept", + rules: []struct { + sources []netip.Prefix + dest netip.Prefix + proto fw.Protocol + srcPort *fw.Port + dstPort *fw.Port + action fw.Action + }{ + { + // Accept rule added first + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80, 443}}, + action: fw.ActionAccept, + }, + { + // Drop rule added second but should be evaluated first + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{443}}, + action: fw.ActionDrop, + }, + }, + packets: []struct { + srcIP string + dstIP string + proto fw.Protocol + srcPort uint16 + dstPort uint16 + shouldPass bool + }{ + { + // Should be dropped by the drop rule + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + shouldPass: false, + }, + { + // Should be allowed by the accept rule (port 80 not in drop rule) + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + shouldPass: true, + }, + }, + }, + { + name: "Multiple drop rules take precedence", + rules: []struct { + sources []netip.Prefix + dest netip.Prefix + proto fw.Protocol + srcPort *fw.Port + dstPort *fw.Port + action fw.Action + }{ + { + // Accept all + sources: []netip.Prefix{netip.MustParsePrefix("0.0.0.0/0")}, + dest: netip.MustParsePrefix("0.0.0.0/0"), + proto: fw.ProtocolALL, + action: fw.ActionAccept, + }, + { + // Drop specific port + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{443}}, + action: fw.ActionDrop, + }, + { + // Drop different port + sources: []netip.Prefix{netip.MustParsePrefix("100.10.0.0/16")}, + dest: netip.MustParsePrefix("192.168.1.0/24"), + proto: fw.ProtocolTCP, + dstPort: &fw.Port{Values: []uint16{80}}, + action: fw.ActionDrop, + }, + }, + packets: []struct { + srcIP string + dstIP string + proto fw.Protocol + srcPort uint16 + dstPort uint16 + shouldPass bool + }{ + { + // Should be dropped by first drop rule + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 443, + shouldPass: false, + }, + { + // Should be dropped by second drop rule + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 80, + shouldPass: false, + }, + { + // Should be allowed by the accept rule (different port) + srcIP: "100.10.0.1", + dstIP: "192.168.1.100", + proto: fw.ProtocolTCP, + srcPort: 12345, + dstPort: 8080, + shouldPass: true, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var rules []fw.Rule + for _, r := range 
tc.rules { + rule, err := manager.AddRouteFiltering( + r.sources, + r.dest, + r.proto, + r.srcPort, + r.dstPort, + r.action, + ) + require.NoError(t, err) + require.NotNil(t, rule) + rules = append(rules, rule) + } + + t.Cleanup(func() { + for _, rule := range rules { + require.NoError(t, manager.DeleteRouteRule(rule)) + } + }) + + for i, p := range tc.packets { + srcIP := net.ParseIP(p.srcIP) + dstIP := net.ParseIP(p.dstIP) + + isAllowed := manager.routeACLsPass(srcIP, dstIP, p.proto, p.srcPort, p.dstPort) + require.Equal(t, p.shouldPass, isAllowed, "packet %d failed", i) + } + }) + } +} diff --git a/client/firewall/uspfilter/uspfilter_test.go b/client/firewall/uspfilter/uspfilter_test.go index c4c02330b..386fa982b 100644 --- a/client/firewall/uspfilter/uspfilter_test.go +++ b/client/firewall/uspfilter/uspfilter_test.go @@ -9,17 +9,38 @@ import ( "github.com/google/gopacket" "github.com/google/gopacket/layers" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + wgdevice "golang.zx2c4.com/wireguard/device" fw "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/firewall/uspfilter/conntrack" + "github.com/netbirdio/netbird/client/firewall/uspfilter/log" "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/iface/device" ) +var logger = log.NewFromLogrus(logrus.StandardLogger()) + type IFaceMock struct { - SetFilterFunc func(device.PacketFilter) error - AddressFunc func() iface.WGAddress + SetFilterFunc func(device.PacketFilter) error + AddressFunc func() iface.WGAddress + GetWGDeviceFunc func() *wgdevice.Device + GetDeviceFunc func() *device.FilteredDevice +} + +func (i *IFaceMock) GetWGDevice() *wgdevice.Device { + if i.GetWGDeviceFunc == nil { + return nil + } + return i.GetWGDeviceFunc() +} + +func (i *IFaceMock) GetDevice() *device.FilteredDevice { + if i.GetDeviceFunc == nil { + return nil + } + return i.GetDeviceFunc() } func (i *IFaceMock) SetFilter(iface device.PacketFilter) error { @@ -41,7 +62,7 @@ func TestManagerCreate(t *testing.T) { SetFilterFunc: func(device.PacketFilter) error { return nil }, } - m, err := Create(ifaceMock) + m, err := Create(ifaceMock, false) if err != nil { t.Errorf("failed to create Manager: %v", err) return @@ -61,7 +82,7 @@ func TestManagerAddPeerFiltering(t *testing.T) { }, } - m, err := Create(ifaceMock) + m, err := Create(ifaceMock, false) if err != nil { t.Errorf("failed to create Manager: %v", err) return @@ -95,7 +116,7 @@ func TestManagerDeleteRule(t *testing.T) { SetFilterFunc: func(device.PacketFilter) error { return nil }, } - m, err := Create(ifaceMock) + m, err := Create(ifaceMock, false) if err != nil { t.Errorf("failed to create Manager: %v", err) return @@ -166,12 +187,12 @@ func TestAddUDPPacketHook(t *testing.T) { t.Run(tt.name, func(t *testing.T) { manager, err := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) require.NoError(t, err) manager.AddUDPPacketHook(tt.in, tt.ip, tt.dPort, tt.hook) - var addedRule Rule + var addedRule PeerRule if tt.in { if len(manager.incomingRules[tt.ip.String()]) != 1 { t.Errorf("expected 1 incoming rule, got %d", len(manager.incomingRules)) @@ -215,7 +236,7 @@ func TestManagerReset(t *testing.T) { SetFilterFunc: func(device.PacketFilter) error { return nil }, } - m, err := Create(ifaceMock) + m, err := Create(ifaceMock, false) if err != nil { t.Errorf("failed to create Manager: %v", err) return @@ -247,9 +268,18 @@ func TestManagerReset(t *testing.T) { func 
TestNotMatchByIP(t *testing.T) { ifaceMock := &IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, + AddressFunc: func() iface.WGAddress { + return iface.WGAddress{ + IP: net.ParseIP("100.10.0.100"), + Network: &net.IPNet{ + IP: net.ParseIP("100.10.0.0"), + Mask: net.CIDRMask(16, 32), + }, + } + }, } - m, err := Create(ifaceMock) + m, err := Create(ifaceMock, false) if err != nil { t.Errorf("failed to create Manager: %v", err) return @@ -298,7 +328,7 @@ func TestNotMatchByIP(t *testing.T) { return } - if m.dropFilter(buf.Bytes(), m.incomingRules) { + if m.dropFilter(buf.Bytes()) { t.Errorf("expected packet to be accepted") return } @@ -317,7 +347,7 @@ func TestRemovePacketHook(t *testing.T) { } // creating manager instance - manager, err := Create(iface) + manager, err := Create(iface, false) if err != nil { t.Fatalf("Failed to create Manager: %s", err) } @@ -363,7 +393,7 @@ func TestRemovePacketHook(t *testing.T) { func TestProcessOutgoingHooks(t *testing.T) { manager, err := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) require.NoError(t, err) manager.wgNetwork = &net.IPNet{ @@ -371,7 +401,7 @@ func TestProcessOutgoingHooks(t *testing.T) { Mask: net.CIDRMask(16, 32), } manager.udpTracker.Close() - manager.udpTracker = conntrack.NewUDPTracker(100 * time.Millisecond) + manager.udpTracker = conntrack.NewUDPTracker(100*time.Millisecond, logger) defer func() { require.NoError(t, manager.Reset(nil)) }() @@ -449,7 +479,7 @@ func TestUSPFilterCreatePerformance(t *testing.T) { ifaceMock := &IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, } - manager, err := Create(ifaceMock) + manager, err := Create(ifaceMock, false) require.NoError(t, err) time.Sleep(time.Second) @@ -476,7 +506,7 @@ func TestUSPFilterCreatePerformance(t *testing.T) { func TestStatefulFirewall_UDPTracking(t *testing.T) { manager, err := Create(&IFaceMock{ SetFilterFunc: func(device.PacketFilter) error { return nil }, - }) + }, false) require.NoError(t, err) manager.wgNetwork = &net.IPNet{ @@ -485,7 +515,7 @@ func TestStatefulFirewall_UDPTracking(t *testing.T) { } manager.udpTracker.Close() // Close the existing tracker - manager.udpTracker = conntrack.NewUDPTracker(200 * time.Millisecond) + manager.udpTracker = conntrack.NewUDPTracker(200*time.Millisecond, logger) manager.decoders = sync.Pool{ New: func() any { d := &decoder{ @@ -606,7 +636,7 @@ func TestStatefulFirewall_UDPTracking(t *testing.T) { for _, cp := range checkPoints { time.Sleep(cp.sleep) - drop = manager.dropFilter(inboundBuf.Bytes(), manager.incomingRules) + drop = manager.dropFilter(inboundBuf.Bytes()) require.Equal(t, cp.shouldAllow, !drop, cp.description) // If the connection should still be valid, verify it exists @@ -677,7 +707,7 @@ func TestStatefulFirewall_UDPTracking(t *testing.T) { require.NoError(t, err) // Verify the invalid packet is dropped - drop = manager.dropFilter(testBuf.Bytes(), manager.incomingRules) + drop = manager.dropFilter(testBuf.Bytes()) require.True(t, drop, tc.description) }) } diff --git a/client/iface/configurer/usp.go b/client/iface/configurer/usp.go index 21d65ab2a..391269dd0 100644 --- a/client/iface/configurer/usp.go +++ b/client/iface/configurer/usp.go @@ -362,7 +362,7 @@ func toWgUserspaceString(wgCfg wgtypes.Config) string { } func getFwmark() int { - if runtime.GOOS == "linux" && !nbnet.CustomRoutingDisabled() { + if nbnet.AdvancedRouting() { return nbnet.NetbirdFwmark } return 0 diff --git a/client/iface/device.go 
b/client/iface/device.go index 0d4e69145..2a170adfb 100644 --- a/client/iface/device.go +++ b/client/iface/device.go @@ -3,6 +3,8 @@ package iface import ( + wgdevice "golang.zx2c4.com/wireguard/device" + "github.com/netbirdio/netbird/client/iface/bind" "github.com/netbirdio/netbird/client/iface/device" ) @@ -15,4 +17,5 @@ type WGTunDevice interface { DeviceName() string Close() error FilteredDevice() *device.FilteredDevice + Device() *wgdevice.Device } diff --git a/client/iface/device/device_darwin.go b/client/iface/device/device_darwin.go index b5a128bc1..fe7ed1752 100644 --- a/client/iface/device/device_darwin.go +++ b/client/iface/device/device_darwin.go @@ -117,6 +117,11 @@ func (t *TunDevice) FilteredDevice() *FilteredDevice { return t.filteredDevice } +// Device returns the wireguard device +func (t *TunDevice) Device() *device.Device { + return t.device +} + // assignAddr Adds IP address to the tunnel interface and network route based on the range provided func (t *TunDevice) assignAddr() error { cmd := exec.Command("ifconfig", t.name, "inet", t.address.IP.String(), t.address.IP.String()) diff --git a/client/iface/device/device_kernel_unix.go b/client/iface/device/device_kernel_unix.go index 0dfed4d90..3314b576b 100644 --- a/client/iface/device/device_kernel_unix.go +++ b/client/iface/device/device_kernel_unix.go @@ -9,6 +9,7 @@ import ( "github.com/pion/transport/v3" log "github.com/sirupsen/logrus" + "golang.zx2c4.com/wireguard/device" "github.com/netbirdio/netbird/client/iface/bind" "github.com/netbirdio/netbird/client/iface/configurer" @@ -151,6 +152,11 @@ func (t *TunKernelDevice) DeviceName() string { return t.name } +// Device returns the wireguard device, not applicable for kernel devices +func (t *TunKernelDevice) Device() *device.Device { + return nil +} + func (t *TunKernelDevice) FilteredDevice() *FilteredDevice { return nil } diff --git a/client/iface/device/device_netstack.go b/client/iface/device/device_netstack.go index f5d39e9e0..c7d297187 100644 --- a/client/iface/device/device_netstack.go +++ b/client/iface/device/device_netstack.go @@ -117,3 +117,8 @@ func (t *TunNetstackDevice) DeviceName() string { func (t *TunNetstackDevice) FilteredDevice() *FilteredDevice { return t.filteredDevice } + +// Device returns the wireguard device +func (t *TunNetstackDevice) Device() *device.Device { + return t.device +} diff --git a/client/iface/device/device_usp_unix.go b/client/iface/device/device_usp_unix.go index 3562f312d..4ac87aecb 100644 --- a/client/iface/device/device_usp_unix.go +++ b/client/iface/device/device_usp_unix.go @@ -124,6 +124,11 @@ func (t *USPDevice) FilteredDevice() *FilteredDevice { return t.filteredDevice } +// Device returns the wireguard device +func (t *USPDevice) Device() *device.Device { + return t.device +} + // assignAddr Adds IP address to the tunnel interface func (t *USPDevice) assignAddr() error { link := newWGLink(t.name) diff --git a/client/iface/device/device_windows.go b/client/iface/device/device_windows.go index 86968d06d..e603d7696 100644 --- a/client/iface/device/device_windows.go +++ b/client/iface/device/device_windows.go @@ -150,6 +150,11 @@ func (t *TunDevice) FilteredDevice() *FilteredDevice { return t.filteredDevice } +// Device returns the wireguard device +func (t *TunDevice) Device() *device.Device { + return t.device +} + func (t *TunDevice) GetInterfaceGUIDString() (string, error) { if t.nativeTunDevice == nil { return "", fmt.Errorf("interface has not been initialized yet") diff --git a/client/iface/device_android.go 
b/client/iface/device_android.go index 3d15080ff..028f6fa7d 100644 --- a/client/iface/device_android.go +++ b/client/iface/device_android.go @@ -1,6 +1,8 @@ package iface import ( + wgdevice "golang.zx2c4.com/wireguard/device" + "github.com/netbirdio/netbird/client/iface/bind" "github.com/netbirdio/netbird/client/iface/device" ) @@ -13,4 +15,5 @@ type WGTunDevice interface { DeviceName() string Close() error FilteredDevice() *device.FilteredDevice + Device() *wgdevice.Device } diff --git a/client/iface/iface.go b/client/iface/iface.go index 1fb9c2691..64219975f 100644 --- a/client/iface/iface.go +++ b/client/iface/iface.go @@ -11,6 +11,8 @@ import ( log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + wgdevice "golang.zx2c4.com/wireguard/device" + "github.com/netbirdio/netbird/client/errors" "github.com/netbirdio/netbird/client/iface/bind" "github.com/netbirdio/netbird/client/iface/configurer" @@ -203,6 +205,11 @@ func (w *WGIface) GetDevice() *device.FilteredDevice { return w.tun.FilteredDevice() } +// GetWGDevice returns the WireGuard device +func (w *WGIface) GetWGDevice() *wgdevice.Device { + return w.tun.Device() +} + // GetStats returns the last handshake time, rx and tx bytes for the given peer func (w *WGIface) GetStats(peerKey string) (configurer.WGStats, error) { return w.configurer.GetStats(peerKey) diff --git a/client/iface/iface_moc.go b/client/iface/iface_moc.go index d91a7224f..5f57bc821 100644 --- a/client/iface/iface_moc.go +++ b/client/iface/iface_moc.go @@ -4,6 +4,7 @@ import ( "net" "time" + wgdevice "golang.zx2c4.com/wireguard/device" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/netbirdio/netbird/client/iface/bind" @@ -29,6 +30,7 @@ type MockWGIface struct { SetFilterFunc func(filter device.PacketFilter) error GetFilterFunc func() device.PacketFilter GetDeviceFunc func() *device.FilteredDevice + GetWGDeviceFunc func() *wgdevice.Device GetStatsFunc func(peerKey string) (configurer.WGStats, error) GetInterfaceGUIDStringFunc func() (string, error) GetProxyFunc func() wgproxy.Proxy @@ -102,11 +104,14 @@ func (m *MockWGIface) GetDevice() *device.FilteredDevice { return m.GetDeviceFunc() } +func (m *MockWGIface) GetWGDevice() *wgdevice.Device { + return m.GetWGDeviceFunc() +} + func (m *MockWGIface) GetStats(peerKey string) (configurer.WGStats, error) { return m.GetStatsFunc(peerKey) } func (m *MockWGIface) GetProxy() wgproxy.Proxy { - //TODO implement me - panic("implement me") + return m.GetProxyFunc() } diff --git a/client/iface/iwginterface.go b/client/iface/iwginterface.go index f5ab29539..472ab45f9 100644 --- a/client/iface/iwginterface.go +++ b/client/iface/iwginterface.go @@ -6,6 +6,7 @@ import ( "net" "time" + wgdevice "golang.zx2c4.com/wireguard/device" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/netbirdio/netbird/client/iface/bind" @@ -32,5 +33,6 @@ type IWGIface interface { SetFilter(filter device.PacketFilter) error GetFilter() device.PacketFilter GetDevice() *device.FilteredDevice + GetWGDevice() *wgdevice.Device GetStats(peerKey string) (configurer.WGStats, error) } diff --git a/client/iface/iwginterface_windows.go b/client/iface/iwginterface_windows.go index 96eec52a5..c9183cafd 100644 --- a/client/iface/iwginterface_windows.go +++ b/client/iface/iwginterface_windows.go @@ -4,6 +4,7 @@ import ( "net" "time" + wgdevice "golang.zx2c4.com/wireguard/device" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "github.com/netbirdio/netbird/client/iface/bind" @@ -30,6 +31,7 @@ type IWGIface interface { SetFilter(filter 
device.PacketFilter) error GetFilter() device.PacketFilter GetDevice() *device.FilteredDevice + GetWGDevice() *wgdevice.Device GetStats(peerKey string) (configurer.WGStats, error) GetInterfaceGUIDString() (string, error) } diff --git a/client/internal/acl/manager_test.go b/client/internal/acl/manager_test.go index 1edbeb9ae..c9cbe1c5a 100644 --- a/client/internal/acl/manager_test.go +++ b/client/internal/acl/manager_test.go @@ -49,9 +49,10 @@ func TestDefaultManager(t *testing.T) { IP: ip, Network: network, }).AnyTimes() + ifaceMock.EXPECT().GetWGDevice().Return(nil).AnyTimes() // we receive one rule from the management so for testing purposes ignore it - fw, err := firewall.NewFirewall(ifaceMock, nil) + fw, err := firewall.NewFirewall(ifaceMock, nil, false) if err != nil { t.Errorf("create firewall: %v", err) return @@ -342,9 +343,10 @@ func TestDefaultManagerEnableSSHRules(t *testing.T) { IP: ip, Network: network, }).AnyTimes() + ifaceMock.EXPECT().GetWGDevice().Return(nil).AnyTimes() // we receive one rule from the management so for testing purposes ignore it - fw, err := firewall.NewFirewall(ifaceMock, nil) + fw, err := firewall.NewFirewall(ifaceMock, nil, false) if err != nil { t.Errorf("create firewall: %v", err) return diff --git a/client/internal/acl/mocks/iface_mapper.go b/client/internal/acl/mocks/iface_mapper.go index 3ed12b6dd..08aa4fd5a 100644 --- a/client/internal/acl/mocks/iface_mapper.go +++ b/client/internal/acl/mocks/iface_mapper.go @@ -8,6 +8,8 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + wgdevice "golang.zx2c4.com/wireguard/device" + iface "github.com/netbirdio/netbird/client/iface" "github.com/netbirdio/netbird/client/iface/device" ) @@ -90,3 +92,31 @@ func (mr *MockIFaceMapperMockRecorder) SetFilter(arg0 interface{}) *gomock.Call mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFilter", reflect.TypeOf((*MockIFaceMapper)(nil).SetFilter), arg0) } + +// GetDevice mocks base method. +func (m *MockIFaceMapper) GetDevice() *device.FilteredDevice { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDevice") + ret0, _ := ret[0].(*device.FilteredDevice) + return ret0 +} + +// GetDevice indicates an expected call of GetDevice. +func (mr *MockIFaceMapperMockRecorder) GetDevice() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevice", reflect.TypeOf((*MockIFaceMapper)(nil).GetDevice)) +} + +// GetWGDevice mocks base method. +func (m *MockIFaceMapper) GetWGDevice() *wgdevice.Device { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWGDevice") + ret0, _ := ret[0].(*wgdevice.Device) + return ret0 +} + +// GetWGDevice indicates an expected call of GetWGDevice. 
+func (mr *MockIFaceMapperMockRecorder) GetWGDevice() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWGDevice", reflect.TypeOf((*MockIFaceMapper)(nil).GetWGDevice)) +} diff --git a/client/internal/config.go b/client/internal/config.go index 3196c4e04..5703539cc 100644 --- a/client/internal/config.go +++ b/client/internal/config.go @@ -68,6 +68,8 @@ type ConfigInput struct { DisableFirewall *bool BlockLANAccess *bool + + DisableNotifications *bool } // Config Configuration type @@ -93,6 +95,8 @@ type Config struct { BlockLANAccess bool + DisableNotifications bool + // SSHKey is a private SSH key in a PEM format SSHKey string @@ -469,6 +473,16 @@ func (config *Config) apply(input ConfigInput) (updated bool, err error) { updated = true } + if input.DisableNotifications != nil && *input.DisableNotifications != config.DisableNotifications { + if *input.DisableNotifications { + log.Infof("disabling notifications") + } else { + log.Infof("enabling notifications") + } + config.DisableNotifications = *input.DisableNotifications + updated = true + } + if input.ClientCertKeyPath != "" { config.ClientCertKeyPath = input.ClientCertKeyPath updated = true diff --git a/client/internal/connect.go b/client/internal/connect.go index ddd10e5cd..a0d585ffe 100644 --- a/client/internal/connect.go +++ b/client/internal/connect.go @@ -31,6 +31,7 @@ import ( relayClient "github.com/netbirdio/netbird/relay/client" signal "github.com/netbirdio/netbird/signal/client" "github.com/netbirdio/netbird/util" + nbnet "github.com/netbirdio/netbird/util/net" "github.com/netbirdio/netbird/version" ) @@ -109,6 +110,8 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan log.Infof("starting NetBird client version %s on %s/%s", version.NetbirdVersion(), runtime.GOOS, runtime.GOARCH) + nbnet.Init() + backOff := &backoff.ExponentialBackOff{ InitialInterval: time.Second, RandomizationFactor: 1, diff --git a/client/internal/dns/handler_chain.go b/client/internal/dns/handler_chain.go index 673f410e2..3286daabf 100644 --- a/client/internal/dns/handler_chain.go +++ b/client/internal/dns/handler_chain.go @@ -12,7 +12,7 @@ import ( const ( PriorityDNSRoute = 100 PriorityMatchDomain = 50 - PriorityDefault = 0 + PriorityDefault = 1 ) type SubdomainMatcher interface { @@ -26,7 +26,6 @@ type HandlerEntry struct { Pattern string OrigPattern string IsWildcard bool - StopHandler handlerWithStop MatchSubdomains bool } @@ -64,7 +63,7 @@ func (w *ResponseWriterChain) GetOrigPattern() string { } // AddHandler adds a new handler to the chain, replacing any existing handler with the same pattern and priority -func (c *HandlerChain) AddHandler(pattern string, handler dns.Handler, priority int, stopHandler handlerWithStop) { +func (c *HandlerChain) AddHandler(pattern string, handler dns.Handler, priority int) { c.mu.Lock() defer c.mu.Unlock() @@ -78,9 +77,6 @@ func (c *HandlerChain) AddHandler(pattern string, handler dns.Handler, priority // First remove any existing handler with same pattern (case-insensitive) and priority for i := len(c.handlers) - 1; i >= 0; i-- { if strings.EqualFold(c.handlers[i].OrigPattern, origPattern) && c.handlers[i].Priority == priority { - if c.handlers[i].StopHandler != nil { - c.handlers[i].StopHandler.stop() - } c.handlers = append(c.handlers[:i], c.handlers[i+1:]...) 
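// Note: chain entries no longer carry a stop callback; AddHandler only
// replaces the entry with the same pattern and priority, and handler
// lifecycle is managed by the caller (see the server.go updateMux changes
// below). A hedged sketch of the updated call sites, mirroring
// handler_chain_test.go (the handler values are assumed to implement
// dns.Handler):
//
//	chain := NewHandlerChain()
//	chain.AddHandler("example.com.", routeHandler, PriorityDNSRoute)       // 100: DNS route handlers first
//	chain.AddHandler("example.com.", upstreamHandler, PriorityMatchDomain) // 50: match-domain upstreams
//	chain.AddHandler(".", hostHandler, PriorityDefault)                    // 1: root-zone fallback
//	// Re-adding "example.com." at PriorityMatchDomain replaces the earlier entry.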
break } @@ -101,7 +97,6 @@ func (c *HandlerChain) AddHandler(pattern string, handler dns.Handler, priority Pattern: pattern, OrigPattern: origPattern, IsWildcard: isWildcard, - StopHandler: stopHandler, MatchSubdomains: matchSubdomains, } @@ -142,9 +137,6 @@ func (c *HandlerChain) RemoveHandler(pattern string, priority int) { for i := len(c.handlers) - 1; i >= 0; i-- { entry := c.handlers[i] if strings.EqualFold(entry.OrigPattern, pattern) && entry.Priority == priority { - if entry.StopHandler != nil { - entry.StopHandler.stop() - } c.handlers = append(c.handlers[:i], c.handlers[i+1:]...) return } @@ -180,8 +172,8 @@ func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { if log.IsLevelEnabled(log.TraceLevel) { log.Tracef("current handlers (%d):", len(handlers)) for _, h := range handlers { - log.Tracef(" - pattern: domain=%s original: domain=%s wildcard=%v priority=%d", - h.Pattern, h.OrigPattern, h.IsWildcard, h.Priority) + log.Tracef(" - pattern: domain=%s original: domain=%s wildcard=%v match_subdomain=%v priority=%d", + h.Pattern, h.OrigPattern, h.IsWildcard, h.MatchSubdomains, h.Priority) } } @@ -206,13 +198,13 @@ func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { } if !matched { - log.Tracef("trying domain match: request: domain=%s pattern: domain=%s wildcard=%v match_subdomain=%v matched=false", - qname, entry.OrigPattern, entry.MatchSubdomains, entry.IsWildcard) + log.Tracef("trying domain match: request: domain=%s pattern: domain=%s wildcard=%v match_subdomain=%v priority=%d matched=false", + qname, entry.OrigPattern, entry.MatchSubdomains, entry.IsWildcard, entry.Priority) continue } - log.Tracef("handler matched: request: domain=%s pattern: domain=%s wildcard=%v match_subdomain=%v", - qname, entry.OrigPattern, entry.IsWildcard, entry.MatchSubdomains) + log.Tracef("handler matched: request: domain=%s pattern: domain=%s wildcard=%v match_subdomain=%v priority=%d", + qname, entry.OrigPattern, entry.IsWildcard, entry.MatchSubdomains, entry.Priority) chainWriter := &ResponseWriterChain{ ResponseWriter: w, diff --git a/client/internal/dns/handler_chain_test.go b/client/internal/dns/handler_chain_test.go index d04bfbbb3..94aa987af 100644 --- a/client/internal/dns/handler_chain_test.go +++ b/client/internal/dns/handler_chain_test.go @@ -21,9 +21,9 @@ func TestHandlerChain_ServeDNS_Priorities(t *testing.T) { dnsRouteHandler := &nbdns.MockHandler{} // Setup handlers with different priorities - chain.AddHandler("example.com.", defaultHandler, nbdns.PriorityDefault, nil) - chain.AddHandler("example.com.", matchDomainHandler, nbdns.PriorityMatchDomain, nil) - chain.AddHandler("example.com.", dnsRouteHandler, nbdns.PriorityDNSRoute, nil) + chain.AddHandler("example.com.", defaultHandler, nbdns.PriorityDefault) + chain.AddHandler("example.com.", matchDomainHandler, nbdns.PriorityMatchDomain) + chain.AddHandler("example.com.", dnsRouteHandler, nbdns.PriorityDNSRoute) // Create test request r := new(dns.Msg) @@ -138,7 +138,7 @@ func TestHandlerChain_ServeDNS_DomainMatching(t *testing.T) { pattern = "*." 
+ tt.handlerDomain[2:] } - chain.AddHandler(pattern, handler, nbdns.PriorityDefault, nil) + chain.AddHandler(pattern, handler, nbdns.PriorityDefault) r := new(dns.Msg) r.SetQuestion(tt.queryDomain, dns.TypeA) @@ -253,7 +253,7 @@ func TestHandlerChain_ServeDNS_OverlappingDomains(t *testing.T) { handler.On("ServeDNS", mock.Anything, mock.Anything).Maybe() } - chain.AddHandler(tt.handlers[i].pattern, handler, tt.handlers[i].priority, nil) + chain.AddHandler(tt.handlers[i].pattern, handler, tt.handlers[i].priority) } // Create and execute request @@ -280,9 +280,9 @@ func TestHandlerChain_ServeDNS_ChainContinuation(t *testing.T) { handler3 := &nbdns.MockHandler{} // Add handlers in priority order - chain.AddHandler("example.com.", handler1, nbdns.PriorityDNSRoute, nil) - chain.AddHandler("example.com.", handler2, nbdns.PriorityMatchDomain, nil) - chain.AddHandler("example.com.", handler3, nbdns.PriorityDefault, nil) + chain.AddHandler("example.com.", handler1, nbdns.PriorityDNSRoute) + chain.AddHandler("example.com.", handler2, nbdns.PriorityMatchDomain) + chain.AddHandler("example.com.", handler3, nbdns.PriorityDefault) // Create test request r := new(dns.Msg) @@ -416,7 +416,7 @@ func TestHandlerChain_PriorityDeregistration(t *testing.T) { if op.action == "add" { handler := &nbdns.MockHandler{} handlers[op.priority] = handler - chain.AddHandler(op.pattern, handler, op.priority, nil) + chain.AddHandler(op.pattern, handler, op.priority) } else { chain.RemoveHandler(op.pattern, op.priority) } @@ -471,9 +471,9 @@ func TestHandlerChain_MultiPriorityHandling(t *testing.T) { r.SetQuestion(testQuery, dns.TypeA) // Add handlers in mixed order - chain.AddHandler(testDomain, defaultHandler, nbdns.PriorityDefault, nil) - chain.AddHandler(testDomain, routeHandler, nbdns.PriorityDNSRoute, nil) - chain.AddHandler(testDomain, matchHandler, nbdns.PriorityMatchDomain, nil) + chain.AddHandler(testDomain, defaultHandler, nbdns.PriorityDefault) + chain.AddHandler(testDomain, routeHandler, nbdns.PriorityDNSRoute) + chain.AddHandler(testDomain, matchHandler, nbdns.PriorityMatchDomain) // Test 1: Initial state with all three handlers w := &nbdns.ResponseWriterChain{ResponseWriter: &mockResponseWriter{}} @@ -653,7 +653,7 @@ func TestHandlerChain_CaseSensitivity(t *testing.T) { handler = mockHandler } - chain.AddHandler(pattern, handler, h.priority, nil) + chain.AddHandler(pattern, handler, h.priority) } // Execute request @@ -795,7 +795,7 @@ func TestHandlerChain_DomainSpecificityOrdering(t *testing.T) { if op.action == "add" { handler := &nbdns.MockSubdomainHandler{Subdomains: op.subdomain} handlers[op.pattern] = handler - chain.AddHandler(op.pattern, handler, op.priority, nil) + chain.AddHandler(op.pattern, handler, op.priority) } else { chain.RemoveHandler(op.pattern, op.priority) } diff --git a/client/internal/dns/host_windows.go b/client/internal/dns/host_windows.go index 7ecca8a41..58b0a14de 100644 --- a/client/internal/dns/host_windows.go +++ b/client/internal/dns/host_windows.go @@ -1,35 +1,51 @@ package dns import ( + "errors" "fmt" "io" "strings" + "syscall" + "github.com/hashicorp/go-multierror" log "github.com/sirupsen/logrus" "golang.org/x/sys/windows/registry" + nberrors "github.com/netbirdio/netbird/client/errors" "github.com/netbirdio/netbird/client/internal/statemanager" ) +var ( + userenv = syscall.NewLazyDLL("userenv.dll") + + // https://learn.microsoft.com/en-us/windows/win32/api/userenv/nf-userenv-refreshpolicyex + refreshPolicyExFn = userenv.NewProc("RefreshPolicyEx") +) + const ( - 
dnsPolicyConfigMatchPath = `SYSTEM\CurrentControlSet\Services\Dnscache\Parameters\DnsPolicyConfig\NetBird-Match` + dnsPolicyConfigMatchPath = `SYSTEM\CurrentControlSet\Services\Dnscache\Parameters\DnsPolicyConfig\NetBird-Match` + gpoDnsPolicyRoot = `SOFTWARE\Policies\Microsoft\Windows NT\DNSClient` + gpoDnsPolicyConfigMatchPath = gpoDnsPolicyRoot + `\DnsPolicyConfig\NetBird-Match` + dnsPolicyConfigVersionKey = "Version" dnsPolicyConfigVersionValue = 2 dnsPolicyConfigNameKey = "Name" dnsPolicyConfigGenericDNSServersKey = "GenericDNSServers" dnsPolicyConfigConfigOptionsKey = "ConfigOptions" dnsPolicyConfigConfigOptionsValue = 0x8 -) -const ( interfaceConfigPath = `SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces` interfaceConfigNameServerKey = "NameServer" interfaceConfigSearchListKey = "SearchList" + + // RP_FORCE: Reapply all policies even if no policy change was detected + rpForce = 0x1 ) type registryConfigurator struct { guid string routingAll bool + gpo bool } func newHostManager(wgInterface WGIface) (*registryConfigurator, error) { @@ -37,12 +53,20 @@ func newHostManager(wgInterface WGIface) (*registryConfigurator, error) { if err != nil { return nil, err } - return newHostManagerWithGuid(guid) -} -func newHostManagerWithGuid(guid string) (*registryConfigurator, error) { + var useGPO bool + k, err := registry.OpenKey(registry.LOCAL_MACHINE, gpoDnsPolicyRoot, registry.QUERY_VALUE) + if err != nil { + log.Debugf("failed to open GPO DNS policy root: %v", err) + } else { + closer(k) + useGPO = true + log.Infof("detected GPO DNS policy configuration, using policy store") + } + return ®istryConfigurator{ guid: guid, + gpo: useGPO, }, nil } @@ -51,30 +75,23 @@ func (r *registryConfigurator) supportCustomPort() bool { } func (r *registryConfigurator) applyDNSConfig(config HostDNSConfig, stateManager *statemanager.Manager) error { - var err error if config.RouteAll { - err = r.addDNSSetupForAll(config.ServerIP) - if err != nil { + if err := r.addDNSSetupForAll(config.ServerIP); err != nil { return fmt.Errorf("add dns setup: %w", err) } } else if r.routingAll { - err = r.deleteInterfaceRegistryKeyProperty(interfaceConfigNameServerKey) - if err != nil { + if err := r.deleteInterfaceRegistryKeyProperty(interfaceConfigNameServerKey); err != nil { return fmt.Errorf("delete interface registry key property: %w", err) } r.routingAll = false log.Infof("removed %s as main DNS forwarder for this peer", config.ServerIP) } - if err := stateManager.UpdateState(&ShutdownState{Guid: r.guid}); err != nil { + if err := stateManager.UpdateState(&ShutdownState{Guid: r.guid, GPO: r.gpo}); err != nil { log.Errorf("failed to update shutdown state: %s", err) } - var ( - searchDomains []string - matchDomains []string - ) - + var searchDomains, matchDomains []string for _, dConf := range config.Domains { if dConf.Disabled { continue @@ -86,16 +103,16 @@ func (r *registryConfigurator) applyDNSConfig(config HostDNSConfig, stateManager } if len(matchDomains) != 0 { - err = r.addDNSMatchPolicy(matchDomains, config.ServerIP) + if err := r.addDNSMatchPolicy(matchDomains, config.ServerIP); err != nil { + return fmt.Errorf("add dns match policy: %w", err) + } } else { - err = removeRegistryKeyFromDNSPolicyConfig(dnsPolicyConfigMatchPath) - } - if err != nil { - return fmt.Errorf("add dns match policy: %w", err) + if err := r.removeDNSMatchPolicies(); err != nil { + return fmt.Errorf("remove dns match policies: %w", err) + } } - err = r.updateSearchDomains(searchDomains) - if err != nil { + if err := 
r.updateSearchDomains(searchDomains); err != nil { return fmt.Errorf("update search domains: %w", err) } @@ -103,9 +120,8 @@ func (r *registryConfigurator) applyDNSConfig(config HostDNSConfig, stateManager } func (r *registryConfigurator) addDNSSetupForAll(ip string) error { - err := r.setInterfaceRegistryKeyStringValue(interfaceConfigNameServerKey, ip) - if err != nil { - return fmt.Errorf("adding dns setup for all failed with error: %w", err) + if err := r.setInterfaceRegistryKeyStringValue(interfaceConfigNameServerKey, ip); err != nil { + return fmt.Errorf("adding dns setup for all failed: %w", err) } r.routingAll = true log.Infof("configured %s:53 as main DNS forwarder for this peer", ip) @@ -113,64 +129,66 @@ func (r *registryConfigurator) addDNSSetupForAll(ip string) error { } func (r *registryConfigurator) addDNSMatchPolicy(domains []string, ip string) error { - _, err := registry.OpenKey(registry.LOCAL_MACHINE, dnsPolicyConfigMatchPath, registry.QUERY_VALUE) - if err == nil { - err = registry.DeleteKey(registry.LOCAL_MACHINE, dnsPolicyConfigMatchPath) - if err != nil { - return fmt.Errorf("unable to remove existing key from registry, key: HKEY_LOCAL_MACHINE\\%s, error: %w", dnsPolicyConfigMatchPath, err) + // if the gpo key is present, we need to put our DNS settings there, otherwise our config might be ignored + // see https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-gpnrpt/8cc31cb9-20cb-4140-9e85-3e08703b4745 + if r.gpo { + if err := r.configureDNSPolicy(gpoDnsPolicyConfigMatchPath, domains, ip); err != nil { + return fmt.Errorf("configure GPO DNS policy: %w", err) + } + + if err := r.configureDNSPolicy(dnsPolicyConfigMatchPath, domains, ip); err != nil { + return fmt.Errorf("configure local DNS policy: %w", err) + } + + if err := refreshGroupPolicy(); err != nil { + log.Warnf("failed to refresh group policy: %v", err) + } + } else { + if err := r.configureDNSPolicy(dnsPolicyConfigMatchPath, domains, ip); err != nil { + return fmt.Errorf("configure local DNS policy: %w", err) } } - regKey, _, err := registry.CreateKey(registry.LOCAL_MACHINE, dnsPolicyConfigMatchPath, registry.SET_VALUE) - if err != nil { - return fmt.Errorf("unable to create registry key, key: HKEY_LOCAL_MACHINE\\%s, error: %w", dnsPolicyConfigMatchPath, err) - } - - err = regKey.SetDWordValue(dnsPolicyConfigVersionKey, dnsPolicyConfigVersionValue) - if err != nil { - return fmt.Errorf("unable to set registry value for %s, error: %w", dnsPolicyConfigVersionKey, err) - } - - err = regKey.SetStringsValue(dnsPolicyConfigNameKey, domains) - if err != nil { - return fmt.Errorf("unable to set registry value for %s, error: %w", dnsPolicyConfigNameKey, err) - } - - err = regKey.SetStringValue(dnsPolicyConfigGenericDNSServersKey, ip) - if err != nil { - return fmt.Errorf("unable to set registry value for %s, error: %w", dnsPolicyConfigGenericDNSServersKey, err) - } - - err = regKey.SetDWordValue(dnsPolicyConfigConfigOptionsKey, dnsPolicyConfigConfigOptionsValue) - if err != nil { - return fmt.Errorf("unable to set registry value for %s, error: %w", dnsPolicyConfigConfigOptionsKey, err) - } - - log.Infof("added %d match domains to the state. Domain list: %s", len(domains), domains) - + log.Infof("added %d match domains. 
Domain list: %s", len(domains), domains) return nil } -func (r *registryConfigurator) restoreHostDNS() error { - if err := removeRegistryKeyFromDNSPolicyConfig(dnsPolicyConfigMatchPath); err != nil { - log.Errorf("remove registry key from dns policy config: %s", err) +// configureDNSPolicy handles the actual configuration of a DNS policy at the specified path +func (r *registryConfigurator) configureDNSPolicy(policyPath string, domains []string, ip string) error { + if err := removeRegistryKeyFromDNSPolicyConfig(policyPath); err != nil { + return fmt.Errorf("remove existing dns policy: %w", err) } - if err := r.deleteInterfaceRegistryKeyProperty(interfaceConfigSearchListKey); err != nil { - return fmt.Errorf("remove interface registry key: %w", err) + regKey, _, err := registry.CreateKey(registry.LOCAL_MACHINE, policyPath, registry.SET_VALUE) + if err != nil { + return fmt.Errorf("create registry key HKEY_LOCAL_MACHINE\\%s: %w", policyPath, err) + } + defer closer(regKey) + + if err := regKey.SetDWordValue(dnsPolicyConfigVersionKey, dnsPolicyConfigVersionValue); err != nil { + return fmt.Errorf("set %s: %w", dnsPolicyConfigVersionKey, err) + } + + if err := regKey.SetStringsValue(dnsPolicyConfigNameKey, domains); err != nil { + return fmt.Errorf("set %s: %w", dnsPolicyConfigNameKey, err) + } + + if err := regKey.SetStringValue(dnsPolicyConfigGenericDNSServersKey, ip); err != nil { + return fmt.Errorf("set %s: %w", dnsPolicyConfigGenericDNSServersKey, err) + } + + if err := regKey.SetDWordValue(dnsPolicyConfigConfigOptionsKey, dnsPolicyConfigConfigOptionsValue); err != nil { + return fmt.Errorf("set %s: %w", dnsPolicyConfigConfigOptionsKey, err) } return nil } func (r *registryConfigurator) updateSearchDomains(domains []string) error { - err := r.setInterfaceRegistryKeyStringValue(interfaceConfigSearchListKey, strings.Join(domains, ",")) - if err != nil { - return fmt.Errorf("adding search domain failed with error: %w", err) + if err := r.setInterfaceRegistryKeyStringValue(interfaceConfigSearchListKey, strings.Join(domains, ",")); err != nil { + return fmt.Errorf("update search domains: %w", err) } - - log.Infof("updated the search domains in the registry with %d domains. 
Domain list: %s", len(domains), domains) - + log.Infof("updated search domains: %s", domains) return nil } @@ -181,11 +199,9 @@ func (r *registryConfigurator) setInterfaceRegistryKeyStringValue(key, value str } defer closer(regKey) - err = regKey.SetStringValue(key, value) - if err != nil { - return fmt.Errorf("applying key %s with value \"%s\" for interface failed with error: %w", key, value, err) + if err := regKey.SetStringValue(key, value); err != nil { + return fmt.Errorf("set key %s=%s: %w", key, value, err) } - return nil } @@ -196,43 +212,91 @@ func (r *registryConfigurator) deleteInterfaceRegistryKeyProperty(propertyKey st } defer closer(regKey) - err = regKey.DeleteValue(propertyKey) - if err != nil { - return fmt.Errorf("deleting registry key %s for interface failed with error: %w", propertyKey, err) + if err := regKey.DeleteValue(propertyKey); err != nil { + return fmt.Errorf("delete registry key %s: %w", propertyKey, err) } - return nil } func (r *registryConfigurator) getInterfaceRegistryKey() (registry.Key, error) { - var regKey registry.Key - regKeyPath := interfaceConfigPath + "\\" + r.guid - regKey, err := registry.OpenKey(registry.LOCAL_MACHINE, regKeyPath, registry.SET_VALUE) if err != nil { - return regKey, fmt.Errorf("unable to open the interface registry key, key: HKEY_LOCAL_MACHINE\\%s, error: %w", regKeyPath, err) + return regKey, fmt.Errorf("open HKEY_LOCAL_MACHINE\\%s: %w", regKeyPath, err) } - return regKey, nil } -func (r *registryConfigurator) restoreUncleanShutdownDNS() error { - if err := r.restoreHostDNS(); err != nil { - return fmt.Errorf("restoring dns via registry: %w", err) +func (r *registryConfigurator) restoreHostDNS() error { + if err := r.removeDNSMatchPolicies(); err != nil { + log.Errorf("remove dns match policies: %s", err) } + + if err := r.deleteInterfaceRegistryKeyProperty(interfaceConfigSearchListKey); err != nil { + return fmt.Errorf("remove interface registry key: %w", err) + } + return nil } +func (r *registryConfigurator) removeDNSMatchPolicies() error { + var merr *multierror.Error + if err := removeRegistryKeyFromDNSPolicyConfig(dnsPolicyConfigMatchPath); err != nil { + merr = multierror.Append(merr, fmt.Errorf("remove local registry key: %w", err)) + } + + if err := removeRegistryKeyFromDNSPolicyConfig(gpoDnsPolicyConfigMatchPath); err != nil { + merr = multierror.Append(merr, fmt.Errorf("remove GPO registry key: %w", err)) + } + + if err := refreshGroupPolicy(); err != nil { + merr = multierror.Append(merr, fmt.Errorf("refresh group policy: %w", err)) + } + + return nberrors.FormatErrorOrNil(merr) +} + +func (r *registryConfigurator) restoreUncleanShutdownDNS() error { + return r.restoreHostDNS() +} + func removeRegistryKeyFromDNSPolicyConfig(regKeyPath string) error { k, err := registry.OpenKey(registry.LOCAL_MACHINE, regKeyPath, registry.QUERY_VALUE) - if err == nil { - defer closer(k) - err = registry.DeleteKey(registry.LOCAL_MACHINE, regKeyPath) - if err != nil { - return fmt.Errorf("unable to remove existing key from registry, key: HKEY_LOCAL_MACHINE\\%s, error: %w", regKeyPath, err) - } + if err != nil { + log.Debugf("failed to open HKEY_LOCAL_MACHINE\\%s: %v", regKeyPath, err) + return nil } + + closer(k) + if err := registry.DeleteKey(registry.LOCAL_MACHINE, regKeyPath); err != nil { + return fmt.Errorf("delete HKEY_LOCAL_MACHINE\\%s: %w", regKeyPath, err) + } + + return nil +} + +func refreshGroupPolicy() error { + // refreshPolicyExFn.Call() panics if the func is not found + defer func() { + if r := recover(); r != nil { + 
log.Errorf("Recovered from panic: %v", r) + } + }() + + ret, _, err := refreshPolicyExFn.Call( + // bMachine = TRUE (computer policy) + uintptr(1), + // dwOptions = RP_FORCE + uintptr(rpForce), + ) + + if ret == 0 { + if err != nil && !errors.Is(err, syscall.Errno(0)) { + return fmt.Errorf("RefreshPolicyEx failed: %w", err) + } + return fmt.Errorf("RefreshPolicyEx failed") + } + return nil } diff --git a/client/internal/dns/local.go b/client/internal/dns/local.go index 9a78d4d50..80113885a 100644 --- a/client/internal/dns/local.go +++ b/client/internal/dns/local.go @@ -2,6 +2,7 @@ package dns import ( "fmt" + "strings" "sync" "github.com/miekg/dns" @@ -29,10 +30,15 @@ func (d *localResolver) String() string { return fmt.Sprintf("local resolver [%d records]", len(d.registeredMap)) } +// ID returns the unique handler ID +func (d *localResolver) id() handlerID { + return "local-resolver" +} + // ServeDNS handles a DNS request func (d *localResolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { if len(r.Question) > 0 { - log.Tracef("received question: domain=%s type=%v class=%v", r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) + log.Tracef("received local question: domain=%s type=%v class=%v", r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) } replyMessage := &dns.Msg{} @@ -55,6 +61,7 @@ func (d *localResolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { func (d *localResolver) lookupRecord(r *dns.Msg) dns.RR { question := r.Question[0] + question.Name = strings.ToLower(question.Name) record, found := d.records.Load(buildRecordKey(question.Name, question.Qclass, question.Qtype)) if !found { return nil diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go index 1fe913fd9..fb94e07ac 100644 --- a/client/internal/dns/server.go +++ b/client/internal/dns/server.go @@ -5,7 +5,6 @@ import ( "fmt" "net/netip" "runtime" - "strings" "sync" "github.com/miekg/dns" @@ -42,7 +41,12 @@ type Server interface { ProbeAvailability() } -type registeredHandlerMap map[string]handlerWithStop +type handlerID string + +type nsGroupsByDomain struct { + domain string + groups []*nbdns.NameServerGroup +} // DefaultServer dns server object type DefaultServer struct { @@ -52,7 +56,6 @@ type DefaultServer struct { mux sync.Mutex service service dnsMuxMap registeredHandlerMap - handlerPriorities map[string]int localResolver *localResolver wgInterface WGIface hostManager hostManager @@ -77,14 +80,17 @@ type handlerWithStop interface { dns.Handler stop() probeAvailability() + id() handlerID } -type muxUpdate struct { +type handlerWrapper struct { domain string handler handlerWithStop priority int } +type registeredHandlerMap map[handlerID]handlerWrapper + // NewDefaultServer returns a new dns server func NewDefaultServer( ctx context.Context, @@ -158,13 +164,12 @@ func newDefaultServer( ) *DefaultServer { ctx, stop := context.WithCancel(ctx) defaultServer := &DefaultServer{ - ctx: ctx, - ctxCancel: stop, - disableSys: disableSys, - service: dnsService, - handlerChain: NewHandlerChain(), - dnsMuxMap: make(registeredHandlerMap), - handlerPriorities: make(map[string]int), + ctx: ctx, + ctxCancel: stop, + disableSys: disableSys, + service: dnsService, + handlerChain: NewHandlerChain(), + dnsMuxMap: make(registeredHandlerMap), localResolver: &localResolver{ registeredMap: make(registrationMap), }, @@ -192,8 +197,7 @@ func (s *DefaultServer) registerHandler(domains []string, handler dns.Handler, p log.Warn("skipping empty domain") continue } - s.handlerChain.AddHandler(domain, 
handler, priority, nil) - s.handlerPriorities[domain] = priority + s.handlerChain.AddHandler(domain, handler, priority) s.service.RegisterMux(nbdns.NormalizeZone(domain), s.handlerChain) } } @@ -209,14 +213,15 @@ func (s *DefaultServer) deregisterHandler(domains []string, priority int) { log.Debugf("deregistering handler %v with priority %d", domains, priority) for _, domain := range domains { + if domain == "" { + log.Warn("skipping empty domain") + continue + } + s.handlerChain.RemoveHandler(domain, priority) // Only deregister from service if no handlers remain if !s.handlerChain.HasHandlers(domain) { - if domain == "" { - log.Warn("skipping empty domain") - continue - } s.service.DeregisterMux(nbdns.NormalizeZone(domain)) } } @@ -283,14 +288,24 @@ func (s *DefaultServer) Stop() { // OnUpdatedHostDNSServer update the DNS servers addresses for root zones // It will be applied if the mgm server do not enforce DNS settings for root zone + func (s *DefaultServer) OnUpdatedHostDNSServer(hostsDnsList []string) { s.hostsDNSHolder.set(hostsDnsList) - _, ok := s.dnsMuxMap[nbdns.RootZone] - if ok { + // Check if there's any root handler + var hasRootHandler bool + for _, handler := range s.dnsMuxMap { + if handler.domain == nbdns.RootZone { + hasRootHandler = true + break + } + } + + if hasRootHandler { log.Debugf("on new host DNS config but skip to apply it") return } + log.Debugf("update host DNS settings: %+v", hostsDnsList) s.addHostRootZone() } @@ -364,7 +379,7 @@ func (s *DefaultServer) ProbeAvailability() { go func(mux handlerWithStop) { defer wg.Done() mux.probeAvailability() - }(mux) + }(mux.handler) } wg.Wait() } @@ -419,8 +434,8 @@ func (s *DefaultServer) applyConfiguration(update nbdns.Config) error { return nil } -func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) ([]muxUpdate, map[string]nbdns.SimpleRecord, error) { - var muxUpdates []muxUpdate +func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) ([]handlerWrapper, map[string]nbdns.SimpleRecord, error) { + var muxUpdates []handlerWrapper localRecords := make(map[string]nbdns.SimpleRecord, 0) for _, customZone := range customZones { @@ -428,7 +443,7 @@ func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) return nil, nil, fmt.Errorf("received an empty list of records") } - muxUpdates = append(muxUpdates, muxUpdate{ + muxUpdates = append(muxUpdates, handlerWrapper{ domain: customZone.Domain, handler: s.localResolver, priority: PriorityMatchDomain, @@ -439,6 +454,7 @@ func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) if record.Class != nbdns.DefaultClass { return nil, nil, fmt.Errorf("received an invalid class type: %s", record.Class) } + key := buildRecordKey(record.Name, class, uint16(record.Type)) localRecords[key] = record } @@ -446,15 +462,59 @@ func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone) return muxUpdates, localRecords, nil } -func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.NameServerGroup) ([]muxUpdate, error) { +func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.NameServerGroup) ([]handlerWrapper, error) { + var muxUpdates []handlerWrapper - var muxUpdates []muxUpdate for _, nsGroup := range nameServerGroups { if len(nsGroup.NameServers) == 0 { log.Warn("received a nameserver group with empty nameserver list") continue } + if !nsGroup.Primary && len(nsGroup.Domains) == 0 { + return nil, fmt.Errorf("received a non primary 
nameserver group with an empty domain list") + } + + for _, domain := range nsGroup.Domains { + if domain == "" { + return nil, fmt.Errorf("received a nameserver group with an empty domain element") + } + } + } + + groupedNS := groupNSGroupsByDomain(nameServerGroups) + + for _, domainGroup := range groupedNS { + basePriority := PriorityMatchDomain + if domainGroup.domain == nbdns.RootZone { + basePriority = PriorityDefault + } + + updates, err := s.createHandlersForDomainGroup(domainGroup, basePriority) + if err != nil { + return nil, err + } + muxUpdates = append(muxUpdates, updates...) + } + + return muxUpdates, nil +} + +func (s *DefaultServer) createHandlersForDomainGroup(domainGroup nsGroupsByDomain, basePriority int) ([]handlerWrapper, error) { + var muxUpdates []handlerWrapper + + for i, nsGroup := range domainGroup.groups { + // Decrement priority by handler index (0, 1, 2, ...) to avoid conflicts + priority := basePriority - i + + // Check if we're about to overlap with the next priority tier + if basePriority == PriorityMatchDomain && priority <= PriorityDefault { + log.Warnf("too many handlers for domain=%s, would overlap with default priority tier (diff=%d). Skipping remaining handlers", + domainGroup.domain, PriorityMatchDomain-PriorityDefault) + break + } + + log.Debugf("creating handler for domain=%s with priority=%d", domainGroup.domain, priority) handler, err := newUpstreamResolver( s.ctx, s.wgInterface.Name(), @@ -462,10 +522,12 @@ func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.Nam s.wgInterface.Address().Network, s.statusRecorder, s.hostsDNSHolder, + domainGroup.domain, ) if err != nil { - return nil, fmt.Errorf("unable to create a new upstream resolver, error: %v", err) + return nil, fmt.Errorf("create upstream resolver: %v", err) } + for _, ns := range nsGroup.NameServers { if ns.NSType != nbdns.UDPNameServerType { log.Warnf("skipping nameserver %s with type %s, this peer supports only %s", @@ -489,78 +551,47 @@ func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.Nam // after some period defined by upstream it tries to reactivate self by calling this hook // everything we need here is just to re-apply current configuration because it already // contains this upstream settings (temporal deactivation not removed it) - handler.deactivate, handler.reactivate = s.upstreamCallbacks(nsGroup, handler) + handler.deactivate, handler.reactivate = s.upstreamCallbacks(nsGroup, handler, priority) - if nsGroup.Primary { - muxUpdates = append(muxUpdates, muxUpdate{ - domain: nbdns.RootZone, - handler: handler, - priority: PriorityDefault, - }) - continue - } - - if len(nsGroup.Domains) == 0 { - handler.stop() - return nil, fmt.Errorf("received a non primary nameserver group with an empty domain list") - } - - for _, domain := range nsGroup.Domains { - if domain == "" { - handler.stop() - return nil, fmt.Errorf("received a nameserver group with an empty domain element") - } - muxUpdates = append(muxUpdates, muxUpdate{ - domain: domain, - handler: handler, - priority: PriorityMatchDomain, - }) - } + muxUpdates = append(muxUpdates, handlerWrapper{ + domain: domainGroup.domain, + handler: handler, + priority: priority, + }) } return muxUpdates, nil } -func (s *DefaultServer) updateMux(muxUpdates []muxUpdate) { - muxUpdateMap := make(registeredHandlerMap) - handlersByPriority := make(map[string]int) - - var isContainRootUpdate bool - - // First register new handlers - for _, update := range muxUpdates { - 
s.registerHandler([]string{update.domain}, update.handler, update.priority) - muxUpdateMap[update.domain] = update.handler - handlersByPriority[update.domain] = update.priority - - if existingHandler, ok := s.dnsMuxMap[update.domain]; ok { - existingHandler.stop() - } - - if update.domain == nbdns.RootZone { - isContainRootUpdate = true - } +func (s *DefaultServer) updateMux(muxUpdates []handlerWrapper) { + // this will introduce a short period of time when the server is not able to handle DNS requests + for _, existing := range s.dnsMuxMap { + s.deregisterHandler([]string{existing.domain}, existing.priority) + existing.handler.stop() } - // Then deregister old handlers not in the update - for key, existingHandler := range s.dnsMuxMap { - _, found := muxUpdateMap[key] - if !found { - if !isContainRootUpdate && key == nbdns.RootZone { + muxUpdateMap := make(registeredHandlerMap) + var containsRootUpdate bool + + for _, update := range muxUpdates { + if update.domain == nbdns.RootZone { + containsRootUpdate = true + } + s.registerHandler([]string{update.domain}, update.handler, update.priority) + muxUpdateMap[update.handler.id()] = update + } + + // If there's no root update and we had a root handler, restore it + if !containsRootUpdate { + for _, existing := range s.dnsMuxMap { + if existing.domain == nbdns.RootZone { s.addHostRootZone() - existingHandler.stop() - } else { - existingHandler.stop() - // Deregister with the priority that was used to register - if oldPriority, ok := s.handlerPriorities[key]; ok { - s.deregisterHandler([]string{key}, oldPriority) - } + break } } } s.dnsMuxMap = muxUpdateMap - s.handlerPriorities = handlersByPriority } func (s *DefaultServer) updateLocalResolver(update map[string]nbdns.SimpleRecord) { @@ -593,6 +624,7 @@ func getNSHostPort(ns nbdns.NameServer) string { func (s *DefaultServer) upstreamCallbacks( nsGroup *nbdns.NameServerGroup, handler dns.Handler, + priority int, ) (deactivate func(error), reactivate func()) { var removeIndex map[string]int deactivate = func(err error) { @@ -609,13 +641,13 @@ func (s *DefaultServer) upstreamCallbacks( if nsGroup.Primary { removeIndex[nbdns.RootZone] = -1 s.currentConfig.RouteAll = false - s.deregisterHandler([]string{nbdns.RootZone}, PriorityDefault) + s.deregisterHandler([]string{nbdns.RootZone}, priority) } for i, item := range s.currentConfig.Domains { if _, found := removeIndex[item.Domain]; found { s.currentConfig.Domains[i].Disabled = true - s.deregisterHandler([]string{item.Domain}, PriorityMatchDomain) + s.deregisterHandler([]string{item.Domain}, priority) removeIndex[item.Domain] = i } } @@ -635,8 +667,8 @@ func (s *DefaultServer) upstreamCallbacks( } s.updateNSState(nsGroup, err, false) - } + reactivate = func() { s.mux.Lock() defer s.mux.Unlock() @@ -646,7 +678,7 @@ func (s *DefaultServer) upstreamCallbacks( continue } s.currentConfig.Domains[i].Disabled = false - s.registerHandler([]string{domain}, handler, PriorityMatchDomain) + s.registerHandler([]string{domain}, handler, priority) } l := log.WithField("nameservers", nsGroup.NameServers) @@ -654,7 +686,7 @@ func (s *DefaultServer) upstreamCallbacks( if nsGroup.Primary { s.currentConfig.RouteAll = true - s.registerHandler([]string{nbdns.RootZone}, handler, PriorityDefault) + s.registerHandler([]string{nbdns.RootZone}, handler, priority) } if s.hostManager != nil { @@ -676,6 +708,7 @@ func (s *DefaultServer) addHostRootZone() { s.wgInterface.Address().Network, s.statusRecorder, s.hostsDNSHolder, + nbdns.RootZone, ) if err != nil { log.Errorf("unable 
to create a new upstream resolver, error: %v", err) @@ -732,5 +765,34 @@ func generateGroupKey(nsGroup *nbdns.NameServerGroup) string { for _, ns := range nsGroup.NameServers { servers = append(servers, fmt.Sprintf("%s:%d", ns.IP, ns.Port)) } - return fmt.Sprintf("%s_%s_%s", nsGroup.ID, nsGroup.Name, strings.Join(servers, ",")) + return fmt.Sprintf("%v_%v", servers, nsGroup.Domains) +} + +// groupNSGroupsByDomain groups nameserver groups by their match domains +func groupNSGroupsByDomain(nsGroups []*nbdns.NameServerGroup) []nsGroupsByDomain { + domainMap := make(map[string][]*nbdns.NameServerGroup) + + for _, group := range nsGroups { + if group.Primary { + domainMap[nbdns.RootZone] = append(domainMap[nbdns.RootZone], group) + continue + } + + for _, domain := range group.Domains { + if domain == "" { + continue + } + domainMap[domain] = append(domainMap[domain], group) + } + } + + var result []nsGroupsByDomain + for domain, groups := range domainMap { + result = append(result, nsGroupsByDomain{ + domain: domain, + groups: groups, + }) + } + + return result } diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go index c166820c4..db49f96a2 100644 --- a/client/internal/dns/server_test.go +++ b/client/internal/dns/server_test.go @@ -13,6 +13,7 @@ import ( "github.com/golang/mock/gomock" "github.com/miekg/dns" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" @@ -88,6 +89,18 @@ func init() { formatter.SetTextFormatter(log.StandardLogger()) } +func generateDummyHandler(domain string, servers []nbdns.NameServer) *upstreamResolverBase { + var srvs []string + for _, srv := range servers { + srvs = append(srvs, getNSHostPort(srv)) + } + return &upstreamResolverBase{ + domain: domain, + upstreamServers: srvs, + cancel: func() {}, + } +} + func TestUpdateDNSServer(t *testing.T) { nameServers := []nbdns.NameServer{ { @@ -140,15 +153,37 @@ func TestUpdateDNSServer(t *testing.T) { }, }, }, - expectedUpstreamMap: registeredHandlerMap{"netbird.io": dummyHandler, "netbird.cloud": dummyHandler, nbdns.RootZone: dummyHandler}, - expectedLocalMap: registrationMap{buildRecordKey(zoneRecords[0].Name, 1, 1): struct{}{}}, + expectedUpstreamMap: registeredHandlerMap{ + generateDummyHandler("netbird.io", nameServers).id(): handlerWrapper{ + domain: "netbird.io", + handler: dummyHandler, + priority: PriorityMatchDomain, + }, + dummyHandler.id(): handlerWrapper{ + domain: "netbird.cloud", + handler: dummyHandler, + priority: PriorityMatchDomain, + }, + generateDummyHandler(".", nameServers).id(): handlerWrapper{ + domain: nbdns.RootZone, + handler: dummyHandler, + priority: PriorityDefault, + }, + }, + expectedLocalMap: registrationMap{buildRecordKey(zoneRecords[0].Name, 1, 1): struct{}{}}, }, { - name: "New Config Should Succeed", - initLocalMap: registrationMap{"netbird.cloud": struct{}{}}, - initUpstreamMap: registeredHandlerMap{buildRecordKey(zoneRecords[0].Name, 1, 1): dummyHandler}, - initSerial: 0, - inputSerial: 1, + name: "New Config Should Succeed", + initLocalMap: registrationMap{"netbird.cloud": struct{}{}}, + initUpstreamMap: registeredHandlerMap{ + generateDummyHandler(zoneRecords[0].Name, nameServers).id(): handlerWrapper{ + domain: buildRecordKey(zoneRecords[0].Name, 1, 1), + handler: dummyHandler, + priority: PriorityMatchDomain, + }, + }, + initSerial: 0, + inputSerial: 1, inputUpdate: nbdns.Config{ ServiceEnable: true, CustomZones: []nbdns.CustomZone{ @@ -164,8 +199,19 
@@ func TestUpdateDNSServer(t *testing.T) { }, }, }, - expectedUpstreamMap: registeredHandlerMap{"netbird.io": dummyHandler, "netbird.cloud": dummyHandler}, - expectedLocalMap: registrationMap{buildRecordKey(zoneRecords[0].Name, 1, 1): struct{}{}}, + expectedUpstreamMap: registeredHandlerMap{ + generateDummyHandler("netbird.io", nameServers).id(): handlerWrapper{ + domain: "netbird.io", + handler: dummyHandler, + priority: PriorityMatchDomain, + }, + "local-resolver": handlerWrapper{ + domain: "netbird.cloud", + handler: dummyHandler, + priority: PriorityMatchDomain, + }, + }, + expectedLocalMap: registrationMap{buildRecordKey(zoneRecords[0].Name, 1, 1): struct{}{}}, }, { name: "Smaller Config Serial Should Be Skipped", @@ -242,9 +288,15 @@ func TestUpdateDNSServer(t *testing.T) { shouldFail: true, }, { - name: "Empty Config Should Succeed and Clean Maps", - initLocalMap: registrationMap{"netbird.cloud": struct{}{}}, - initUpstreamMap: registeredHandlerMap{zoneRecords[0].Name: dummyHandler}, + name: "Empty Config Should Succeed and Clean Maps", + initLocalMap: registrationMap{"netbird.cloud": struct{}{}}, + initUpstreamMap: registeredHandlerMap{ + generateDummyHandler(zoneRecords[0].Name, nameServers).id(): handlerWrapper{ + domain: zoneRecords[0].Name, + handler: dummyHandler, + priority: PriorityMatchDomain, + }, + }, initSerial: 0, inputSerial: 1, inputUpdate: nbdns.Config{ServiceEnable: true}, @@ -252,9 +304,15 @@ func TestUpdateDNSServer(t *testing.T) { expectedLocalMap: make(registrationMap), }, { - name: "Disabled Service Should clean map", - initLocalMap: registrationMap{"netbird.cloud": struct{}{}}, - initUpstreamMap: registeredHandlerMap{zoneRecords[0].Name: dummyHandler}, + name: "Disabled Service Should clean map", + initLocalMap: registrationMap{"netbird.cloud": struct{}{}}, + initUpstreamMap: registeredHandlerMap{ + generateDummyHandler(zoneRecords[0].Name, nameServers).id(): handlerWrapper{ + domain: zoneRecords[0].Name, + handler: dummyHandler, + priority: PriorityMatchDomain, + }, + }, initSerial: 0, inputSerial: 1, inputUpdate: nbdns.Config{ServiceEnable: false}, @@ -421,7 +479,13 @@ func TestDNSFakeResolverHandleUpdates(t *testing.T) { } }() - dnsServer.dnsMuxMap = registeredHandlerMap{zoneRecords[0].Name: &localResolver{}} + dnsServer.dnsMuxMap = registeredHandlerMap{ + "id1": handlerWrapper{ + domain: zoneRecords[0].Name, + handler: &localResolver{}, + priority: PriorityMatchDomain, + }, + } dnsServer.localResolver.registeredMap = registrationMap{"netbird.cloud": struct{}{}} dnsServer.updateSerial = 0 @@ -562,9 +626,8 @@ func TestDNSServerUpstreamDeactivateCallback(t *testing.T) { localResolver: &localResolver{ registeredMap: make(registrationMap), }, - handlerChain: NewHandlerChain(), - handlerPriorities: make(map[string]int), - hostManager: hostManager, + handlerChain: NewHandlerChain(), + hostManager: hostManager, currentConfig: HostDNSConfig{ Domains: []DomainConfig{ {false, "domain0", false}, @@ -593,7 +656,7 @@ func TestDNSServerUpstreamDeactivateCallback(t *testing.T) { NameServers: []nbdns.NameServer{ {IP: netip.MustParseAddr("8.8.0.0"), NSType: nbdns.UDPNameServerType, Port: 53}, }, - }, nil) + }, nil, 0) deactivate(nil) expected := "domain0,domain2" @@ -849,7 +912,7 @@ func createWgInterfaceWithBind(t *testing.T) (*iface.WGIface, error) { return nil, err } - pf, err := uspfilter.Create(wgIface) + pf, err := uspfilter.Create(wgIface, false) if err != nil { t.Fatalf("failed to create uspfilter: %v", err) return nil, err @@ -903,8 +966,8 @@ func 
TestHandlerChain_DomainPriorities(t *testing.T) { Subdomains: true, } - chain.AddHandler("example.com.", dnsRouteHandler, PriorityDNSRoute, nil) - chain.AddHandler("example.com.", upstreamHandler, PriorityMatchDomain, nil) + chain.AddHandler("example.com.", dnsRouteHandler, PriorityDNSRoute) + chain.AddHandler("example.com.", upstreamHandler, PriorityMatchDomain) testCases := []struct { name string @@ -959,3 +1022,421 @@ func TestHandlerChain_DomainPriorities(t *testing.T) { }) } } + +type mockHandler struct { + Id string +} + +func (m *mockHandler) ServeDNS(dns.ResponseWriter, *dns.Msg) {} +func (m *mockHandler) stop() {} +func (m *mockHandler) probeAvailability() {} +func (m *mockHandler) id() handlerID { return handlerID(m.Id) } + +type mockService struct{} + +func (m *mockService) Listen() error { return nil } +func (m *mockService) Stop() {} +func (m *mockService) RuntimeIP() string { return "127.0.0.1" } +func (m *mockService) RuntimePort() int { return 53 } +func (m *mockService) RegisterMux(string, dns.Handler) {} +func (m *mockService) DeregisterMux(string) {} + +func TestDefaultServer_UpdateMux(t *testing.T) { + baseMatchHandlers := registeredHandlerMap{ + "upstream-group1": { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group1", + }, + priority: PriorityMatchDomain, + }, + "upstream-group2": { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group2", + }, + priority: PriorityMatchDomain - 1, + }, + } + + baseRootHandlers := registeredHandlerMap{ + "upstream-root1": { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root1", + }, + priority: PriorityDefault, + }, + "upstream-root2": { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root2", + }, + priority: PriorityDefault - 1, + }, + } + + baseMixedHandlers := registeredHandlerMap{ + "upstream-group1": { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group1", + }, + priority: PriorityMatchDomain, + }, + "upstream-group2": { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group2", + }, + priority: PriorityMatchDomain - 1, + }, + "upstream-other": { + domain: "other.com", + handler: &mockHandler{ + Id: "upstream-other", + }, + priority: PriorityMatchDomain, + }, + } + + tests := []struct { + name string + initialHandlers registeredHandlerMap + updates []handlerWrapper + expectedHandlers map[string]string // map[handlerID]domain + description string + }{ + { + name: "Remove group1 from update", + initialHandlers: baseMatchHandlers, + updates: []handlerWrapper{ + // Only group2 remains + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group2", + }, + priority: PriorityMatchDomain - 1, + }, + }, + expectedHandlers: map[string]string{ + "upstream-group2": "example.com", + }, + description: "When group1 is not included in the update, it should be removed while group2 remains", + }, + { + name: "Remove group2 from update", + initialHandlers: baseMatchHandlers, + updates: []handlerWrapper{ + // Only group1 remains + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group1", + }, + priority: PriorityMatchDomain, + }, + }, + expectedHandlers: map[string]string{ + "upstream-group1": "example.com", + }, + description: "When group2 is not included in the update, it should be removed while group1 remains", + }, + { + name: "Add group3 in first position", + initialHandlers: baseMatchHandlers, + updates: []handlerWrapper{ + // Add group3 with highest priority + { + domain: "example.com", + handler: &mockHandler{ + Id: 
"upstream-group3", + }, + priority: PriorityMatchDomain + 1, + }, + // Keep existing groups with their original priorities + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group1", + }, + priority: PriorityMatchDomain, + }, + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group2", + }, + priority: PriorityMatchDomain - 1, + }, + }, + expectedHandlers: map[string]string{ + "upstream-group1": "example.com", + "upstream-group2": "example.com", + "upstream-group3": "example.com", + }, + description: "When adding group3 with highest priority, it should be first in chain while maintaining existing groups", + }, + { + name: "Add group3 in last position", + initialHandlers: baseMatchHandlers, + updates: []handlerWrapper{ + // Keep existing groups with their original priorities + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group1", + }, + priority: PriorityMatchDomain, + }, + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group2", + }, + priority: PriorityMatchDomain - 1, + }, + // Add group3 with lowest priority + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group3", + }, + priority: PriorityMatchDomain - 2, + }, + }, + expectedHandlers: map[string]string{ + "upstream-group1": "example.com", + "upstream-group2": "example.com", + "upstream-group3": "example.com", + }, + description: "When adding group3 with lowest priority, it should be last in chain while maintaining existing groups", + }, + // Root zone tests + { + name: "Remove root1 from update", + initialHandlers: baseRootHandlers, + updates: []handlerWrapper{ + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root2", + }, + priority: PriorityDefault - 1, + }, + }, + expectedHandlers: map[string]string{ + "upstream-root2": ".", + }, + description: "When root1 is not included in the update, it should be removed while root2 remains", + }, + { + name: "Remove root2 from update", + initialHandlers: baseRootHandlers, + updates: []handlerWrapper{ + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root1", + }, + priority: PriorityDefault, + }, + }, + expectedHandlers: map[string]string{ + "upstream-root1": ".", + }, + description: "When root2 is not included in the update, it should be removed while root1 remains", + }, + { + name: "Add root3 in first position", + initialHandlers: baseRootHandlers, + updates: []handlerWrapper{ + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root3", + }, + priority: PriorityDefault + 1, + }, + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root1", + }, + priority: PriorityDefault, + }, + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root2", + }, + priority: PriorityDefault - 1, + }, + }, + expectedHandlers: map[string]string{ + "upstream-root1": ".", + "upstream-root2": ".", + "upstream-root3": ".", + }, + description: "When adding root3 with highest priority, it should be first in chain while maintaining existing root handlers", + }, + { + name: "Add root3 in last position", + initialHandlers: baseRootHandlers, + updates: []handlerWrapper{ + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root1", + }, + priority: PriorityDefault, + }, + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root2", + }, + priority: PriorityDefault - 1, + }, + { + domain: ".", + handler: &mockHandler{ + Id: "upstream-root3", + }, + priority: PriorityDefault - 2, + }, + }, + expectedHandlers: map[string]string{ + "upstream-root1": ".", + "upstream-root2": ".", + 
"upstream-root3": ".", + }, + description: "When adding root3 with lowest priority, it should be last in chain while maintaining existing root handlers", + }, + // Mixed domain tests + { + name: "Update with mixed domains - remove one of duplicate domain", + initialHandlers: baseMixedHandlers, + updates: []handlerWrapper{ + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group1", + }, + priority: PriorityMatchDomain, + }, + { + domain: "other.com", + handler: &mockHandler{ + Id: "upstream-other", + }, + priority: PriorityMatchDomain, + }, + }, + expectedHandlers: map[string]string{ + "upstream-group1": "example.com", + "upstream-other": "other.com", + }, + description: "When updating mixed domains, should correctly handle removal of one duplicate while maintaining other domains", + }, + { + name: "Update with mixed domains - add new domain", + initialHandlers: baseMixedHandlers, + updates: []handlerWrapper{ + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group1", + }, + priority: PriorityMatchDomain, + }, + { + domain: "example.com", + handler: &mockHandler{ + Id: "upstream-group2", + }, + priority: PriorityMatchDomain - 1, + }, + { + domain: "other.com", + handler: &mockHandler{ + Id: "upstream-other", + }, + priority: PriorityMatchDomain, + }, + { + domain: "new.com", + handler: &mockHandler{ + Id: "upstream-new", + }, + priority: PriorityMatchDomain, + }, + }, + expectedHandlers: map[string]string{ + "upstream-group1": "example.com", + "upstream-group2": "example.com", + "upstream-other": "other.com", + "upstream-new": "new.com", + }, + description: "When updating mixed domains, should maintain existing duplicates and add new domain", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := &DefaultServer{ + dnsMuxMap: tt.initialHandlers, + handlerChain: NewHandlerChain(), + service: &mockService{}, + } + + // Perform the update + server.updateMux(tt.updates) + + // Verify the results + assert.Equal(t, len(tt.expectedHandlers), len(server.dnsMuxMap), + "Number of handlers after update doesn't match expected") + + // Check each expected handler + for id, expectedDomain := range tt.expectedHandlers { + handler, exists := server.dnsMuxMap[handlerID(id)] + assert.True(t, exists, "Expected handler %s not found", id) + if exists { + assert.Equal(t, expectedDomain, handler.domain, + "Domain mismatch for handler %s", id) + } + } + + // Verify no unexpected handlers exist + for handlerID := range server.dnsMuxMap { + _, expected := tt.expectedHandlers[string(handlerID)] + assert.True(t, expected, "Unexpected handler found: %s", handlerID) + } + + // Verify the handlerChain state and order + previousPriority := 0 + for _, chainEntry := range server.handlerChain.handlers { + // Verify priority order + if previousPriority > 0 { + assert.True(t, chainEntry.Priority <= previousPriority, + "Handlers in chain not properly ordered by priority") + } + previousPriority = chainEntry.Priority + + // Verify handler exists in mux + foundInMux := false + for _, muxEntry := range server.dnsMuxMap { + if chainEntry.Handler == muxEntry.handler && + chainEntry.Priority == muxEntry.priority && + chainEntry.Pattern == dns.Fqdn(muxEntry.domain) { + foundInMux = true + break + } + } + assert.True(t, foundInMux, + "Handler in chain not found in dnsMuxMap") + } + }) + } +} diff --git a/client/internal/dns/unclean_shutdown_windows.go b/client/internal/dns/unclean_shutdown_windows.go index 74e40cc11..ab0b2cc63 100644 --- 
a/client/internal/dns/unclean_shutdown_windows.go +++ b/client/internal/dns/unclean_shutdown_windows.go @@ -6,6 +6,7 @@ import ( type ShutdownState struct { Guid string + GPO bool } func (s *ShutdownState) Name() string { @@ -13,9 +14,9 @@ func (s *ShutdownState) Name() string { } func (s *ShutdownState) Cleanup() error { - manager, err := newHostManagerWithGuid(s.Guid) - if err != nil { - return fmt.Errorf("create host manager: %w", err) + manager := ®istryConfigurator{ + guid: s.Guid, + gpo: s.GPO, } if err := manager.restoreUncleanShutdownDNS(); err != nil { diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go index f0aa12b65..d269107e3 100644 --- a/client/internal/dns/upstream.go +++ b/client/internal/dns/upstream.go @@ -2,9 +2,13 @@ package dns import ( "context" + "crypto/sha256" + "encoding/hex" "errors" "fmt" "net" + "slices" + "strings" "sync" "sync/atomic" "time" @@ -15,6 +19,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/netbirdio/netbird/client/internal/peer" + "github.com/netbirdio/netbird/client/proto" ) const ( @@ -40,6 +45,7 @@ type upstreamResolverBase struct { cancel context.CancelFunc upstreamClient upstreamClient upstreamServers []string + domain string disabled bool failsCount atomic.Int32 successCount atomic.Int32 @@ -53,12 +59,13 @@ type upstreamResolverBase struct { statusRecorder *peer.Status } -func newUpstreamResolverBase(ctx context.Context, statusRecorder *peer.Status) *upstreamResolverBase { +func newUpstreamResolverBase(ctx context.Context, statusRecorder *peer.Status, domain string) *upstreamResolverBase { ctx, cancel := context.WithCancel(ctx) return &upstreamResolverBase{ ctx: ctx, cancel: cancel, + domain: domain, upstreamTimeout: upstreamTimeout, reactivatePeriod: reactivatePeriod, failsTillDeact: failsTillDeact, @@ -71,6 +78,17 @@ func (u *upstreamResolverBase) String() string { return fmt.Sprintf("upstream %v", u.upstreamServers) } +// ID returns the unique handler ID +func (u *upstreamResolverBase) id() handlerID { + servers := slices.Clone(u.upstreamServers) + slices.Sort(servers) + + hash := sha256.New() + hash.Write([]byte(u.domain + ":")) + hash.Write([]byte(strings.Join(servers, ","))) + return handlerID("upstream-" + hex.EncodeToString(hash.Sum(nil)[:8])) +} + func (u *upstreamResolverBase) MatchSubdomains() bool { return true } @@ -87,7 +105,7 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { u.checkUpstreamFails(err) }() - log.WithField("question", r.Question[0]).Trace("received an upstream question") + log.Tracef("received upstream question: domain=%s type=%v class=%v", r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass) // set the AuthenticatedData flag and the EDNS0 buffer size to 4096 bytes to support larger dns records if r.Extra == nil { r.SetEdns0(4096, false) @@ -96,6 +114,7 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { select { case <-u.ctx.Done(): + log.Tracef("%s has been stopped", u) return default: } @@ -112,41 +131,36 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { if err != nil { if errors.Is(err, context.DeadlineExceeded) || isTimeout(err) { - log.WithError(err).WithField("upstream", upstream). - Warn("got an error while connecting to upstream") + log.Warnf("upstream %s timed out for question domain=%s", upstream, r.Question[0].Name) continue } - u.failsCount.Add(1) - log.WithError(err).WithField("upstream", upstream). 
- Error("got other error while querying the upstream") - return + log.Warnf("failed to query upstream %s for question domain=%s: %s", upstream, r.Question[0].Name, err) + continue } - if rm == nil { - log.WithError(err).WithField("upstream", upstream). - Warn("no response from upstream") - return - } - // those checks need to be independent of each other due to memory address issues - if !rm.Response { - log.WithError(err).WithField("upstream", upstream). - Warn("no response from upstream") - return + if rm == nil || !rm.Response { + log.Warnf("no response from upstream %s for question domain=%s", upstream, r.Question[0].Name) + continue } u.successCount.Add(1) - log.Tracef("took %s to query the upstream %s", t, upstream) + log.Tracef("took %s to query the upstream %s for question domain=%s", t, upstream, r.Question[0].Name) - err = w.WriteMsg(rm) - if err != nil { - log.WithError(err).Error("got an error while writing the upstream resolver response") + if err = w.WriteMsg(rm); err != nil { + log.Errorf("failed to write DNS response for question domain=%s: %s", r.Question[0].Name, err) } // count the fails only if they happen sequentially u.failsCount.Store(0) return } u.failsCount.Add(1) - log.Error("all queries to the upstream nameservers failed with timeout") + log.Errorf("all queries to the %s failed for question domain=%s", u, r.Question[0].Name) + + m := new(dns.Msg) + m.SetRcode(r, dns.RcodeServerFailure) + if err := w.WriteMsg(m); err != nil { + log.Errorf("failed to write error response for %s for question domain=%s: %s", u, r.Question[0].Name, err) + } } // checkUpstreamFails counts fails and disables or enables upstream resolving @@ -217,6 +231,14 @@ func (u *upstreamResolverBase) probeAvailability() { // didn't find a working upstream server, let's disable and try later if !success { u.disable(errors.ErrorOrNil()) + + u.statusRecorder.PublishEvent( + proto.SystemEvent_WARNING, + proto.SystemEvent_DNS, + "All upstream servers failed", + "Unable to reach one or more DNS servers. 
This might affect your ability to connect to some services.", + map[string]string{"upstreams": strings.Join(u.upstreamServers, ", ")}, + ) } } diff --git a/client/internal/dns/upstream_android.go b/client/internal/dns/upstream_android.go index 36ea05e44..a9e46ca02 100644 --- a/client/internal/dns/upstream_android.go +++ b/client/internal/dns/upstream_android.go @@ -27,8 +27,9 @@ func newUpstreamResolver( _ *net.IPNet, statusRecorder *peer.Status, hostsDNSHolder *hostsDNSHolder, + domain string, ) (*upstreamResolver, error) { - upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder) + upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder, domain) c := &upstreamResolver{ upstreamResolverBase: upstreamResolverBase, hostsDNSHolder: hostsDNSHolder, diff --git a/client/internal/dns/upstream_general.go b/client/internal/dns/upstream_general.go index a29350f8c..51acbf7a6 100644 --- a/client/internal/dns/upstream_general.go +++ b/client/internal/dns/upstream_general.go @@ -23,8 +23,9 @@ func newUpstreamResolver( _ *net.IPNet, statusRecorder *peer.Status, _ *hostsDNSHolder, + domain string, ) (*upstreamResolver, error) { - upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder) + upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder, domain) nonIOS := &upstreamResolver{ upstreamResolverBase: upstreamResolverBase, } diff --git a/client/internal/dns/upstream_ios.go b/client/internal/dns/upstream_ios.go index 60ed79d87..7d3301e14 100644 --- a/client/internal/dns/upstream_ios.go +++ b/client/internal/dns/upstream_ios.go @@ -30,8 +30,9 @@ func newUpstreamResolver( net *net.IPNet, statusRecorder *peer.Status, _ *hostsDNSHolder, + domain string, ) (*upstreamResolverIOS, error) { - upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder) + upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder, domain) ios := &upstreamResolverIOS{ upstreamResolverBase: upstreamResolverBase, diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go index c1251dcc1..c5adc0858 100644 --- a/client/internal/dns/upstream_test.go +++ b/client/internal/dns/upstream_test.go @@ -20,6 +20,7 @@ func TestUpstreamResolver_ServeDNS(t *testing.T) { timeout time.Duration cancelCTX bool expectedAnswer string + acceptNXDomain bool }{ { name: "Should Resolve A Record", @@ -36,11 +37,11 @@ func TestUpstreamResolver_ServeDNS(t *testing.T) { expectedAnswer: "1.1.1.1", }, { - name: "Should Not Resolve If Can't Connect To Both Servers", - inputMSG: new(dns.Msg).SetQuestion("one.one.one.one.", dns.TypeA), - InputServers: []string{"8.0.0.0:53", "8.0.0.1:53"}, - timeout: 200 * time.Millisecond, - responseShouldBeNil: true, + name: "Should Not Resolve If Can't Connect To Both Servers", + inputMSG: new(dns.Msg).SetQuestion("one.one.one.one.", dns.TypeA), + InputServers: []string{"8.0.0.0:53", "8.0.0.1:53"}, + timeout: 200 * time.Millisecond, + acceptNXDomain: true, }, { name: "Should Not Resolve If Parent Context Is Canceled", @@ -51,14 +52,11 @@ func TestUpstreamResolver_ServeDNS(t *testing.T) { responseShouldBeNil: true, }, } - // should resolve if first upstream times out - // should not write when both fails - // should not resolve if parent context is canceled for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) - resolver, _ := newUpstreamResolver(ctx, "", net.IP{}, &net.IPNet{}, nil, nil) + resolver, _ := newUpstreamResolver(ctx, "", net.IP{}, &net.IPNet{}, nil, 
nil, ".") resolver.upstreamServers = testCase.InputServers resolver.upstreamTimeout = testCase.timeout if testCase.cancelCTX { @@ -84,16 +82,22 @@ func TestUpstreamResolver_ServeDNS(t *testing.T) { t.Fatalf("should write a response message") } - foundAnswer := false - for _, answer := range responseMSG.Answer { - if strings.Contains(answer.String(), testCase.expectedAnswer) { - foundAnswer = true - break - } + if testCase.acceptNXDomain && responseMSG.Rcode == dns.RcodeNameError { + return } - if !foundAnswer { - t.Errorf("couldn't find the required answer, %s, in the dns response", testCase.expectedAnswer) + if testCase.expectedAnswer != "" { + foundAnswer := false + for _, answer := range responseMSG.Answer { + if strings.Contains(answer.String(), testCase.expectedAnswer) { + foundAnswer = true + break + } + } + + if !foundAnswer { + t.Errorf("couldn't find the required answer, %s, in the dns response", testCase.expectedAnswer) + } } }) } diff --git a/client/internal/engine.go b/client/internal/engine.go index 6d2c0c6f2..1386233cb 100644 --- a/client/internal/engine.go +++ b/client/internal/engine.go @@ -43,13 +43,13 @@ import ( "github.com/netbirdio/netbird/client/internal/routemanager" "github.com/netbirdio/netbird/client/internal/routemanager/systemops" "github.com/netbirdio/netbird/client/internal/statemanager" + "github.com/netbirdio/netbird/management/domain" semaphoregroup "github.com/netbirdio/netbird/util/semaphore-group" nbssh "github.com/netbirdio/netbird/client/ssh" "github.com/netbirdio/netbird/client/system" nbdns "github.com/netbirdio/netbird/dns" mgm "github.com/netbirdio/netbird/management/client" - "github.com/netbirdio/netbird/management/domain" mgmProto "github.com/netbirdio/netbird/management/proto" auth "github.com/netbirdio/netbird/relay/auth/hmac" relayClient "github.com/netbirdio/netbird/relay/client" @@ -195,6 +195,10 @@ type Peer struct { WgAllowedIps string } +type localIpUpdater interface { + UpdateLocalIPs() error +} + // NewEngine creates a new Connection Engine with probes attached func NewEngine( clientCtx context.Context, @@ -442,7 +446,7 @@ func (e *Engine) createFirewall() error { } var err error - e.firewall, err = firewall.NewFirewall(e.wgInterface, e.stateManager) + e.firewall, err = firewall.NewFirewall(e.wgInterface, e.stateManager, e.config.DisableServerRoutes) if err != nil || e.firewall == nil { log.Errorf("failed creating firewall manager: %s", err) return nil @@ -892,6 +896,14 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error { e.acl.ApplyFiltering(networkMap) } + if e.firewall != nil { + if localipfw, ok := e.firewall.(localIpUpdater); ok { + if err := localipfw.UpdateLocalIPs(); err != nil { + log.Errorf("failed to update local IPs: %v", err) + } + } + } + // DNS forwarder dnsRouteFeatureFlag := toDNSFeatureFlag(networkMap) dnsRouteDomains := toRouteDomains(e.config.WgPrivateKey.PublicKey().String(), networkMap.GetRoutes()) @@ -1460,6 +1472,11 @@ func (e *Engine) GetRouteManager() routemanager.Manager { return e.routeManager } +// GetFirewallManager returns the firewall manager +func (e *Engine) GetFirewallManager() firewallManager.Manager { + return e.firewall +} + func findIPFromInterfaceName(ifaceName string) (net.IP, error) { iface, err := net.InterfaceByName(ifaceName) if err != nil { @@ -1671,6 +1688,14 @@ func (e *Engine) GetLatestNetworkMap() (*mgmProto.NetworkMap, error) { return nm, nil } +// GetWgAddr returns the wireguard address +func (e *Engine) GetWgAddr() net.IP { + if e.wgInterface == nil { + 
return nil + } + return e.wgInterface.Address().IP +} + // updateDNSForwarder start or stop the DNS forwarder based on the domains and the feature flag func (e *Engine) updateDNSForwarder(enabled bool, domains []string) { if !enabled { diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go index b8cb2582f..8bbea6a2b 100644 --- a/client/internal/peer/conn.go +++ b/client/internal/peer/conn.go @@ -2,6 +2,7 @@ package peer import ( "context" + "fmt" "math/rand" "net" "os" @@ -28,12 +29,28 @@ import ( type ConnPriority int +func (cp ConnPriority) String() string { + switch cp { + case connPriorityNone: + return "None" + case connPriorityRelay: + return "PriorityRelay" + case connPriorityICETurn: + return "PriorityICETurn" + case connPriorityICEP2P: + return "PriorityICEP2P" + default: + return fmt.Sprintf("ConnPriority(%d)", cp) + } +} + const ( defaultWgKeepAlive = 25 * time.Second + connPriorityNone ConnPriority = 0 connPriorityRelay ConnPriority = 1 - connPriorityICETurn ConnPriority = 1 - connPriorityICEP2P ConnPriority = 2 + connPriorityICETurn ConnPriority = 2 + connPriorityICEP2P ConnPriority = 3 ) type WgConfig struct { @@ -66,14 +83,6 @@ type ConnConfig struct { ICEConfig icemaker.Config } -type WorkerCallbacks struct { - OnRelayReadyCallback func(info RelayConnInfo) - OnRelayStatusChanged func(ConnStatus) - - OnICEConnReadyCallback func(ConnPriority, ICEConnInfo) - OnICEStatusChanged func(ConnStatus) -} - type Conn struct { log *log.Entry mu sync.Mutex @@ -135,21 +144,11 @@ func NewConn(engineCtx context.Context, config ConnConfig, statusRecorder *Statu semaphore: semaphore, } - rFns := WorkerRelayCallbacks{ - OnConnReady: conn.relayConnectionIsReady, - OnDisconnected: conn.onWorkerRelayStateDisconnected, - } - - wFns := WorkerICECallbacks{ - OnConnReady: conn.iCEConnectionIsReady, - OnStatusChanged: conn.onWorkerICEStateDisconnected, - } - ctrl := isController(config) - conn.workerRelay = NewWorkerRelay(connLog, ctrl, config, relayManager, rFns) + conn.workerRelay = NewWorkerRelay(connLog, ctrl, config, conn, relayManager) relayIsSupportedLocally := conn.workerRelay.RelayIsSupportedLocally() - conn.workerICE, err = NewWorkerICE(ctx, connLog, config, signaler, iFaceDiscover, statusRecorder, relayIsSupportedLocally, wFns) + conn.workerICE, err = NewWorkerICE(ctx, connLog, config, conn, signaler, iFaceDiscover, statusRecorder, relayIsSupportedLocally) if err != nil { return nil, err } @@ -304,7 +303,7 @@ func (conn *Conn) GetKey() string { } // configureConnection starts proxying traffic from/to local Wireguard and sets connection status to StatusConnected -func (conn *Conn) iCEConnectionIsReady(priority ConnPriority, iceConnInfo ICEConnInfo) { +func (conn *Conn) onICEConnectionIsReady(priority ConnPriority, iceConnInfo ICEConnInfo) { conn.mu.Lock() defer conn.mu.Unlock() @@ -317,9 +316,10 @@ func (conn *Conn) iCEConnectionIsReady(priority ConnPriority, iceConnInfo ICECon return } - conn.log.Debugf("ICE connection is ready") - + // this never should happen, because Relay is the lower priority and ICE always close the deprecated connection before upgrade + // todo consider to remove this check if conn.currentConnPriority > priority { + conn.log.Infof("current connection priority (%s) is higher than the new one (%s), do not upgrade connection", conn.currentConnPriority, priority) conn.statusICE.Set(StatusConnected) conn.updateIceState(iceConnInfo) return @@ -375,8 +375,7 @@ func (conn *Conn) iCEConnectionIsReady(priority ConnPriority, iceConnInfo ICECon 
conn.doOnConnected(iceConnInfo.RosenpassPubKey, iceConnInfo.RosenpassAddr) } -// todo review to make sense to handle connecting and disconnected status also? -func (conn *Conn) onWorkerICEStateDisconnected(newState ConnStatus) { +func (conn *Conn) onICEStateDisconnected() { conn.mu.Lock() defer conn.mu.Unlock() @@ -384,7 +383,7 @@ func (conn *Conn) onWorkerICEStateDisconnected(newState ConnStatus) { return } - conn.log.Tracef("ICE connection state changed to %s", newState) + conn.log.Tracef("ICE connection state changed to disconnected") if conn.wgProxyICE != nil { if err := conn.wgProxyICE.CloseConn(); err != nil { @@ -394,7 +393,7 @@ func (conn *Conn) onWorkerICEStateDisconnected(newState ConnStatus) { // switch back to relay connection if conn.isReadyToUpgrade() { - conn.log.Debugf("ICE disconnected, set Relay to active connection") + conn.log.Infof("ICE disconnected, set Relay to active connection") conn.wgProxyRelay.Work() if err := conn.configureWGEndpoint(conn.wgProxyRelay.EndpointAddr()); err != nil { @@ -402,12 +401,16 @@ func (conn *Conn) onWorkerICEStateDisconnected(newState ConnStatus) { } conn.workerRelay.EnableWgWatcher(conn.ctx) conn.currentConnPriority = connPriorityRelay + } else { + conn.log.Infof("ICE disconnected, do not switch to Relay. Reset priority to: %s", connPriorityNone.String()) + conn.currentConnPriority = connPriorityNone } - changed := conn.statusICE.Get() != newState && newState != StatusConnecting - conn.statusICE.Set(newState) - - conn.guard.SetICEConnDisconnected(changed) + changed := conn.statusICE.Get() != StatusDisconnected + if changed { + conn.guard.SetICEConnDisconnected() + } + conn.statusICE.Set(StatusDisconnected) peerState := State{ PubKey: conn.config.Key, @@ -422,7 +425,7 @@ func (conn *Conn) onWorkerICEStateDisconnected(newState ConnStatus) { } } -func (conn *Conn) relayConnectionIsReady(rci RelayConnInfo) { +func (conn *Conn) onRelayConnectionIsReady(rci RelayConnInfo) { conn.mu.Lock() defer conn.mu.Unlock() @@ -444,7 +447,7 @@ func (conn *Conn) relayConnectionIsReady(rci RelayConnInfo) { conn.log.Infof("created new wgProxy for relay connection: %s", wgProxy.EndpointAddr().String()) if conn.iceP2PIsActive() { - conn.log.Debugf("do not switch to relay because current priority is: %v", conn.currentConnPriority) + conn.log.Debugf("do not switch to relay because current priority is: %s", conn.currentConnPriority.String()) conn.setRelayedProxy(wgProxy) conn.statusRelay.Set(StatusConnected) conn.updateRelayStatus(rci.relayedConn.RemoteAddr().String(), rci.rosenpassPubKey) @@ -474,7 +477,7 @@ func (conn *Conn) relayConnectionIsReady(rci RelayConnInfo) { conn.doOnConnected(rci.rosenpassPubKey, rci.rosenpassAddr) } -func (conn *Conn) onWorkerRelayStateDisconnected() { +func (conn *Conn) onRelayDisconnected() { conn.mu.Lock() defer conn.mu.Unlock() @@ -497,8 +500,10 @@ func (conn *Conn) onWorkerRelayStateDisconnected() { } changed := conn.statusRelay.Get() != StatusDisconnected + if changed { + conn.guard.SetRelayedConnDisconnected() + } conn.statusRelay.Set(StatusDisconnected) - conn.guard.SetRelayedConnDisconnected(changed) peerState := State{ PubKey: conn.config.Key, diff --git a/client/internal/peer/guard/guard.go b/client/internal/peer/guard/guard.go index bf3527a62..1fc2b4a4a 100644 --- a/client/internal/peer/guard/guard.go +++ b/client/internal/peer/guard/guard.go @@ -29,8 +29,8 @@ type Guard struct { isConnectedOnAllWay isConnectedFunc timeout time.Duration srWatcher *SRWatcher - relayedConnDisconnected chan bool - iCEConnDisconnected chan 
bool + relayedConnDisconnected chan struct{} + iCEConnDisconnected chan struct{} } func NewGuard(log *log.Entry, isController bool, isConnectedFn isConnectedFunc, timeout time.Duration, srWatcher *SRWatcher) *Guard { @@ -41,8 +41,8 @@ func NewGuard(log *log.Entry, isController bool, isConnectedFn isConnectedFunc, isConnectedOnAllWay: isConnectedFn, timeout: timeout, srWatcher: srWatcher, - relayedConnDisconnected: make(chan bool, 1), - iCEConnDisconnected: make(chan bool, 1), + relayedConnDisconnected: make(chan struct{}, 1), + iCEConnDisconnected: make(chan struct{}, 1), } } @@ -54,16 +54,16 @@ func (g *Guard) Start(ctx context.Context) { } } -func (g *Guard) SetRelayedConnDisconnected(changed bool) { +func (g *Guard) SetRelayedConnDisconnected() { select { - case g.relayedConnDisconnected <- changed: + case g.relayedConnDisconnected <- struct{}{}: default: } } -func (g *Guard) SetICEConnDisconnected(changed bool) { +func (g *Guard) SetICEConnDisconnected() { select { - case g.iCEConnDisconnected <- changed: + case g.iCEConnDisconnected <- struct{}{}: default: } } @@ -96,19 +96,13 @@ func (g *Guard) reconnectLoopWithRetry(ctx context.Context) { g.triggerOfferSending() } - case changed := <-g.relayedConnDisconnected: - if !changed { - continue - } + case <-g.relayedConnDisconnected: g.log.Debugf("Relay connection changed, reset reconnection ticker") ticker.Stop() ticker = g.prepareExponentTicker(ctx) tickerChannel = ticker.C - case changed := <-g.iCEConnDisconnected: - if !changed { - continue - } + case <-g.iCEConnDisconnected: g.log.Debugf("ICE connection changed, reset reconnection ticker") ticker.Stop() ticker = g.prepareExponentTicker(ctx) @@ -138,16 +132,10 @@ func (g *Guard) listenForDisconnectEvents(ctx context.Context) { g.log.Infof("start listen for reconnect events...") for { select { - case changed := <-g.relayedConnDisconnected: - if !changed { - continue - } + case <-g.relayedConnDisconnected: g.log.Debugf("Relay connection changed, triggering reconnect") g.triggerOfferSending() - case changed := <-g.iCEConnDisconnected: - if !changed { - continue - } + case <-g.iCEConnDisconnected: g.log.Debugf("ICE state changed, try to send new offer") g.triggerOfferSending() case <-srReconnectedChan: diff --git a/client/internal/peer/status.go b/client/internal/peer/status.go index 0531db726..ee884a76e 100644 --- a/client/internal/peer/status.go +++ b/client/internal/peer/status.go @@ -7,23 +7,33 @@ import ( "sync" "time" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" "golang.org/x/exp/maps" "google.golang.org/grpc/codes" gstatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" firewall "github.com/netbirdio/netbird/client/firewall/manager" "github.com/netbirdio/netbird/client/iface/configurer" "github.com/netbirdio/netbird/client/internal/ingressgw" "github.com/netbirdio/netbird/client/internal/relay" + "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/management/domain" relayClient "github.com/netbirdio/netbird/relay/client" ) +const eventQueueSize = 10 + type ResolvedDomainInfo struct { Prefixes []netip.Prefix ParentDomain domain.Domain } +type EventListener interface { + OnEvent(event *proto.SystemEvent) +} + // State contains the latest state of a peer type State struct { Mux *sync.RWMutex @@ -161,6 +171,10 @@ type Status struct { relayMgr *relayClient.Manager + eventMux sync.RWMutex + eventStreams map[string]chan *proto.SystemEvent + eventQueue *EventQueue + ingressGwMgr *ingressgw.Manager } @@ -169,6 
+183,8 @@ func NewRecorder(mgmAddress string) *Status { return &Status{ peers: make(map[string]State), changeNotify: make(map[string]chan struct{}), + eventStreams: make(map[string]chan *proto.SystemEvent), + eventQueue: NewEventQueue(eventQueueSize), offlinePeers: make([]State, 0), notifier: newNotifier(), mgmAddress: mgmAddress, @@ -754,7 +770,9 @@ func (d *Status) ForwardingRules() []firewall.ForwardRule { func (d *Status) GetDNSStates() []NSGroupState { d.mux.Lock() defer d.mux.Unlock() - return d.nsGroupStates + + // shallow copy is good enough, as slices fields are currently not updated + return slices.Clone(d.nsGroupStates) } func (d *Status) GetResolvedDomainsStates() map[domain.Domain]ResolvedDomainInfo { @@ -838,3 +856,112 @@ func (d *Status) notifyAddressChanged() { func (d *Status) numOfPeers() int { return len(d.peers) + len(d.offlinePeers) } + +// PublishEvent adds an event to the queue and distributes it to all subscribers +func (d *Status) PublishEvent( + severity proto.SystemEvent_Severity, + category proto.SystemEvent_Category, + msg string, + userMsg string, + metadata map[string]string, +) { + event := &proto.SystemEvent{ + Id: uuid.New().String(), + Severity: severity, + Category: category, + Message: msg, + UserMessage: userMsg, + Metadata: metadata, + Timestamp: timestamppb.Now(), + } + + d.eventMux.Lock() + defer d.eventMux.Unlock() + + d.eventQueue.Add(event) + + for _, stream := range d.eventStreams { + select { + case stream <- event: + default: + log.Debugf("event stream buffer full, skipping event: %v", event) + } + } + + log.Debugf("event published: %v", event) +} + +// SubscribeToEvents returns a new event subscription +func (d *Status) SubscribeToEvents() *EventSubscription { + d.eventMux.Lock() + defer d.eventMux.Unlock() + + id := uuid.New().String() + stream := make(chan *proto.SystemEvent, 10) + d.eventStreams[id] = stream + + return &EventSubscription{ + id: id, + events: stream, + } +} + +// UnsubscribeFromEvents removes an event subscription +func (d *Status) UnsubscribeFromEvents(sub *EventSubscription) { + if sub == nil { + return + } + + d.eventMux.Lock() + defer d.eventMux.Unlock() + + if stream, exists := d.eventStreams[sub.id]; exists { + close(stream) + delete(d.eventStreams, sub.id) + } +} + +// GetEventHistory returns all events in the queue +func (d *Status) GetEventHistory() []*proto.SystemEvent { + return d.eventQueue.GetAll() +} + +type EventQueue struct { + maxSize int + events []*proto.SystemEvent + mutex sync.RWMutex +} + +func NewEventQueue(size int) *EventQueue { + return &EventQueue{ + maxSize: size, + events: make([]*proto.SystemEvent, 0, size), + } +} + +func (q *EventQueue) Add(event *proto.SystemEvent) { + q.mutex.Lock() + defer q.mutex.Unlock() + + q.events = append(q.events, event) + + if len(q.events) > q.maxSize { + q.events = q.events[len(q.events)-q.maxSize:] + } +} + +func (q *EventQueue) GetAll() []*proto.SystemEvent { + q.mutex.RLock() + defer q.mutex.RUnlock() + + return slices.Clone(q.events) +} + +type EventSubscription struct { + id string + events chan *proto.SystemEvent +} + +func (s *EventSubscription) Events() <-chan *proto.SystemEvent { + return s.events +} diff --git a/client/internal/peer/wg_watcher.go b/client/internal/peer/wg_watcher.go new file mode 100644 index 000000000..6670c6517 --- /dev/null +++ b/client/internal/peer/wg_watcher.go @@ -0,0 +1,154 @@ +package peer + +import ( + "context" + "sync" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/iface/configurer" +) 
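The status recorder changes above introduce a small publish/subscribe surface for system events (PublishEvent, SubscribeToEvents, UnsubscribeFromEvents, GetEventHistory). The snippet below is a minimal consumer sketch based only on the signatures added in this diff; the management address and the published event values are placeholders for illustration and are not part of the patch.

package main

import (
	"fmt"

	"github.com/netbirdio/netbird/client/internal/peer"
	"github.com/netbirdio/netbird/client/proto"
)

func main() {
	// Placeholder management address, used here only to construct the recorder.
	status := peer.NewRecorder("mgmt.example.com:443")

	sub := status.SubscribeToEvents()

	done := make(chan struct{})
	go func() {
		defer close(done)
		// UnsubscribeFromEvents closes the channel, which ends this loop.
		for ev := range sub.Events() {
			fmt.Printf("[%s/%s] %s\n", ev.Severity, ev.Category, ev.Message)
		}
	}()

	// Producers such as the upstream resolver publish events like this one.
	status.PublishEvent(
		proto.SystemEvent_WARNING,
		proto.SystemEvent_DNS,
		"All upstream servers failed",
		"Unable to reach one or more DNS servers.",
		map[string]string{"upstreams": "8.8.8.8:53, 1.1.1.1:53"},
	)

	status.UnsubscribeFromEvents(sub)
	<-done

	// The bounded EventQueue keeps only the most recent events for late readers.
	fmt.Println("history size:", len(status.GetEventHistory()))
}

Because PublishEvent uses a non-blocking send per subscriber, a slow consumer only loses events once its buffered channel fills; GetEventHistory still returns the last eventQueueSize entries from the queue.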
+ +const ( + wgHandshakePeriod = 3 * time.Minute +) + +var ( + wgHandshakeOvertime = 30 * time.Second // allowed delay in network + checkPeriod = wgHandshakePeriod + wgHandshakeOvertime +) + +type WGInterfaceStater interface { + GetStats(key string) (configurer.WGStats, error) +} + +type WGWatcher struct { + log *log.Entry + wgIfaceStater WGInterfaceStater + peerKey string + + ctx context.Context + ctxCancel context.CancelFunc + ctxLock sync.Mutex + waitGroup sync.WaitGroup +} + +func NewWGWatcher(log *log.Entry, wgIfaceStater WGInterfaceStater, peerKey string) *WGWatcher { + return &WGWatcher{ + log: log, + wgIfaceStater: wgIfaceStater, + peerKey: peerKey, + } +} + +// EnableWgWatcher starts the WireGuard watcher. If it is already enabled, it will return immediately and do nothing. +func (w *WGWatcher) EnableWgWatcher(parentCtx context.Context, onDisconnectedFn func()) { + w.log.Debugf("enable WireGuard watcher") + w.ctxLock.Lock() + defer w.ctxLock.Unlock() + + if w.ctx != nil && w.ctx.Err() == nil { + w.log.Errorf("WireGuard watcher already enabled") + return + } + + ctx, ctxCancel := context.WithCancel(parentCtx) + w.ctx = ctx + w.ctxCancel = ctxCancel + + initialHandshake, err := w.wgState() + if err != nil { + w.log.Warnf("failed to read initial wg stats: %v", err) + } + + w.waitGroup.Add(1) + go w.periodicHandshakeCheck(ctx, ctxCancel, onDisconnectedFn, initialHandshake) +} + +// DisableWgWatcher stops the WireGuard watcher and wait for the watcher to exit +func (w *WGWatcher) DisableWgWatcher() { + w.ctxLock.Lock() + defer w.ctxLock.Unlock() + + if w.ctxCancel == nil { + return + } + + w.log.Debugf("disable WireGuard watcher") + + w.ctxCancel() + w.ctxCancel = nil + w.waitGroup.Wait() +} + +// wgStateCheck help to check the state of the WireGuard handshake and relay connection +func (w *WGWatcher) periodicHandshakeCheck(ctx context.Context, ctxCancel context.CancelFunc, onDisconnectedFn func(), initialHandshake time.Time) { + w.log.Infof("WireGuard watcher started") + defer w.waitGroup.Done() + + timer := time.NewTimer(wgHandshakeOvertime) + defer timer.Stop() + defer ctxCancel() + + lastHandshake := initialHandshake + + for { + select { + case <-timer.C: + handshake, ok := w.handshakeCheck(lastHandshake) + if !ok { + onDisconnectedFn() + return + } + lastHandshake = *handshake + + resetTime := time.Until(handshake.Add(checkPeriod)) + timer.Reset(resetTime) + + w.log.Debugf("WireGuard watcher reset timer: %v", resetTime) + case <-ctx.Done(): + w.log.Infof("WireGuard watcher stopped") + return + } + } +} + +// handshakeCheck checks the WireGuard handshake and return the new handshake time if it is different from the previous one +func (w *WGWatcher) handshakeCheck(lastHandshake time.Time) (*time.Time, bool) { + handshake, err := w.wgState() + if err != nil { + w.log.Errorf("failed to read wg stats: %v", err) + return nil, false + } + + w.log.Tracef("previous handshake, handshake: %v, %v", lastHandshake, handshake) + + // the current know handshake did not change + if handshake.Equal(lastHandshake) { + w.log.Warnf("WireGuard handshake timed out, closing relay connection: %v", handshake) + return nil, false + } + + // in case if the machine is suspended, the handshake time will be in the past + if handshake.Add(checkPeriod).Before(time.Now()) { + w.log.Warnf("WireGuard handshake timed out, closing relay connection: %v", handshake) + return nil, false + } + + // error handling for handshake time in the future + if handshake.After(time.Now()) { + w.log.Warnf("WireGuard handshake is in 
the future, closing relay connection: %v", handshake) + return nil, false + } + + return &handshake, true +} + +func (w *WGWatcher) wgState() (time.Time, error) { + wgState, err := w.wgIfaceStater.GetStats(w.peerKey) + if err != nil { + return time.Time{}, err + } + return wgState.LastHandshake, nil +} diff --git a/client/internal/peer/wg_watcher_test.go b/client/internal/peer/wg_watcher_test.go new file mode 100644 index 000000000..a5b9026ad --- /dev/null +++ b/client/internal/peer/wg_watcher_test.go @@ -0,0 +1,98 @@ +package peer + +import ( + "context" + "testing" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/iface/configurer" +) + +type MocWgIface struct { + initial bool + lastHandshake time.Time + stop bool +} + +func (m *MocWgIface) GetStats(key string) (configurer.WGStats, error) { + if !m.initial { + m.initial = true + return configurer.WGStats{}, nil + } + + if !m.stop { + m.lastHandshake = time.Now() + } + + stats := configurer.WGStats{ + LastHandshake: m.lastHandshake, + } + + return stats, nil +} + +func (m *MocWgIface) disconnect() { + m.stop = true +} + +func TestWGWatcher_EnableWgWatcher(t *testing.T) { + checkPeriod = 5 * time.Second + wgHandshakeOvertime = 1 * time.Second + + mlog := log.WithField("peer", "tet") + mocWgIface := &MocWgIface{} + watcher := NewWGWatcher(mlog, mocWgIface, "") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + onDisconnected := make(chan struct{}, 1) + watcher.EnableWgWatcher(ctx, func() { + mlog.Infof("onDisconnectedFn") + onDisconnected <- struct{}{} + }) + + // wait for initial reading + time.Sleep(2 * time.Second) + mocWgIface.disconnect() + + select { + case <-onDisconnected: + case <-time.After(10 * time.Second): + t.Errorf("timeout") + } + watcher.DisableWgWatcher() +} + +func TestWGWatcher_ReEnable(t *testing.T) { + checkPeriod = 5 * time.Second + wgHandshakeOvertime = 1 * time.Second + + mlog := log.WithField("peer", "tet") + mocWgIface := &MocWgIface{} + watcher := NewWGWatcher(mlog, mocWgIface, "") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + onDisconnected := make(chan struct{}, 1) + + watcher.EnableWgWatcher(ctx, func() {}) + watcher.DisableWgWatcher() + + watcher.EnableWgWatcher(ctx, func() { + onDisconnected <- struct{}{} + }) + + time.Sleep(2 * time.Second) + mocWgIface.disconnect() + + select { + case <-onDisconnected: + case <-time.After(10 * time.Second): + t.Errorf("timeout") + } + watcher.DisableWgWatcher() +} diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go index 008318492..7dd84a98e 100644 --- a/client/internal/peer/worker_ice.go +++ b/client/internal/peer/worker_ice.go @@ -31,20 +31,15 @@ type ICEConnInfo struct { RelayedOnLocal bool } -type WorkerICECallbacks struct { - OnConnReady func(ConnPriority, ICEConnInfo) - OnStatusChanged func(ConnStatus) -} - type WorkerICE struct { ctx context.Context log *log.Entry config ConnConfig + conn *Conn signaler *Signaler iFaceDiscover stdnet.ExternalIFaceDiscover statusRecorder *Status hasRelayOnLocally bool - conn WorkerICECallbacks agent *ice.Agent muxAgent sync.Mutex @@ -60,16 +55,16 @@ type WorkerICE struct { lastKnownState ice.ConnectionState } -func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, hasRelayOnLocally bool, callBacks WorkerICECallbacks) (*WorkerICE, error) { +func NewWorkerICE(ctx context.Context, log *log.Entry, 
config ConnConfig, conn *Conn, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, hasRelayOnLocally bool) (*WorkerICE, error) { w := &WorkerICE{ ctx: ctx, log: log, config: config, + conn: conn, signaler: signaler, iFaceDiscover: ifaceDiscover, statusRecorder: statusRecorder, hasRelayOnLocally: hasRelayOnLocally, - conn: callBacks, } localUfrag, localPwd, err := icemaker.GenerateICECredentials() @@ -154,8 +149,8 @@ func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) { Relayed: isRelayed(pair), RelayedOnLocal: isRelayCandidate(pair.Local), } - w.log.Debugf("on ICE conn read to use ready") - go w.conn.OnConnReady(selectedPriority(pair), ci) + w.log.Debugf("on ICE conn is ready to use") + go w.conn.onICEConnectionIsReady(selectedPriority(pair), ci) } // OnRemoteCandidate Handles ICE connection Candidate provided by the remote peer. @@ -220,7 +215,7 @@ func (w *WorkerICE) reCreateAgent(agentCancel context.CancelFunc, candidates []i case ice.ConnectionStateFailed, ice.ConnectionStateDisconnected: if w.lastKnownState != ice.ConnectionStateDisconnected { w.lastKnownState = ice.ConnectionStateDisconnected - w.conn.OnStatusChanged(StatusDisconnected) + w.conn.onICEStateDisconnected() } w.closeAgent(agentCancel) default: diff --git a/client/internal/peer/worker_relay.go b/client/internal/peer/worker_relay.go index c22dcdeda..56c19cd1e 100644 --- a/client/internal/peer/worker_relay.go +++ b/client/internal/peer/worker_relay.go @@ -6,52 +6,41 @@ import ( "net" "sync" "sync/atomic" - "time" log "github.com/sirupsen/logrus" relayClient "github.com/netbirdio/netbird/relay/client" ) -var ( - wgHandshakePeriod = 3 * time.Minute - wgHandshakeOvertime = 30 * time.Second -) - type RelayConnInfo struct { relayedConn net.Conn rosenpassPubKey []byte rosenpassAddr string } -type WorkerRelayCallbacks struct { - OnConnReady func(RelayConnInfo) - OnDisconnected func() -} - type WorkerRelay struct { log *log.Entry isController bool config ConnConfig + conn *Conn relayManager relayClient.ManagerService - callBacks WorkerRelayCallbacks - relayedConn net.Conn - relayLock sync.Mutex - ctxWgWatch context.Context - ctxCancelWgWatch context.CancelFunc - ctxLock sync.Mutex + relayedConn net.Conn + relayLock sync.Mutex relaySupportedOnRemotePeer atomic.Bool + + wgWatcher *WGWatcher } -func NewWorkerRelay(log *log.Entry, ctrl bool, config ConnConfig, relayManager relayClient.ManagerService, callbacks WorkerRelayCallbacks) *WorkerRelay { +func NewWorkerRelay(log *log.Entry, ctrl bool, config ConnConfig, conn *Conn, relayManager relayClient.ManagerService) *WorkerRelay { r := &WorkerRelay{ log: log, isController: ctrl, config: config, + conn: conn, relayManager: relayManager, - callBacks: callbacks, + wgWatcher: NewWGWatcher(log, config.WgConfig.WgInterface, config.Key), } return r } @@ -87,7 +76,7 @@ func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) { w.relayedConn = relayedConn w.relayLock.Unlock() - err = w.relayManager.AddCloseListener(srv, w.onRelayMGDisconnected) + err = w.relayManager.AddCloseListener(srv, w.onRelayClientDisconnected) if err != nil { log.Errorf("failed to add close listener: %s", err) _ = relayedConn.Close() @@ -95,7 +84,7 @@ func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) { } w.log.Debugf("peer conn opened via Relay: %s", srv) - go w.callBacks.OnConnReady(RelayConnInfo{ + go w.conn.onRelayConnectionIsReady(RelayConnInfo{ relayedConn: relayedConn, rosenpassPubKey: remoteOfferAnswer.RosenpassPubKey, rosenpassAddr: 
remoteOfferAnswer.RosenpassAddr, @@ -103,32 +92,11 @@ func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) { } func (w *WorkerRelay) EnableWgWatcher(ctx context.Context) { - w.log.Debugf("enable WireGuard watcher") - w.ctxLock.Lock() - defer w.ctxLock.Unlock() - - if w.ctxWgWatch != nil && w.ctxWgWatch.Err() == nil { - return - } - - ctx, ctxCancel := context.WithCancel(ctx) - w.ctxWgWatch = ctx - w.ctxCancelWgWatch = ctxCancel - - w.wgStateCheck(ctx, ctxCancel) + w.wgWatcher.EnableWgWatcher(ctx, w.onWGDisconnected) } func (w *WorkerRelay) DisableWgWatcher() { - w.ctxLock.Lock() - defer w.ctxLock.Unlock() - - if w.ctxCancelWgWatch == nil { - return - } - - w.log.Debugf("disable WireGuard watcher") - - w.ctxCancelWgWatch() + w.wgWatcher.DisableWgWatcher() } func (w *WorkerRelay) RelayInstanceAddress() (string, error) { @@ -150,57 +118,17 @@ func (w *WorkerRelay) CloseConn() { return } - err := w.relayedConn.Close() - if err != nil { + if err := w.relayedConn.Close(); err != nil { w.log.Warnf("failed to close relay connection: %v", err) } } -// wgStateCheck help to check the state of the WireGuard handshake and relay connection -func (w *WorkerRelay) wgStateCheck(ctx context.Context, ctxCancel context.CancelFunc) { - w.log.Debugf("WireGuard watcher started") - lastHandshake, err := w.wgState() - if err != nil { - w.log.Warnf("failed to read wg stats: %v", err) - lastHandshake = time.Time{} - } - - go func(lastHandshake time.Time) { - timer := time.NewTimer(wgHandshakeOvertime) - defer timer.Stop() - defer ctxCancel() - - for { - select { - case <-timer.C: - handshake, err := w.wgState() - if err != nil { - w.log.Errorf("failed to read wg stats: %v", err) - timer.Reset(wgHandshakeOvertime) - continue - } - - w.log.Tracef("previous handshake, handshake: %v, %v", lastHandshake, handshake) - - if handshake.Equal(lastHandshake) { - w.log.Infof("WireGuard handshake timed out, closing relay connection: %v", handshake) - w.relayLock.Lock() - _ = w.relayedConn.Close() - w.relayLock.Unlock() - w.callBacks.OnDisconnected() - return - } - - resetTime := time.Until(handshake.Add(wgHandshakePeriod + wgHandshakeOvertime)) - lastHandshake = handshake - timer.Reset(resetTime) - case <-ctx.Done(): - w.log.Debugf("WireGuard watcher stopped") - return - } - } - }(lastHandshake) +func (w *WorkerRelay) onWGDisconnected() { + w.relayLock.Lock() + _ = w.relayedConn.Close() + w.relayLock.Unlock() + w.conn.onRelayDisconnected() } func (w *WorkerRelay) isRelaySupported(answer *OfferAnswer) bool { @@ -217,20 +145,7 @@ func (w *WorkerRelay) preferredRelayServer(myRelayAddress, remoteRelayAddress st return remoteRelayAddress } -func (w *WorkerRelay) wgState() (time.Time, error) { - wgState, err := w.config.WgConfig.WgInterface.GetStats(w.config.Key) - if err != nil { - return time.Time{}, err - } - return wgState.LastHandshake, nil -} - -func (w *WorkerRelay) onRelayMGDisconnected() { - w.ctxLock.Lock() - defer w.ctxLock.Unlock() - - if w.ctxCancelWgWatch != nil { - w.ctxCancelWgWatch() - } - go w.callBacks.OnDisconnected() +func (w *WorkerRelay) onRelayClientDisconnected() { + w.wgWatcher.DisableWgWatcher() + go w.conn.onRelayDisconnected() } diff --git a/client/internal/routemanager/client.go b/client/internal/routemanager/client.go index c755194f0..5984e69cb 100644 --- a/client/internal/routemanager/client.go +++ b/client/internal/routemanager/client.go @@ -19,6 +19,7 @@ import ( "github.com/netbirdio/netbird/client/internal/routemanager/dynamic" 
"github.com/netbirdio/netbird/client/internal/routemanager/refcounter" "github.com/netbirdio/netbird/client/internal/routemanager/static" + "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/route" ) @@ -28,6 +29,15 @@ const ( handlerTypeStatic ) +type reason int + +const ( + reasonUnknown reason = iota + reasonRouteUpdate + reasonPeerUpdate + reasonShutdown +) + type routerPeerStatus struct { connected bool relayed bool @@ -255,7 +265,7 @@ func (c *clientNetwork) removeRouteFromWireGuardPeer() error { return nil } -func (c *clientNetwork) removeRouteFromPeerAndSystem() error { +func (c *clientNetwork) removeRouteFromPeerAndSystem(rsn reason) error { if c.currentChosen == nil { return nil } @@ -269,17 +279,19 @@ func (c *clientNetwork) removeRouteFromPeerAndSystem() error { merr = multierror.Append(merr, fmt.Errorf("remove route: %w", err)) } + c.disconnectEvent(rsn) + return nberrors.FormatErrorOrNil(merr) } -func (c *clientNetwork) recalculateRouteAndUpdatePeerAndSystem() error { +func (c *clientNetwork) recalculateRouteAndUpdatePeerAndSystem(rsn reason) error { routerPeerStatuses := c.getRouterPeerStatuses() newChosenID := c.getBestRouteFromStatuses(routerPeerStatuses) // If no route is chosen, remove the route from the peer and system if newChosenID == "" { - if err := c.removeRouteFromPeerAndSystem(); err != nil { + if err := c.removeRouteFromPeerAndSystem(rsn); err != nil { return fmt.Errorf("remove route for peer %s: %w", c.currentChosen.Peer, err) } @@ -319,6 +331,58 @@ func (c *clientNetwork) recalculateRouteAndUpdatePeerAndSystem() error { return nil } +func (c *clientNetwork) disconnectEvent(rsn reason) { + var defaultRoute bool + for _, r := range c.routes { + if r.Network.Bits() == 0 { + defaultRoute = true + break + } + } + + if !defaultRoute { + return + } + + var severity proto.SystemEvent_Severity + var message string + var userMessage string + meta := make(map[string]string) + + switch rsn { + case reasonShutdown: + severity = proto.SystemEvent_INFO + message = "Default route removed" + userMessage = "Exit node disconnected." + meta["network"] = c.handler.String() + case reasonRouteUpdate: + severity = proto.SystemEvent_INFO + message = "Default route updated due to configuration change" + meta["network"] = c.handler.String() + case reasonPeerUpdate: + severity = proto.SystemEvent_WARNING + message = "Default route disconnected due to peer unreachability" + userMessage = "Exit node connection lost. Your internet access might be affected." + if c.currentChosen != nil { + meta["peer"] = c.currentChosen.Peer + meta["network"] = c.handler.String() + } + default: + severity = proto.SystemEvent_ERROR + message = "Default route disconnected for unknown reason" + userMessage = "Exit node disconnected for unknown reasons." 
+ meta["network"] = c.handler.String() + } + + c.statusRecorder.PublishEvent( + severity, + proto.SystemEvent_NETWORK, + message, + userMessage, + meta, + ) +} + func (c *clientNetwork) sendUpdateToClientNetworkWatcher(update routesUpdate) { go func() { c.routeUpdate <- update @@ -361,12 +425,12 @@ func (c *clientNetwork) peersStateAndUpdateWatcher() { select { case <-c.ctx.Done(): log.Debugf("Stopping watcher for network [%v]", c.handler) - if err := c.removeRouteFromPeerAndSystem(); err != nil { + if err := c.removeRouteFromPeerAndSystem(reasonShutdown); err != nil { log.Errorf("Failed to remove routes for [%v]: %v", c.handler, err) } return case <-c.peerStateUpdate: - err := c.recalculateRouteAndUpdatePeerAndSystem() + err := c.recalculateRouteAndUpdatePeerAndSystem(reasonPeerUpdate) if err != nil { log.Errorf("Failed to recalculate routes for network [%v]: %v", c.handler, err) } @@ -385,7 +449,7 @@ func (c *clientNetwork) peersStateAndUpdateWatcher() { if isTrueRouteUpdate { log.Debug("Client network update contains different routes, recalculating routes") - err := c.recalculateRouteAndUpdatePeerAndSystem() + err := c.recalculateRouteAndUpdatePeerAndSystem(reasonRouteUpdate) if err != nil { log.Errorf("Failed to recalculate routes for network [%v]: %v", c.handler, err) } diff --git a/client/internal/routemanager/manager.go b/client/internal/routemanager/manager.go index 6f73fb166..52de0948b 100644 --- a/client/internal/routemanager/manager.go +++ b/client/internal/routemanager/manager.go @@ -113,13 +113,14 @@ func NewManager(config ManagerConfig) *DefaultManager { disableServerRoutes: config.DisableServerRoutes, } + useNoop := netstack.IsEnabled() || config.DisableClientRoutes + dm.setupRefCounters(useNoop) + // don't proceed with client routes if it is disabled if config.DisableClientRoutes { return dm } - dm.setupRefCounters() - if runtime.GOOS == "android" { cr := dm.initialClientRoutes(config.InitialRoutes) dm.notifier.SetInitialClientRoutes(cr) @@ -127,7 +128,7 @@ func NewManager(config ManagerConfig) *DefaultManager { return dm } -func (m *DefaultManager) setupRefCounters() { +func (m *DefaultManager) setupRefCounters(useNoop bool) { m.routeRefCounter = refcounter.New( func(prefix netip.Prefix, _ struct{}) (struct{}, error) { return struct{}{}, m.sysOps.AddVPNRoute(prefix, m.wgInterface.ToInterface()) @@ -137,7 +138,7 @@ func (m *DefaultManager) setupRefCounters() { }, ) - if netstack.IsEnabled() { + if useNoop { m.routeRefCounter = refcounter.New( func(netip.Prefix, struct{}) (struct{}, error) { return struct{}{}, refcounter.ErrIgnore @@ -285,15 +286,15 @@ func (m *DefaultManager) UpdateRoutes(updateSerial uint64, newRoutes []*route.Ro m.updateClientNetworks(updateSerial, filteredClientRoutes) m.notifier.OnNewRoutes(filteredClientRoutes) } + m.clientRoutes = newClientRoutesIDMap - if m.serverRouter != nil { - err := m.serverRouter.updateRoutes(newServerRoutesMap) - if err != nil { - return err - } + if m.serverRouter == nil { + return nil } - m.clientRoutes = newClientRoutesIDMap + if err := m.serverRouter.updateRoutes(newServerRoutesMap); err != nil { + return fmt.Errorf("update routes: %w", err) + } return nil } @@ -422,11 +423,6 @@ func (m *DefaultManager) classifyRoutes(newRoutes []*route.Route) (map[route.ID] haID := newRoute.GetHAUniqueID() if newRoute.Peer == m.pubKey { ownNetworkIDs[haID] = true - // only linux is supported for now - if runtime.GOOS != "linux" { - log.Warnf("received a route to manage, but agent doesn't support router mode on %s OS", runtime.GOOS) - 
continue - } newServerRoutesMap[newRoute.ID] = newRoute } } @@ -454,7 +450,7 @@ func (m *DefaultManager) initialClientRoutes(initialRoutes []*route.Route) []*ro } func isRouteSupported(route *route.Route) bool { - if !nbnet.CustomRoutingDisabled() || route.IsDynamic() { + if netstack.IsEnabled() || !nbnet.CustomRoutingDisabled() || route.IsDynamic() { return true } diff --git a/client/internal/routemanager/server_nonandroid.go b/client/internal/routemanager/server_nonandroid.go index 68218c0e2..7ddcccb0b 100644 --- a/client/internal/routemanager/server_nonandroid.go +++ b/client/internal/routemanager/server_nonandroid.go @@ -69,6 +69,16 @@ func (m *serverRouter) updateRoutes(routesMap map[route.ID]*route.Route) error { m.routes[id] = newRoute } + if len(m.routes) > 0 { + if err := m.firewall.EnableRouting(); err != nil { + return fmt.Errorf("enable routing: %w", err) + } + } else { + if err := m.firewall.DisableRouting(); err != nil { + return fmt.Errorf("disable routing: %w", err) + } + } + return nil } diff --git a/client/internal/routemanager/systemops/systemops_linux.go b/client/internal/routemanager/systemops/systemops_linux.go index 1da92cc80..d724cb1a7 100644 --- a/client/internal/routemanager/systemops/systemops_linux.go +++ b/client/internal/routemanager/systemops/systemops_linux.go @@ -53,20 +53,6 @@ type ruleParams struct { description string } -// isLegacy determines whether to use the legacy routing setup -func isLegacy() bool { - return os.Getenv("NB_USE_LEGACY_ROUTING") == "true" || nbnet.CustomRoutingDisabled() || nbnet.SkipSocketMark() -} - -// setIsLegacy sets the legacy routing setup -func setIsLegacy(b bool) { - if b { - os.Setenv("NB_USE_LEGACY_ROUTING", "true") - } else { - os.Unsetenv("NB_USE_LEGACY_ROUTING") - } -} - func getSetupRules() []ruleParams { return []ruleParams{ {100, -1, syscall.RT_TABLE_MAIN, netlink.FAMILY_V4, false, 0, "rule with suppress prefixlen v4"}, @@ -87,7 +73,7 @@ func getSetupRules() []ruleParams { // This table is where a default route or other specific routes received from the management server are configured, // enabling VPN connectivity. func (r *SysOps) SetupRouting(initAddresses []net.IP, stateManager *statemanager.Manager) (_ nbnet.AddHookFunc, _ nbnet.RemoveHookFunc, err error) { - if isLegacy() { + if !nbnet.AdvancedRouting() { log.Infof("Using legacy routing setup") return r.setupRefCounter(initAddresses, stateManager) } @@ -103,11 +89,6 @@ func (r *SysOps) SetupRouting(initAddresses []net.IP, stateManager *statemanager rules := getSetupRules() for _, rule := range rules { if err := addRule(rule); err != nil { - if errors.Is(err, syscall.EOPNOTSUPP) { - log.Warnf("Rule operations are not supported, falling back to the legacy routing setup") - setIsLegacy(true) - return r.setupRefCounter(initAddresses, stateManager) - } return nil, nil, fmt.Errorf("%s: %w", rule.description, err) } } @@ -130,7 +111,7 @@ func (r *SysOps) SetupRouting(initAddresses []net.IP, stateManager *statemanager // It systematically removes the three rules and any associated routing table entries to ensure a clean state. // The function uses error aggregation to report any errors encountered during the cleanup process. 
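The Linux routing setup above now branches on nbnet.AdvancedRouting() instead of the removed isLegacy() helper. The body of AdvancedRouting is not part of this diff; as a rough sketch, the gate can be read as the negation of the old legacy conditions (environment override, custom routing disabled, or missing socket-mark support). The helper and parameters below are illustrative stand-ins, not the real nbnet implementation.

package main

import (
	"fmt"
	"os"
)

// advancedRouting sketches the decision the callers above rely on: use the
// rule/table based setup unless something forces the legacy ref-counter path.
// customRoutingDisabled and skipSocketMark stand in for checks the real nbnet
// package performs; they are plain parameters here only for illustration.
func advancedRouting(customRoutingDisabled, skipSocketMark bool) bool {
	if os.Getenv("NB_USE_LEGACY_ROUTING") == "true" {
		return false
	}
	return !customRoutingDisabled && !skipSocketMark
}

func main() {
	fmt.Println(advancedRouting(false, false)) // true: policy routing is used
	_ = os.Setenv("NB_USE_LEGACY_ROUTING", "true")
	fmt.Println(advancedRouting(false, false)) // false: legacy setup is used
}

Note that the removed EOPNOTSUPP fallback means rule-setup failures now surface as errors from SetupRouting instead of silently switching to the legacy path.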
func (r *SysOps) CleanupRouting(stateManager *statemanager.Manager) error { - if isLegacy() { + if !nbnet.AdvancedRouting() { return r.cleanupRefCounter(stateManager) } @@ -168,7 +149,7 @@ func (r *SysOps) removeFromRouteTable(prefix netip.Prefix, nexthop Nexthop) erro } func (r *SysOps) AddVPNRoute(prefix netip.Prefix, intf *net.Interface) error { - if isLegacy() { + if !nbnet.AdvancedRouting() { return r.genericAddVPNRoute(prefix, intf) } @@ -191,7 +172,7 @@ func (r *SysOps) AddVPNRoute(prefix netip.Prefix, intf *net.Interface) error { } func (r *SysOps) RemoveVPNRoute(prefix netip.Prefix, intf *net.Interface) error { - if isLegacy() { + if !nbnet.AdvancedRouting() { return r.genericRemoveVPNRoute(prefix, intf) } @@ -504,7 +485,7 @@ func getAddressFamily(prefix netip.Prefix) int { } func hasSeparateRouting() ([]netip.Prefix, error) { - if isLegacy() { + if !nbnet.AdvancedRouting() { return GetRoutesFromTable() } return nil, ErrRoutingIsSeparate diff --git a/client/internal/routemanager/systemops/systemops_unix_test.go b/client/internal/routemanager/systemops/systemops_unix_test.go index a6000d963..d88c1ab6b 100644 --- a/client/internal/routemanager/systemops/systemops_unix_test.go +++ b/client/internal/routemanager/systemops/systemops_unix_test.go @@ -85,6 +85,7 @@ var testCases = []testCase{ } func TestRouting(t *testing.T) { + nbnet.Init() for _, tc := range testCases { // todo resolve test execution on freebsd if runtime.GOOS == "freebsd" { diff --git a/client/proto/daemon.pb.go b/client/proto/daemon.pb.go index 0c3430684..ba0a3b62e 100644 --- a/client/proto/daemon.pb.go +++ b/client/proto/daemon.pb.go @@ -87,6 +87,110 @@ func (LogLevel) EnumDescriptor() ([]byte, []int) { return file_daemon_proto_rawDescGZIP(), []int{0} } +type SystemEvent_Severity int32 + +const ( + SystemEvent_INFO SystemEvent_Severity = 0 + SystemEvent_WARNING SystemEvent_Severity = 1 + SystemEvent_ERROR SystemEvent_Severity = 2 + SystemEvent_CRITICAL SystemEvent_Severity = 3 +) + +// Enum value maps for SystemEvent_Severity. +var ( + SystemEvent_Severity_name = map[int32]string{ + 0: "INFO", + 1: "WARNING", + 2: "ERROR", + 3: "CRITICAL", + } + SystemEvent_Severity_value = map[string]int32{ + "INFO": 0, + "WARNING": 1, + "ERROR": 2, + "CRITICAL": 3, + } +) + +func (x SystemEvent_Severity) Enum() *SystemEvent_Severity { + p := new(SystemEvent_Severity) + *p = x + return p +} + +func (x SystemEvent_Severity) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SystemEvent_Severity) Descriptor() protoreflect.EnumDescriptor { + return file_daemon_proto_enumTypes[1].Descriptor() +} + +func (SystemEvent_Severity) Type() protoreflect.EnumType { + return &file_daemon_proto_enumTypes[1] +} + +func (x SystemEvent_Severity) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SystemEvent_Severity.Descriptor instead. +func (SystemEvent_Severity) EnumDescriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{49, 0} +} + +type SystemEvent_Category int32 + +const ( + SystemEvent_NETWORK SystemEvent_Category = 0 + SystemEvent_DNS SystemEvent_Category = 1 + SystemEvent_AUTHENTICATION SystemEvent_Category = 2 + SystemEvent_CONNECTIVITY SystemEvent_Category = 3 +) + +// Enum value maps for SystemEvent_Category. 
+var ( + SystemEvent_Category_name = map[int32]string{ + 0: "NETWORK", + 1: "DNS", + 2: "AUTHENTICATION", + 3: "CONNECTIVITY", + } + SystemEvent_Category_value = map[string]int32{ + "NETWORK": 0, + "DNS": 1, + "AUTHENTICATION": 2, + "CONNECTIVITY": 3, + } +) + +func (x SystemEvent_Category) Enum() *SystemEvent_Category { + p := new(SystemEvent_Category) + *p = x + return p +} + +func (x SystemEvent_Category) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SystemEvent_Category) Descriptor() protoreflect.EnumDescriptor { + return file_daemon_proto_enumTypes[2].Descriptor() +} + +func (SystemEvent_Category) Type() protoreflect.EnumType { + return &file_daemon_proto_enumTypes[2] +} + +func (x SystemEvent_Category) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SystemEvent_Category.Descriptor instead. +func (SystemEvent_Category) EnumDescriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{49, 1} +} + type EmptyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -165,6 +269,7 @@ type LoginRequest struct { DisableDns *bool `protobuf:"varint,22,opt,name=disable_dns,json=disableDns,proto3,oneof" json:"disable_dns,omitempty"` DisableFirewall *bool `protobuf:"varint,23,opt,name=disable_firewall,json=disableFirewall,proto3,oneof" json:"disable_firewall,omitempty"` BlockLanAccess *bool `protobuf:"varint,24,opt,name=block_lan_access,json=blockLanAccess,proto3,oneof" json:"block_lan_access,omitempty"` + DisableNotifications *bool `protobuf:"varint,25,opt,name=disable_notifications,json=disableNotifications,proto3,oneof" json:"disable_notifications,omitempty"` } func (x *LoginRequest) Reset() { @@ -368,6 +473,13 @@ func (x *LoginRequest) GetBlockLanAccess() bool { return false } +func (x *LoginRequest) GetDisableNotifications() bool { + if x != nil && x.DisableNotifications != nil { + return *x.DisableNotifications + } + return false +} + type LoginResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -848,13 +960,14 @@ type GetConfigResponse struct { // preSharedKey settings value. PreSharedKey string `protobuf:"bytes,4,opt,name=preSharedKey,proto3" json:"preSharedKey,omitempty"` // adminURL settings value. 
- AdminURL string `protobuf:"bytes,5,opt,name=adminURL,proto3" json:"adminURL,omitempty"` - InterfaceName string `protobuf:"bytes,6,opt,name=interfaceName,proto3" json:"interfaceName,omitempty"` - WireguardPort int64 `protobuf:"varint,7,opt,name=wireguardPort,proto3" json:"wireguardPort,omitempty"` - DisableAutoConnect bool `protobuf:"varint,9,opt,name=disableAutoConnect,proto3" json:"disableAutoConnect,omitempty"` - ServerSSHAllowed bool `protobuf:"varint,10,opt,name=serverSSHAllowed,proto3" json:"serverSSHAllowed,omitempty"` - RosenpassEnabled bool `protobuf:"varint,11,opt,name=rosenpassEnabled,proto3" json:"rosenpassEnabled,omitempty"` - RosenpassPermissive bool `protobuf:"varint,12,opt,name=rosenpassPermissive,proto3" json:"rosenpassPermissive,omitempty"` + AdminURL string `protobuf:"bytes,5,opt,name=adminURL,proto3" json:"adminURL,omitempty"` + InterfaceName string `protobuf:"bytes,6,opt,name=interfaceName,proto3" json:"interfaceName,omitempty"` + WireguardPort int64 `protobuf:"varint,7,opt,name=wireguardPort,proto3" json:"wireguardPort,omitempty"` + DisableAutoConnect bool `protobuf:"varint,9,opt,name=disableAutoConnect,proto3" json:"disableAutoConnect,omitempty"` + ServerSSHAllowed bool `protobuf:"varint,10,opt,name=serverSSHAllowed,proto3" json:"serverSSHAllowed,omitempty"` + RosenpassEnabled bool `protobuf:"varint,11,opt,name=rosenpassEnabled,proto3" json:"rosenpassEnabled,omitempty"` + RosenpassPermissive bool `protobuf:"varint,12,opt,name=rosenpassPermissive,proto3" json:"rosenpassPermissive,omitempty"` + DisableNotifications bool `protobuf:"varint,13,opt,name=disable_notifications,json=disableNotifications,proto3" json:"disable_notifications,omitempty"` } func (x *GetConfigResponse) Reset() { @@ -966,6 +1079,13 @@ func (x *GetConfigResponse) GetRosenpassPermissive() bool { return false } +func (x *GetConfigResponse) GetDisableNotifications() bool { + if x != nil { + return x.DisableNotifications + } + return false +} + // PeerState contains the latest state of a peer type PeerState struct { state protoimpl.MessageState @@ -1514,6 +1634,7 @@ type FullStatus struct { Relays []*RelayState `protobuf:"bytes,5,rep,name=relays,proto3" json:"relays,omitempty"` DnsServers []*NSGroupState `protobuf:"bytes,6,rep,name=dns_servers,json=dnsServers,proto3" json:"dns_servers,omitempty"` NumberOfForwardingRules int32 `protobuf:"varint,8,opt,name=NumberOfForwardingRules,proto3" json:"NumberOfForwardingRules,omitempty"` + Events []*SystemEvent `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` } func (x *FullStatus) Reset() { @@ -1597,6 +1718,13 @@ func (x *FullStatus) GetNumberOfForwardingRules() int32 { return 0 } +func (x *FullStatus) GetEvents() []*SystemEvent { + if x != nil { + return x.Events + } + return nil +} + // Networks type ListNetworksRequest struct { state protoimpl.MessageState @@ -2827,6 +2955,548 @@ func (*SetNetworkMapPersistenceResponse) Descriptor() ([]byte, []int) { return file_daemon_proto_rawDescGZIP(), []int{43} } +type TCPFlags struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Syn bool `protobuf:"varint,1,opt,name=syn,proto3" json:"syn,omitempty"` + Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"` + Fin bool `protobuf:"varint,3,opt,name=fin,proto3" json:"fin,omitempty"` + Rst bool `protobuf:"varint,4,opt,name=rst,proto3" json:"rst,omitempty"` + Psh bool `protobuf:"varint,5,opt,name=psh,proto3" json:"psh,omitempty"` + Urg bool `protobuf:"varint,6,opt,name=urg,proto3" 
json:"urg,omitempty"` +} + +func (x *TCPFlags) Reset() { + *x = TCPFlags{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TCPFlags) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TCPFlags) ProtoMessage() {} + +func (x *TCPFlags) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TCPFlags.ProtoReflect.Descriptor instead. +func (*TCPFlags) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{44} +} + +func (x *TCPFlags) GetSyn() bool { + if x != nil { + return x.Syn + } + return false +} + +func (x *TCPFlags) GetAck() bool { + if x != nil { + return x.Ack + } + return false +} + +func (x *TCPFlags) GetFin() bool { + if x != nil { + return x.Fin + } + return false +} + +func (x *TCPFlags) GetRst() bool { + if x != nil { + return x.Rst + } + return false +} + +func (x *TCPFlags) GetPsh() bool { + if x != nil { + return x.Psh + } + return false +} + +func (x *TCPFlags) GetUrg() bool { + if x != nil { + return x.Urg + } + return false +} + +type TracePacketRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourceIp string `protobuf:"bytes,1,opt,name=source_ip,json=sourceIp,proto3" json:"source_ip,omitempty"` + DestinationIp string `protobuf:"bytes,2,opt,name=destination_ip,json=destinationIp,proto3" json:"destination_ip,omitempty"` + Protocol string `protobuf:"bytes,3,opt,name=protocol,proto3" json:"protocol,omitempty"` + SourcePort uint32 `protobuf:"varint,4,opt,name=source_port,json=sourcePort,proto3" json:"source_port,omitempty"` + DestinationPort uint32 `protobuf:"varint,5,opt,name=destination_port,json=destinationPort,proto3" json:"destination_port,omitempty"` + Direction string `protobuf:"bytes,6,opt,name=direction,proto3" json:"direction,omitempty"` + TcpFlags *TCPFlags `protobuf:"bytes,7,opt,name=tcp_flags,json=tcpFlags,proto3,oneof" json:"tcp_flags,omitempty"` + IcmpType *uint32 `protobuf:"varint,8,opt,name=icmp_type,json=icmpType,proto3,oneof" json:"icmp_type,omitempty"` + IcmpCode *uint32 `protobuf:"varint,9,opt,name=icmp_code,json=icmpCode,proto3,oneof" json:"icmp_code,omitempty"` +} + +func (x *TracePacketRequest) Reset() { + *x = TracePacketRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracePacketRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracePacketRequest) ProtoMessage() {} + +func (x *TracePacketRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracePacketRequest.ProtoReflect.Descriptor instead. 
+func (*TracePacketRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{45} +} + +func (x *TracePacketRequest) GetSourceIp() string { + if x != nil { + return x.SourceIp + } + return "" +} + +func (x *TracePacketRequest) GetDestinationIp() string { + if x != nil { + return x.DestinationIp + } + return "" +} + +func (x *TracePacketRequest) GetProtocol() string { + if x != nil { + return x.Protocol + } + return "" +} + +func (x *TracePacketRequest) GetSourcePort() uint32 { + if x != nil { + return x.SourcePort + } + return 0 +} + +func (x *TracePacketRequest) GetDestinationPort() uint32 { + if x != nil { + return x.DestinationPort + } + return 0 +} + +func (x *TracePacketRequest) GetDirection() string { + if x != nil { + return x.Direction + } + return "" +} + +func (x *TracePacketRequest) GetTcpFlags() *TCPFlags { + if x != nil { + return x.TcpFlags + } + return nil +} + +func (x *TracePacketRequest) GetIcmpType() uint32 { + if x != nil && x.IcmpType != nil { + return *x.IcmpType + } + return 0 +} + +func (x *TracePacketRequest) GetIcmpCode() uint32 { + if x != nil && x.IcmpCode != nil { + return *x.IcmpCode + } + return 0 +} + +type TraceStage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Allowed bool `protobuf:"varint,3,opt,name=allowed,proto3" json:"allowed,omitempty"` + ForwardingDetails *string `protobuf:"bytes,4,opt,name=forwarding_details,json=forwardingDetails,proto3,oneof" json:"forwarding_details,omitempty"` +} + +func (x *TraceStage) Reset() { + *x = TraceStage{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceStage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceStage) ProtoMessage() {} + +func (x *TraceStage) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceStage.ProtoReflect.Descriptor instead. 
+func (*TraceStage) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{46} +} + +func (x *TraceStage) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TraceStage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *TraceStage) GetAllowed() bool { + if x != nil { + return x.Allowed + } + return false +} + +func (x *TraceStage) GetForwardingDetails() string { + if x != nil && x.ForwardingDetails != nil { + return *x.ForwardingDetails + } + return "" +} + +type TracePacketResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Stages []*TraceStage `protobuf:"bytes,1,rep,name=stages,proto3" json:"stages,omitempty"` + FinalDisposition bool `protobuf:"varint,2,opt,name=final_disposition,json=finalDisposition,proto3" json:"final_disposition,omitempty"` +} + +func (x *TracePacketResponse) Reset() { + *x = TracePacketResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracePacketResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracePacketResponse) ProtoMessage() {} + +func (x *TracePacketResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracePacketResponse.ProtoReflect.Descriptor instead. +func (*TracePacketResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{47} +} + +func (x *TracePacketResponse) GetStages() []*TraceStage { + if x != nil { + return x.Stages + } + return nil +} + +func (x *TracePacketResponse) GetFinalDisposition() bool { + if x != nil { + return x.FinalDisposition + } + return false +} + +type SubscribeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SubscribeRequest) Reset() { + *x = SubscribeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscribeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscribeRequest) ProtoMessage() {} + +func (x *SubscribeRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeRequest.ProtoReflect.Descriptor instead. 
+func (*SubscribeRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{48} +} + +type SystemEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Severity SystemEvent_Severity `protobuf:"varint,2,opt,name=severity,proto3,enum=daemon.SystemEvent_Severity" json:"severity,omitempty"` + Category SystemEvent_Category `protobuf:"varint,3,opt,name=category,proto3,enum=daemon.SystemEvent_Category" json:"category,omitempty"` + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + UserMessage string `protobuf:"bytes,5,opt,name=userMessage,proto3" json:"userMessage,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *SystemEvent) Reset() { + *x = SystemEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SystemEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SystemEvent) ProtoMessage() {} + +func (x *SystemEvent) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SystemEvent.ProtoReflect.Descriptor instead. 
+func (*SystemEvent) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{49} +} + +func (x *SystemEvent) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SystemEvent) GetSeverity() SystemEvent_Severity { + if x != nil { + return x.Severity + } + return SystemEvent_INFO +} + +func (x *SystemEvent) GetCategory() SystemEvent_Category { + if x != nil { + return x.Category + } + return SystemEvent_NETWORK +} + +func (x *SystemEvent) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *SystemEvent) GetUserMessage() string { + if x != nil { + return x.UserMessage + } + return "" +} + +func (x *SystemEvent) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *SystemEvent) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +type GetEventsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetEventsRequest) Reset() { + *x = GetEventsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEventsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEventsRequest) ProtoMessage() {} + +func (x *GetEventsRequest) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEventsRequest.ProtoReflect.Descriptor instead. +func (*GetEventsRequest) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{50} +} + +type GetEventsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Events []*SystemEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` +} + +func (x *GetEventsResponse) Reset() { + *x = GetEventsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_daemon_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEventsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEventsResponse) ProtoMessage() {} + +func (x *GetEventsResponse) ProtoReflect() protoreflect.Message { + mi := &file_daemon_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEventsResponse.ProtoReflect.Descriptor instead. 
+func (*GetEventsResponse) Descriptor() ([]byte, []int) { + return file_daemon_proto_rawDescGZIP(), []int{51} +} + +func (x *GetEventsResponse) GetEvents() []*SystemEvent { + if x != nil { + return x.Events + } + return nil +} + type PortInfo_Range struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2839,7 +3509,7 @@ type PortInfo_Range struct { func (x *PortInfo_Range) Reset() { *x = PortInfo_Range{} if protoimpl.UnsafeEnabled { - mi := &file_daemon_proto_msgTypes[45] + mi := &file_daemon_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2852,7 +3522,7 @@ func (x *PortInfo_Range) String() string { func (*PortInfo_Range) ProtoMessage() {} func (x *PortInfo_Range) ProtoReflect() protoreflect.Message { - mi := &file_daemon_proto_msgTypes[45] + mi := &file_daemon_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2893,7 +3563,7 @@ var file_daemon_proto_rawDesc = []byte{ 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x95, 0x0b, 0x0a, 0x0c, 0x4c, 0x6f, + 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xe9, 0x0b, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x74, 0x75, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x53, 0x68, 0x61, @@ -2964,394 +3634,507 @@ var file_daemon_proto_rawDesc = []byte{ 0x61, 0x6c, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6c, 0x61, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x48, 0x0d, 0x52, 0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x61, 0x6e, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x88, 0x01, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, - 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x10, 0x0a, 0x0e, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x42, 0x17, - 0x0a, 0x15, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x13, - 0x0a, 0x11, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, - 0x77, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, - 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x42, 0x11, 0x0a, 0x0f, 0x5f, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x42, 0x13, - 0x0a, 0x11, 0x5f, 0x64, 0x6e, 0x73, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x18, 0x0a, - 
0x16, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x6e, 0x73, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x42, 0x13, 0x0a, 0x11, - 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6c, 0x61, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 0x65, 0x65, 0x64, 0x73, 0x53, 0x53, 0x4f, 0x4c, - 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e, 0x65, 0x65, 0x64, - 0x73, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, - 0x72, 0x43, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, - 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x49, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x49, 0x12, - 0x38, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, - 0x52, 0x49, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, - 0x49, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x4d, 0x0a, 0x13, 0x57, 0x61, 0x69, - 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x57, 0x61, 0x69, 0x74, - 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x0b, 0x0a, 0x09, 0x55, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0c, 0x0a, - 0x0a, 0x55, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x0a, 0x0d, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x11, - 0x67, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x67, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, - 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0a, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0a, 0x66, - 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x61, 0x65, - 0x6d, 0x6f, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x64, 0x61, 
0x65, 0x6d, 0x6f, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, - 0x0d, 0x0a, 0x0b, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, - 0x0a, 0x0c, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x12, - 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0xb9, 0x03, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x55, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x55, 0x72, 0x6c, 0x12, 0x1e, - 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x55, 0x52, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x55, 0x52, 0x4c, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, - 0x0a, 0x0d, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2e, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x75, 0x74, 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, - 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, - 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, - 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, - 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x76, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, - 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x22, 0xde, - 0x05, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x75, - 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x46, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x72, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, - 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, + 0x73, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x19, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x0e, 0x52, 0x14, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4e, + 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x42, + 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x67, + 0x75, 0x61, 0x72, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4b, 0x65, + 0x79, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, + 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x16, 0x0a, + 0x14, 0x5f, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x76, 0x65, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x6e, 0x73, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x18, 0x0a, + 0x16, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, + 0x73, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x6e, + 0x73, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, + 0x72, 0x65, 0x77, 0x61, 0x6c, 0x6c, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x6c, 0x61, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x18, 0x0a, 0x16, 0x5f, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x6e, 0x65, 0x65, 0x64, 0x73, + 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x6e, 0x65, 0x65, 0x64, 0x73, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x49, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x55, 0x52, 0x49, 0x12, 0x38, 0x0a, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x49, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x52, 0x49, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x4d, 0x0a, + 0x13, 0x57, 0x61, 0x69, 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, + 0x57, 0x61, 0x69, 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0b, 0x0a, 0x09, 0x55, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x0c, 0x0a, 0x0a, 0x55, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x3d, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x2c, 0x0a, 0x11, 0x67, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x67, 0x65, 0x74, + 0x46, 0x75, 0x6c, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x82, + 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0a, 0x66, 0x75, 0x6c, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x0a, 0x66, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x0a, + 0x0d, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0x0d, 0x0a, 0x0b, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xee, 0x03, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0d, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x55, 0x72, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x55, + 0x72, 0x6c, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, + 
0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0c, + 0x70, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4b, 0x65, 0x79, + 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x55, 0x52, 0x4c, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x55, 0x52, 0x4c, 0x12, 0x24, 0x0a, 0x0d, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, + 0x6f, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x77, 0x69, 0x72, 0x65, 0x67, + 0x75, 0x61, 0x72, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2e, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, + 0x6f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x53, 0x48, 0x41, 0x6c, 0x6c, + 0x6f, 0x77, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, + 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, + 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x76, 0x65, 0x12, 0x33, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x14, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xde, 0x05, 0x0a, 0x09, 0x50, 0x65, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x1e, 0x0a, + 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x46, 0x0a, + 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 
0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x12, + 0x34, 0x0a, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x63, 0x65, 0x43, - 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x36, 0x0a, 0x16, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x3c, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x1a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x69, - 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, - 0x64, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x52, 0x78, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x52, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x78, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x78, 0x12, 0x2a, - 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, - 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x6c, 0x61, 0x74, 0x65, 
0x6e, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x72, - 0x65, 0x6c, 0x61, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, - 0xf0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x0f, 0x6b, 0x65, - 0x72, 0x6e, 0x65, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, - 0x66, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, - 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, - 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x73, 0x22, 0x53, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x55, 0x52, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x57, 0x0a, 0x0f, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x63, 0x65, + 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, + 0x6e, 0x12, 0x3c, 0x0a, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, + 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x63, 0x65, 0x43, 0x61, + 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x3e, 0x0a, 0x1a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x63, 0x65, 0x43, 0x61, 0x6e, 0x64, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x1a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x49, 0x63, 0x65, 0x43, 0x61, + 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x52, 0x0a, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x69, 0x72, 0x65, 0x67, 
0x75, 0x61, 0x72, 0x64, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x16, 0x6c, 0x61, 0x73, + 0x74, 0x57, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x79, 0x74, 0x65, 0x73, 0x52, 0x78, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x62, 0x79, 0x74, 0x65, 0x73, 0x52, 0x78, 0x12, 0x18, 0x0a, + 0x07, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x78, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, + 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x18, + 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x12, + 0x33, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x61, + 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, + 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x75, 0x62, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x75, 0x62, + 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x0f, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x71, 0x64, + 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x6f, 0x73, + 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x30, 0x0a, + 0x13, 0x72, 0x6f, 0x73, 0x65, 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x76, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x6f, 0x73, 0x65, + 0x6e, 0x70, 0x61, 0x73, 0x73, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x76, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x08, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x22, 0x53, 0x0a, 0x0b, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x55, 0x52, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 
0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x22, 0x52, 0x0a, 0x0a, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x55, 0x52, 0x49, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x55, 0x52, 0x49, - 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x22, 0x72, 0x0a, 0x0c, 0x4e, 0x53, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x8c, 0x03, 0x0a, 0x0a, 0x46, 0x75, 0x6c, - 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, - 0x6c, 0x61, 0x79, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x64, 0x61, 0x65, - 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x06, - 0x72, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x64, 0x6e, 0x73, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, - 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4e, 0x53, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x0a, 0x64, 0x6e, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x38, 0x0a, - 0x17, 0x4e, 0x75, 0x6d, 0x62, 
0x65, 0x72, 0x4f, 0x66, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, - 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3f, - 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, - 0x61, 0x0a, 0x15, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x70, 0x70, 0x65, - 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, - 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, - 0x6c, 0x6c, 0x22, 0x18, 0x0a, 0x16, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x0a, 0x06, - 0x49, 0x50, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x22, 0xf9, 0x01, 0x0a, 0x07, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, - 0x12, 0x42, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x49, 0x50, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x49, - 0x50, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x64, 0x49, 0x50, 0x73, 0x1a, 0x4e, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, - 0x49, 0x50, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2e, 0x49, 0x50, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x92, 0x01, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, - 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2e, 
0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, - 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, - 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, - 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x80, 0x02, 0x0a, 0x0e, 0x46, 0x6f, - 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3a, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, - 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x64, 0x61, 0x65, - 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x47, 0x0a, 0x17, - 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, - 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6a, 0x0a, 0x12, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, - 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, - 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x61, 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x6e, 0x66, - 0x6f, 0x22, 0x29, 0x0a, 0x13, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x14, 0x0a, 0x12, - 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, - 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x22, 0x3c, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, - 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, - 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3b, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x22, 0x44, 0x0a, 0x11, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x22, 0x3b, 0x0a, 0x12, 0x43, - 0x6c, 0x65, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6c, 0x65, 0x61, 0x6e, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x22, 0x45, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, - 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x22, - 0x3c, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x22, 0x3b, 0x0a, - 0x1f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x50, 0x65, - 0x72, 
0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x22, 0x0a, 0x20, 0x53, 0x65, + 0x22, 0x57, 0x0a, 0x0f, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x55, 0x52, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x52, 0x0a, 0x0a, 0x52, 0x65, 0x6c, + 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x49, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x55, 0x52, 0x49, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x76, 0x61, + 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x76, + 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x72, 0x0a, + 0x0c, 0x4e, 0x53, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0xb9, 0x03, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x61, 0x65, 0x6d, + 0x6f, 0x6e, 0x2e, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x0f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, + 0x6e, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x65, + 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x64, 0x61, 0x65, 0x6d, + 0x6f, 0x6e, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x70, 0x65, + 0x65, 0x72, 0x73, 0x12, 0x2a, 
0x0a, 0x06, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x6c, + 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x06, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x73, 0x12, + 0x35, 0x0a, 0x0b, 0x64, 0x6e, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4e, 0x53, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x64, 0x6e, 0x73, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x38, 0x0a, 0x17, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x4f, 0x66, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, + 0x66, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x2b, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x15, 0x0a, + 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x3f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x06, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x64, + 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x06, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x61, 0x0a, 0x15, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, + 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x44, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x22, 0x18, 0x0a, 0x16, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x1a, 0x0a, 0x06, 0x49, 0x50, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x22, 0xf9, + 0x01, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, + 0x65, 0x64, 0x49, 0x50, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, + 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4e, 0x65, 0x74, 0x77, 
0x6f, 0x72, 0x6b, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x49, 0x50, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, + 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x49, 0x50, 0x73, 0x1a, 0x4e, 0x0a, 0x10, 0x52, 0x65, + 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x64, 0x49, 0x50, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x49, 0x50, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x92, 0x01, 0x0a, 0x08, 0x50, + 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2e, 0x0a, + 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, + 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0x2f, 0x0a, + 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x0f, + 0x0a, 0x0d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x80, 0x02, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, + 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3a, + 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, + 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6f, + 0x72, 0x74, 0x22, 0x47, 0x0a, 0x17, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, + 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, + 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 
0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6a, 0x0a, 0x12, 0x44, + 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6e, 0x6f, 0x6e, 0x79, 0x6d, 0x69, 0x7a, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x29, 0x0a, 0x13, 0x44, 0x65, 0x62, 0x75, 0x67, + 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, + 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x26, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, + 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x3c, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4c, 0x6f, + 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, + 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x64, + 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x05, + 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3b, + 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x22, 0x44, 0x0a, 0x11, 0x43, + 0x6c, 0x65, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, + 0x6c, 0x22, 0x3b, 0x0a, 0x12, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x65, 0x61, 0x6e, + 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0d, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x22, 0x45, + 0x0a, 
0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x22, 0x3c, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x73, 0x22, 0x3b, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x4d, 0x61, 0x70, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x22, 0x22, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, + 0x70, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x08, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x73, 0x79, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x73, + 0x79, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x03, 0x61, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x66, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x03, 0x66, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x73, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x03, 0x72, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x73, 0x68, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x73, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75, 0x72, 0x67, 0x22, 0x80, 0x03, 0x0a, + 0x12, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, + 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, + 0x09, 0x74, 0x63, 0x70, 0x5f, 
0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x48, 0x00, 0x52, 0x08, 0x74, 0x63, 0x70, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x20, 0x0a, 0x09, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x08, 0x69, 0x63, 0x6d, 0x70, 0x54, 0x79, 0x70, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x02, 0x52, 0x08, 0x69, 0x63, 0x6d, 0x70, 0x43, 0x6f, + 0x64, 0x65, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x22, + 0x9f, 0x01, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x12, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x11, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x67, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, + 0x6e, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x67, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x69, + 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x87, 0x04, 0x0a, 0x0b, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x38, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x76, + 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x38, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1c, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x61, 
0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, + 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x3d, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, + 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x08, 0x53, + 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, + 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, + 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x03, 0x22, 0x46, 0x0a, 0x08, 0x43, 0x61, 0x74, 0x65, 0x67, + 0x6f, 0x72, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x00, + 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x10, 0x0a, + 0x0c, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x56, 0x49, 0x54, 0x59, 0x10, 0x03, 0x22, + 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, + 0x6e, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x62, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x50, 0x41, 0x4e, 0x49, 0x43, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x41, 0x54, + 0x41, 0x4c, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, + 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, + 0x4f, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x06, 0x12, 0x09, + 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x07, 0x32, 0xb3, 0x0b, 0x0a, 
0x0d, 0x44, 0x61, + 0x65, 0x6d, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x4c, + 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x64, 0x61, 0x65, + 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0c, 0x57, 0x61, 0x69, 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, + 0x67, 0x69, 0x6e, 0x12, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x69, + 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x53, + 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x2d, 0x0a, 0x02, 0x55, 0x70, 0x12, 0x11, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, + 0x55, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x64, 0x61, 0x65, 0x6d, + 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x39, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x15, 0x2e, 0x64, 0x61, 0x65, 0x6d, + 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x04, 0x44, 0x6f, + 0x77, 0x6e, 0x12, 0x13, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x6f, 0x77, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x42, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x2e, 0x64, + 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x73, 0x12, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x73, 0x12, 0x1d, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x10, 0x44, 0x65, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x1d, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, + 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x2e, 0x64, 0x61, + 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, + 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x12, 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x62, + 0x75, 0x67, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, + 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1a, 0x2e, + 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, + 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x4c, + 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, + 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, + 0x12, 0x19, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x64, 0x61, + 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0a, 0x43, 0x6c, 0x65, + 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, + 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x65, 0x61, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x61, + 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x50, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x62, - 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x4e, 0x49, 0x43, - 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x02, 0x12, 0x09, 0x0a, - 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x41, 0x52, 0x4e, - 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, - 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x06, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, - 0x10, 0x07, 0x32, 0xdf, 0x09, 0x0a, 0x0d, 0x44, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x14, 0x2e, - 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0c, - 0x57, 0x61, 0x69, 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x1b, 0x2e, 0x64, - 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x53, 0x4f, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x2d, 0x0a, 0x02, 0x55, 0x70, 0x12, - 0x11, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x55, 0x70, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x15, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x33, 0x0a, 0x04, 0x44, 0x6f, 0x77, 0x6e, 0x12, 0x13, 0x2e, 0x64, 0x61, - 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x14, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0c, - 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x12, 0x1b, 0x2e, 0x64, - 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0e, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 
0x6b, 0x73, 0x12, 0x1d, 0x2e, 0x64, 0x61, - 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x64, 0x61, 0x65, - 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x10, - 0x44, 0x65, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, - 0x12, 0x1d, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, - 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1e, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4e, - 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4a, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x61, 0x65, - 0x6d, 0x6f, 0x6e, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, - 0x0b, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1a, 0x2e, 0x64, - 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, 0x6e, 0x64, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, - 0x6e, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, - 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, - 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4c, - 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, - 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0a, 0x4c, - 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0a, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x19, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x64, 0x61, - 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x53, 0x74, 0x61, 
0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, - 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x4d, 0x61, 0x70, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x27, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, + 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, + 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x50, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4d, 0x61, 0x70, 0x50, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x64, 0x61, 0x65, + 0x6d, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, + 0x6e, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -3366,120 +4149,145 @@ func file_daemon_proto_rawDescGZIP() []byte { return file_daemon_proto_rawDescData } -var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_daemon_proto_msgTypes = make([]protoimpl.MessageInfo, 46) +var file_daemon_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_daemon_proto_msgTypes = 
make([]protoimpl.MessageInfo, 55) var file_daemon_proto_goTypes = []interface{}{ (LogLevel)(0), // 0: daemon.LogLevel - (*EmptyRequest)(nil), // 1: daemon.EmptyRequest - (*LoginRequest)(nil), // 2: daemon.LoginRequest - (*LoginResponse)(nil), // 3: daemon.LoginResponse - (*WaitSSOLoginRequest)(nil), // 4: daemon.WaitSSOLoginRequest - (*WaitSSOLoginResponse)(nil), // 5: daemon.WaitSSOLoginResponse - (*UpRequest)(nil), // 6: daemon.UpRequest - (*UpResponse)(nil), // 7: daemon.UpResponse - (*StatusRequest)(nil), // 8: daemon.StatusRequest - (*StatusResponse)(nil), // 9: daemon.StatusResponse - (*DownRequest)(nil), // 10: daemon.DownRequest - (*DownResponse)(nil), // 11: daemon.DownResponse - (*GetConfigRequest)(nil), // 12: daemon.GetConfigRequest - (*GetConfigResponse)(nil), // 13: daemon.GetConfigResponse - (*PeerState)(nil), // 14: daemon.PeerState - (*LocalPeerState)(nil), // 15: daemon.LocalPeerState - (*SignalState)(nil), // 16: daemon.SignalState - (*ManagementState)(nil), // 17: daemon.ManagementState - (*RelayState)(nil), // 18: daemon.RelayState - (*NSGroupState)(nil), // 19: daemon.NSGroupState - (*FullStatus)(nil), // 20: daemon.FullStatus - (*ListNetworksRequest)(nil), // 21: daemon.ListNetworksRequest - (*ListNetworksResponse)(nil), // 22: daemon.ListNetworksResponse - (*SelectNetworksRequest)(nil), // 23: daemon.SelectNetworksRequest - (*SelectNetworksResponse)(nil), // 24: daemon.SelectNetworksResponse - (*IPList)(nil), // 25: daemon.IPList - (*Network)(nil), // 26: daemon.Network - (*PortInfo)(nil), // 27: daemon.PortInfo - (*ForwardingRule)(nil), // 28: daemon.ForwardingRule - (*ForwardingRulesResponse)(nil), // 29: daemon.ForwardingRulesResponse - (*DebugBundleRequest)(nil), // 30: daemon.DebugBundleRequest - (*DebugBundleResponse)(nil), // 31: daemon.DebugBundleResponse - (*GetLogLevelRequest)(nil), // 32: daemon.GetLogLevelRequest - (*GetLogLevelResponse)(nil), // 33: daemon.GetLogLevelResponse - (*SetLogLevelRequest)(nil), // 34: daemon.SetLogLevelRequest - (*SetLogLevelResponse)(nil), // 35: daemon.SetLogLevelResponse - (*State)(nil), // 36: daemon.State - (*ListStatesRequest)(nil), // 37: daemon.ListStatesRequest - (*ListStatesResponse)(nil), // 38: daemon.ListStatesResponse - (*CleanStateRequest)(nil), // 39: daemon.CleanStateRequest - (*CleanStateResponse)(nil), // 40: daemon.CleanStateResponse - (*DeleteStateRequest)(nil), // 41: daemon.DeleteStateRequest - (*DeleteStateResponse)(nil), // 42: daemon.DeleteStateResponse - (*SetNetworkMapPersistenceRequest)(nil), // 43: daemon.SetNetworkMapPersistenceRequest - (*SetNetworkMapPersistenceResponse)(nil), // 44: daemon.SetNetworkMapPersistenceResponse - nil, // 45: daemon.Network.ResolvedIPsEntry - (*PortInfo_Range)(nil), // 46: daemon.PortInfo.Range - (*durationpb.Duration)(nil), // 47: google.protobuf.Duration - (*timestamppb.Timestamp)(nil), // 48: google.protobuf.Timestamp + (SystemEvent_Severity)(0), // 1: daemon.SystemEvent.Severity + (SystemEvent_Category)(0), // 2: daemon.SystemEvent.Category + (*EmptyRequest)(nil), // 3: daemon.EmptyRequest + (*LoginRequest)(nil), // 4: daemon.LoginRequest + (*LoginResponse)(nil), // 5: daemon.LoginResponse + (*WaitSSOLoginRequest)(nil), // 6: daemon.WaitSSOLoginRequest + (*WaitSSOLoginResponse)(nil), // 7: daemon.WaitSSOLoginResponse + (*UpRequest)(nil), // 8: daemon.UpRequest + (*UpResponse)(nil), // 9: daemon.UpResponse + (*StatusRequest)(nil), // 10: daemon.StatusRequest + (*StatusResponse)(nil), // 11: daemon.StatusResponse + (*DownRequest)(nil), // 12: daemon.DownRequest + 
(*DownResponse)(nil), // 13: daemon.DownResponse + (*GetConfigRequest)(nil), // 14: daemon.GetConfigRequest + (*GetConfigResponse)(nil), // 15: daemon.GetConfigResponse + (*PeerState)(nil), // 16: daemon.PeerState + (*LocalPeerState)(nil), // 17: daemon.LocalPeerState + (*SignalState)(nil), // 18: daemon.SignalState + (*ManagementState)(nil), // 19: daemon.ManagementState + (*RelayState)(nil), // 20: daemon.RelayState + (*NSGroupState)(nil), // 21: daemon.NSGroupState + (*FullStatus)(nil), // 22: daemon.FullStatus + (*ListNetworksRequest)(nil), // 23: daemon.ListNetworksRequest + (*ListNetworksResponse)(nil), // 24: daemon.ListNetworksResponse + (*SelectNetworksRequest)(nil), // 25: daemon.SelectNetworksRequest + (*SelectNetworksResponse)(nil), // 26: daemon.SelectNetworksResponse + (*IPList)(nil), // 27: daemon.IPList + (*Network)(nil), // 28: daemon.Network + (*PortInfo)(nil), // 29: daemon.PortInfo + (*ForwardingRule)(nil), // 30: daemon.ForwardingRule + (*ForwardingRulesResponse)(nil), // 31: daemon.ForwardingRulesResponse + (*DebugBundleRequest)(nil), // 32: daemon.DebugBundleRequest + (*DebugBundleResponse)(nil), // 33: daemon.DebugBundleResponse + (*GetLogLevelRequest)(nil), // 34: daemon.GetLogLevelRequest + (*GetLogLevelResponse)(nil), // 35: daemon.GetLogLevelResponse + (*SetLogLevelRequest)(nil), // 36: daemon.SetLogLevelRequest + (*SetLogLevelResponse)(nil), // 37: daemon.SetLogLevelResponse + (*State)(nil), // 38: daemon.State + (*ListStatesRequest)(nil), // 39: daemon.ListStatesRequest + (*ListStatesResponse)(nil), // 40: daemon.ListStatesResponse + (*CleanStateRequest)(nil), // 41: daemon.CleanStateRequest + (*CleanStateResponse)(nil), // 42: daemon.CleanStateResponse + (*DeleteStateRequest)(nil), // 43: daemon.DeleteStateRequest + (*DeleteStateResponse)(nil), // 44: daemon.DeleteStateResponse + (*SetNetworkMapPersistenceRequest)(nil), // 45: daemon.SetNetworkMapPersistenceRequest + (*SetNetworkMapPersistenceResponse)(nil), // 46: daemon.SetNetworkMapPersistenceResponse + (*TCPFlags)(nil), // 47: daemon.TCPFlags + (*TracePacketRequest)(nil), // 48: daemon.TracePacketRequest + (*TraceStage)(nil), // 49: daemon.TraceStage + (*TracePacketResponse)(nil), // 50: daemon.TracePacketResponse + (*SubscribeRequest)(nil), // 51: daemon.SubscribeRequest + (*SystemEvent)(nil), // 52: daemon.SystemEvent + (*GetEventsRequest)(nil), // 53: daemon.GetEventsRequest + (*GetEventsResponse)(nil), // 54: daemon.GetEventsResponse + nil, // 55: daemon.Network.ResolvedIPsEntry + (*PortInfo_Range)(nil), // 56: daemon.PortInfo.Range + nil, // 57: daemon.SystemEvent.MetadataEntry + (*durationpb.Duration)(nil), // 58: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 59: google.protobuf.Timestamp } var file_daemon_proto_depIdxs = []int32{ - 47, // 0: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration - 20, // 1: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus - 48, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp - 48, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp - 47, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration - 17, // 5: daemon.FullStatus.managementState:type_name -> daemon.ManagementState - 16, // 6: daemon.FullStatus.signalState:type_name -> daemon.SignalState - 15, // 7: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState - 14, // 8: daemon.FullStatus.peers:type_name -> daemon.PeerState - 18, // 9: daemon.FullStatus.relays:type_name -> 
daemon.RelayState - 19, // 10: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState - 26, // 11: daemon.ListNetworksResponse.routes:type_name -> daemon.Network - 45, // 12: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry - 46, // 13: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range - 27, // 14: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo - 27, // 15: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo - 28, // 16: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule - 0, // 17: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel - 0, // 18: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel - 36, // 19: daemon.ListStatesResponse.states:type_name -> daemon.State - 25, // 20: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList - 2, // 21: daemon.DaemonService.Login:input_type -> daemon.LoginRequest - 4, // 22: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest - 6, // 23: daemon.DaemonService.Up:input_type -> daemon.UpRequest - 8, // 24: daemon.DaemonService.Status:input_type -> daemon.StatusRequest - 10, // 25: daemon.DaemonService.Down:input_type -> daemon.DownRequest - 12, // 26: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest - 21, // 27: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest - 23, // 28: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest - 23, // 29: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest - 1, // 30: daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest - 30, // 31: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest - 32, // 32: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest - 34, // 33: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest - 37, // 34: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest - 39, // 35: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest - 41, // 36: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest - 43, // 37: daemon.DaemonService.SetNetworkMapPersistence:input_type -> daemon.SetNetworkMapPersistenceRequest - 3, // 38: daemon.DaemonService.Login:output_type -> daemon.LoginResponse - 5, // 39: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse - 7, // 40: daemon.DaemonService.Up:output_type -> daemon.UpResponse - 9, // 41: daemon.DaemonService.Status:output_type -> daemon.StatusResponse - 11, // 42: daemon.DaemonService.Down:output_type -> daemon.DownResponse - 13, // 43: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse - 22, // 44: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse - 24, // 45: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse - 24, // 46: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse - 29, // 47: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse - 31, // 48: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse - 33, // 49: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse - 35, // 50: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse - 38, // 51: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse - 40, // 52: daemon.DaemonService.CleanState:output_type 
-> daemon.CleanStateResponse - 42, // 53: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse - 44, // 54: daemon.DaemonService.SetNetworkMapPersistence:output_type -> daemon.SetNetworkMapPersistenceResponse - 38, // [38:55] is the sub-list for method output_type - 21, // [21:38] is the sub-list for method input_type - 21, // [21:21] is the sub-list for extension type_name - 21, // [21:21] is the sub-list for extension extendee - 0, // [0:21] is the sub-list for field type_name + 58, // 0: daemon.LoginRequest.dnsRouteInterval:type_name -> google.protobuf.Duration + 22, // 1: daemon.StatusResponse.fullStatus:type_name -> daemon.FullStatus + 59, // 2: daemon.PeerState.connStatusUpdate:type_name -> google.protobuf.Timestamp + 59, // 3: daemon.PeerState.lastWireguardHandshake:type_name -> google.protobuf.Timestamp + 58, // 4: daemon.PeerState.latency:type_name -> google.protobuf.Duration + 19, // 5: daemon.FullStatus.managementState:type_name -> daemon.ManagementState + 18, // 6: daemon.FullStatus.signalState:type_name -> daemon.SignalState + 17, // 7: daemon.FullStatus.localPeerState:type_name -> daemon.LocalPeerState + 16, // 8: daemon.FullStatus.peers:type_name -> daemon.PeerState + 20, // 9: daemon.FullStatus.relays:type_name -> daemon.RelayState + 21, // 10: daemon.FullStatus.dns_servers:type_name -> daemon.NSGroupState + 52, // 11: daemon.FullStatus.events:type_name -> daemon.SystemEvent + 28, // 12: daemon.ListNetworksResponse.routes:type_name -> daemon.Network + 55, // 13: daemon.Network.resolvedIPs:type_name -> daemon.Network.ResolvedIPsEntry + 56, // 14: daemon.PortInfo.range:type_name -> daemon.PortInfo.Range + 29, // 15: daemon.ForwardingRule.destinationPort:type_name -> daemon.PortInfo + 29, // 16: daemon.ForwardingRule.translatedPort:type_name -> daemon.PortInfo + 30, // 17: daemon.ForwardingRulesResponse.rules:type_name -> daemon.ForwardingRule + 0, // 18: daemon.GetLogLevelResponse.level:type_name -> daemon.LogLevel + 0, // 19: daemon.SetLogLevelRequest.level:type_name -> daemon.LogLevel + 38, // 20: daemon.ListStatesResponse.states:type_name -> daemon.State + 47, // 21: daemon.TracePacketRequest.tcp_flags:type_name -> daemon.TCPFlags + 49, // 22: daemon.TracePacketResponse.stages:type_name -> daemon.TraceStage + 1, // 23: daemon.SystemEvent.severity:type_name -> daemon.SystemEvent.Severity + 2, // 24: daemon.SystemEvent.category:type_name -> daemon.SystemEvent.Category + 59, // 25: daemon.SystemEvent.timestamp:type_name -> google.protobuf.Timestamp + 57, // 26: daemon.SystemEvent.metadata:type_name -> daemon.SystemEvent.MetadataEntry + 52, // 27: daemon.GetEventsResponse.events:type_name -> daemon.SystemEvent + 27, // 28: daemon.Network.ResolvedIPsEntry.value:type_name -> daemon.IPList + 4, // 29: daemon.DaemonService.Login:input_type -> daemon.LoginRequest + 6, // 30: daemon.DaemonService.WaitSSOLogin:input_type -> daemon.WaitSSOLoginRequest + 8, // 31: daemon.DaemonService.Up:input_type -> daemon.UpRequest + 10, // 32: daemon.DaemonService.Status:input_type -> daemon.StatusRequest + 12, // 33: daemon.DaemonService.Down:input_type -> daemon.DownRequest + 14, // 34: daemon.DaemonService.GetConfig:input_type -> daemon.GetConfigRequest + 23, // 35: daemon.DaemonService.ListNetworks:input_type -> daemon.ListNetworksRequest + 25, // 36: daemon.DaemonService.SelectNetworks:input_type -> daemon.SelectNetworksRequest + 25, // 37: daemon.DaemonService.DeselectNetworks:input_type -> daemon.SelectNetworksRequest + 3, // 38: 
daemon.DaemonService.ForwardingRules:input_type -> daemon.EmptyRequest + 32, // 39: daemon.DaemonService.DebugBundle:input_type -> daemon.DebugBundleRequest + 34, // 40: daemon.DaemonService.GetLogLevel:input_type -> daemon.GetLogLevelRequest + 36, // 41: daemon.DaemonService.SetLogLevel:input_type -> daemon.SetLogLevelRequest + 39, // 42: daemon.DaemonService.ListStates:input_type -> daemon.ListStatesRequest + 41, // 43: daemon.DaemonService.CleanState:input_type -> daemon.CleanStateRequest + 43, // 44: daemon.DaemonService.DeleteState:input_type -> daemon.DeleteStateRequest + 45, // 45: daemon.DaemonService.SetNetworkMapPersistence:input_type -> daemon.SetNetworkMapPersistenceRequest + 48, // 46: daemon.DaemonService.TracePacket:input_type -> daemon.TracePacketRequest + 51, // 47: daemon.DaemonService.SubscribeEvents:input_type -> daemon.SubscribeRequest + 53, // 48: daemon.DaemonService.GetEvents:input_type -> daemon.GetEventsRequest + 5, // 49: daemon.DaemonService.Login:output_type -> daemon.LoginResponse + 7, // 50: daemon.DaemonService.WaitSSOLogin:output_type -> daemon.WaitSSOLoginResponse + 9, // 51: daemon.DaemonService.Up:output_type -> daemon.UpResponse + 11, // 52: daemon.DaemonService.Status:output_type -> daemon.StatusResponse + 13, // 53: daemon.DaemonService.Down:output_type -> daemon.DownResponse + 15, // 54: daemon.DaemonService.GetConfig:output_type -> daemon.GetConfigResponse + 24, // 55: daemon.DaemonService.ListNetworks:output_type -> daemon.ListNetworksResponse + 26, // 56: daemon.DaemonService.SelectNetworks:output_type -> daemon.SelectNetworksResponse + 26, // 57: daemon.DaemonService.DeselectNetworks:output_type -> daemon.SelectNetworksResponse + 31, // 58: daemon.DaemonService.ForwardingRules:output_type -> daemon.ForwardingRulesResponse + 33, // 59: daemon.DaemonService.DebugBundle:output_type -> daemon.DebugBundleResponse + 35, // 60: daemon.DaemonService.GetLogLevel:output_type -> daemon.GetLogLevelResponse + 37, // 61: daemon.DaemonService.SetLogLevel:output_type -> daemon.SetLogLevelResponse + 40, // 62: daemon.DaemonService.ListStates:output_type -> daemon.ListStatesResponse + 42, // 63: daemon.DaemonService.CleanState:output_type -> daemon.CleanStateResponse + 44, // 64: daemon.DaemonService.DeleteState:output_type -> daemon.DeleteStateResponse + 46, // 65: daemon.DaemonService.SetNetworkMapPersistence:output_type -> daemon.SetNetworkMapPersistenceResponse + 50, // 66: daemon.DaemonService.TracePacket:output_type -> daemon.TracePacketResponse + 52, // 67: daemon.DaemonService.SubscribeEvents:output_type -> daemon.SystemEvent + 54, // 68: daemon.DaemonService.GetEvents:output_type -> daemon.GetEventsResponse + 49, // [49:69] is the sub-list for method output_type + 29, // [29:49] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_daemon_proto_init() } @@ -4016,7 +4824,103 @@ func file_daemon_proto_init() { return nil } } + file_daemon_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TCPFlags); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } file_daemon_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracePacketRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_daemon_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceStage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_daemon_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracePacketResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_daemon_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_daemon_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SystemEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_daemon_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetEventsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_daemon_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetEventsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_daemon_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PortInfo_Range); i { case 0: return &v.state @@ -4034,13 +4938,15 @@ func file_daemon_proto_init() { (*PortInfo_Port)(nil), (*PortInfo_Range_)(nil), } + file_daemon_proto_msgTypes[45].OneofWrappers = []interface{}{} + file_daemon_proto_msgTypes[46].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_daemon_proto_rawDesc, - NumEnums: 1, - NumMessages: 46, + NumEnums: 3, + NumMessages: 55, NumExtensions: 0, NumServices: 1, }, diff --git a/client/proto/daemon.proto b/client/proto/daemon.proto index ba878a72e..e0620ee42 100644 --- a/client/proto/daemon.proto +++ b/client/proto/daemon.proto @@ -61,6 +61,12 @@ service DaemonService { // SetNetworkMapPersistence enables or disables network map persistence rpc SetNetworkMapPersistence(SetNetworkMapPersistenceRequest) returns (SetNetworkMapPersistenceResponse) {} + + rpc TracePacket(TracePacketRequest) returns (TracePacketResponse) {} + + rpc SubscribeEvents(SubscribeRequest) returns (stream SystemEvent) {} + + rpc GetEvents(GetEventsRequest) returns (GetEventsResponse) {} } @@ -118,6 +124,8 @@ message LoginRequest { optional bool disable_firewall = 23; optional bool block_lan_access = 24; + + optional bool disable_notifications = 25; } message LoginResponse { @@ -183,6 +191,8 @@ message GetConfigResponse { bool rosenpassEnabled = 11; bool rosenpassPermissive = 12; + + bool disable_notifications = 13; } // PeerState contains the latest state of a peer @@ -254,6 +264,8 @@ message FullStatus { repeated RelayState relays = 5; repeated NSGroupState dns_servers = 6; int32 NumberOfForwardingRules = 8; + + repeated SystemEvent events = 7; } // Networks @@ -388,3 +400,68 @@ message SetNetworkMapPersistenceRequest { } message SetNetworkMapPersistenceResponse {} + +message TCPFlags { + bool syn = 1; + bool ack = 2; + bool 
fin = 3; + bool rst = 4; + bool psh = 5; + bool urg = 6; +} + +message TracePacketRequest { + string source_ip = 1; + string destination_ip = 2; + string protocol = 3; + uint32 source_port = 4; + uint32 destination_port = 5; + string direction = 6; + optional TCPFlags tcp_flags = 7; + optional uint32 icmp_type = 8; + optional uint32 icmp_code = 9; +} + +message TraceStage { + string name = 1; + string message = 2; + bool allowed = 3; + optional string forwarding_details = 4; +} + +message TracePacketResponse { + repeated TraceStage stages = 1; + bool final_disposition = 2; +} + +message SubscribeRequest{} + +message SystemEvent { + enum Severity { + INFO = 0; + WARNING = 1; + ERROR = 2; + CRITICAL = 3; + } + + enum Category { + NETWORK = 0; + DNS = 1; + AUTHENTICATION = 2; + CONNECTIVITY = 3; + } + + string id = 1; + Severity severity = 2; + Category category = 3; + string message = 4; + string userMessage = 5; + google.protobuf.Timestamp timestamp = 6; + map<string, string> metadata = 7; +} + +message GetEventsRequest {} + +message GetEventsResponse { + repeated SystemEvent events = 1; +} diff --git a/client/proto/daemon_grpc.pb.go b/client/proto/daemon_grpc.pb.go index ff018b773..6251f7c52 100644 --- a/client/proto/daemon_grpc.pb.go +++ b/client/proto/daemon_grpc.pb.go @@ -52,6 +52,9 @@ type DaemonServiceClient interface { DeleteState(ctx context.Context, in *DeleteStateRequest, opts ...grpc.CallOption) (*DeleteStateResponse, error) // SetNetworkMapPersistence enables or disables network map persistence SetNetworkMapPersistence(ctx context.Context, in *SetNetworkMapPersistenceRequest, opts ...grpc.CallOption) (*SetNetworkMapPersistenceResponse, error) + TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) + SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) + GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) } type daemonServiceClient struct { @@ -215,6 +218,56 @@ func (c *daemonServiceClient) SetNetworkMapPersistence(ctx context.Context, in * return out, nil } +func (c *daemonServiceClient) TracePacket(ctx context.Context, in *TracePacketRequest, opts ...grpc.CallOption) (*TracePacketResponse, error) { + out := new(TracePacketResponse) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/TracePacket", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *daemonServiceClient) SubscribeEvents(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (DaemonService_SubscribeEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &DaemonService_ServiceDesc.Streams[0], "/daemon.DaemonService/SubscribeEvents", opts...)
+ if err != nil { + return nil, err + } + x := &daemonServiceSubscribeEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type DaemonService_SubscribeEventsClient interface { + Recv() (*SystemEvent, error) + grpc.ClientStream +} + +type daemonServiceSubscribeEventsClient struct { + grpc.ClientStream +} + +func (x *daemonServiceSubscribeEventsClient) Recv() (*SystemEvent, error) { + m := new(SystemEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *daemonServiceClient) GetEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (*GetEventsResponse, error) { + out := new(GetEventsResponse) + err := c.cc.Invoke(ctx, "/daemon.DaemonService/GetEvents", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // DaemonServiceServer is the server API for DaemonService service. // All implementations must embed UnimplementedDaemonServiceServer // for forward compatibility @@ -253,6 +306,9 @@ type DaemonServiceServer interface { DeleteState(context.Context, *DeleteStateRequest) (*DeleteStateResponse, error) // SetNetworkMapPersistence enables or disables network map persistence SetNetworkMapPersistence(context.Context, *SetNetworkMapPersistenceRequest) (*SetNetworkMapPersistenceResponse, error) + TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) + SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error + GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) mustEmbedUnimplementedDaemonServiceServer() } @@ -311,6 +367,15 @@ func (UnimplementedDaemonServiceServer) DeleteState(context.Context, *DeleteStat func (UnimplementedDaemonServiceServer) SetNetworkMapPersistence(context.Context, *SetNetworkMapPersistenceRequest) (*SetNetworkMapPersistenceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SetNetworkMapPersistence not implemented") } +func (UnimplementedDaemonServiceServer) TracePacket(context.Context, *TracePacketRequest) (*TracePacketResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TracePacket not implemented") +} +func (UnimplementedDaemonServiceServer) SubscribeEvents(*SubscribeRequest, DaemonService_SubscribeEventsServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeEvents not implemented") +} +func (UnimplementedDaemonServiceServer) GetEvents(context.Context, *GetEventsRequest) (*GetEventsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEvents not implemented") +} func (UnimplementedDaemonServiceServer) mustEmbedUnimplementedDaemonServiceServer() {} // UnsafeDaemonServiceServer may be embedded to opt out of forward compatibility for this service. 
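Illustrative usage (not part of this change): a minimal sketch of how a Go caller might exercise the newly generated DaemonServiceClient methods shown above. The daemon address, IPs, and ports are placeholder values, the insecure local transport mirrors what the UI event manager in this change uses, and error handling is trimmed for brevity.

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"

    	"github.com/netbirdio/netbird/client/proto"
    )

    func main() {
    	// Placeholder daemon address; the real value depends on the local daemon configuration.
    	conn, err := grpc.NewClient("127.0.0.1:41731", grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatalf("dial daemon: %v", err)
    	}
    	defer conn.Close()

    	client := proto.NewDaemonServiceClient(conn)
    	ctx := context.Background()

    	// Unary call: trace a hypothetical outbound TCP packet through the firewall pipeline.
    	trace, err := client.TracePacket(ctx, &proto.TracePacketRequest{
    		SourceIp:        "self",
    		DestinationIp:   "100.64.0.2",
    		Protocol:        "tcp",
    		SourcePort:      54321,
    		DestinationPort: 443,
    		Direction:       "out",
    	})
    	if err != nil {
    		log.Fatalf("trace packet: %v", err)
    	}
    	fmt.Println("final disposition (allowed):", trace.GetFinalDisposition())

    	// Unary call: fetch the buffered event history.
    	history, err := client.GetEvents(ctx, &proto.GetEventsRequest{})
    	if err != nil {
    		log.Fatalf("get events: %v", err)
    	}
    	fmt.Println("stored events:", len(history.GetEvents()))

    	// Server-streaming call: receive SystemEvent messages until the stream ends.
    	stream, err := client.SubscribeEvents(ctx, &proto.SubscribeRequest{})
    	if err != nil {
    		log.Fatalf("subscribe events: %v", err)
    	}
    	for {
    		ev, err := stream.Recv()
    		if err != nil {
    			break // io.EOF when the daemon closes the stream, or a transport error
    		}
    		fmt.Printf("[%s/%s] %s\n", ev.GetSeverity(), ev.GetCategory(), ev.GetUserMessage())
    	}
    }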
@@ -630,6 +695,63 @@ func _DaemonService_SetNetworkMapPersistence_Handler(srv interface{}, ctx contex return interceptor(ctx, in, info, handler) } +func _DaemonService_TracePacket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TracePacketRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).TracePacket(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/daemon.DaemonService/TracePacket", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).TracePacket(ctx, req.(*TracePacketRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DaemonService_SubscribeEvents_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DaemonServiceServer).SubscribeEvents(m, &daemonServiceSubscribeEventsServer{stream}) +} + +type DaemonService_SubscribeEventsServer interface { + Send(*SystemEvent) error + grpc.ServerStream +} + +type daemonServiceSubscribeEventsServer struct { + grpc.ServerStream +} + +func (x *daemonServiceSubscribeEventsServer) Send(m *SystemEvent) error { + return x.ServerStream.SendMsg(m) +} + +func _DaemonService_GetEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEventsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DaemonServiceServer).GetEvents(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/daemon.DaemonService/GetEvents", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DaemonServiceServer).GetEvents(ctx, req.(*GetEventsRequest)) + } + return interceptor(ctx, in, info, handler) +} + // DaemonService_ServiceDesc is the grpc.ServiceDesc for DaemonService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -705,7 +827,21 @@ var DaemonService_ServiceDesc = grpc.ServiceDesc{ MethodName: "SetNetworkMapPersistence", Handler: _DaemonService_SetNetworkMapPersistence_Handler, }, + { + MethodName: "TracePacket", + Handler: _DaemonService_TracePacket_Handler, + }, + { + MethodName: "GetEvents", + Handler: _DaemonService_GetEvents_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "SubscribeEvents", + Handler: _DaemonService_SubscribeEvents_Handler, + ServerStreams: true, + }, }, - Streams: []grpc.StreamDesc{}, Metadata: "daemon.proto", } diff --git a/client/server/debug.go b/client/server/debug.go index a37195b29..749220d62 100644 --- a/client/server/debug.go +++ b/client/server/debug.go @@ -538,7 +538,24 @@ func (s *Server) SetLogLevel(_ context.Context, req *proto.SetLogLevelRequest) ( } log.SetLevel(level) + + if s.connectClient == nil { + return nil, fmt.Errorf("connect client not initialized") + } + engine := s.connectClient.Engine() + if engine == nil { + return nil, fmt.Errorf("engine not initialized") + } + + fwManager := engine.GetFirewallManager() + if fwManager == nil { + return nil, fmt.Errorf("firewall manager not initialized") + } + + fwManager.SetLogLevel(level) + log.Infof("Log level set to %s", level.String()) + return &proto.SetLogLevelResponse{}, nil } diff --git a/client/server/event.go b/client/server/event.go new file mode 100644 index 000000000..9a4e0fbf5 --- /dev/null +++ b/client/server/event.go @@ -0,0 +1,36 @@ +package server + +import ( + "context" + + log "github.com/sirupsen/logrus" + + "github.com/netbirdio/netbird/client/proto" +) + +func (s *Server) SubscribeEvents(req *proto.SubscribeRequest, stream proto.DaemonService_SubscribeEventsServer) error { + subscription := s.statusRecorder.SubscribeToEvents() + defer func() { + s.statusRecorder.UnsubscribeFromEvents(subscription) + log.Debug("client unsubscribed from events") + }() + + log.Debug("client subscribed to events") + + for { + select { + case event := <-subscription.Events(): + if err := stream.Send(event); err != nil { + log.Warnf("error sending event to %v: %v", req, err) + return err + } + case <-stream.Context().Done(): + return nil + } + } +} + +func (s *Server) GetEvents(context.Context, *proto.GetEventsRequest) (*proto.GetEventsResponse, error) { + events := s.statusRecorder.GetEventHistory() + return &proto.GetEventsResponse{Events: events}, nil +} diff --git a/client/server/server.go b/client/server/server.go index f84269cc1..906fe5085 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -404,6 +404,11 @@ func (s *Server) Login(callerCtx context.Context, msg *proto.LoginRequest) (*pro s.latestConfigInput.BlockLANAccess = msg.BlockLanAccess } + if msg.DisableNotifications != nil { + inputConfig.DisableNotifications = msg.DisableNotifications + s.latestConfigInput.DisableNotifications = msg.DisableNotifications + } + s.mutex.Unlock() if msg.OptionalPreSharedKey != nil { @@ -687,6 +692,7 @@ func (s *Server) Status( fullStatus := s.statusRecorder.GetFullStatus() pbFullStatus := toProtoFullStatus(fullStatus) + pbFullStatus.Events = s.statusRecorder.GetEventHistory() statusResponse.FullStatus = pbFullStatus } @@ -736,17 +742,18 @@ func (s *Server) GetConfig(_ context.Context, _ *proto.GetConfigRequest) (*proto } return &proto.GetConfigResponse{ - ManagementUrl: managementURL, - ConfigFile: s.latestConfigInput.ConfigPath, - LogFile: s.logFile, - 
PreSharedKey: preSharedKey, - AdminURL: adminURL, - InterfaceName: s.config.WgIface, - WireguardPort: int64(s.config.WgPort), - DisableAutoConnect: s.config.DisableAutoConnect, - ServerSSHAllowed: *s.config.ServerSSHAllowed, - RosenpassEnabled: s.config.RosenpassEnabled, - RosenpassPermissive: s.config.RosenpassPermissive, + ManagementUrl: managementURL, + ConfigFile: s.latestConfigInput.ConfigPath, + LogFile: s.logFile, + PreSharedKey: preSharedKey, + AdminURL: adminURL, + InterfaceName: s.config.WgIface, + WireguardPort: int64(s.config.WgPort), + DisableAutoConnect: s.config.DisableAutoConnect, + ServerSSHAllowed: *s.config.ServerSSHAllowed, + RosenpassEnabled: s.config.RosenpassEnabled, + RosenpassPermissive: s.config.RosenpassPermissive, + DisableNotifications: s.config.DisableNotifications, }, nil } func (s *Server) onSessionExpire() { diff --git a/client/server/trace.go b/client/server/trace.go new file mode 100644 index 000000000..66b83d8cf --- /dev/null +++ b/client/server/trace.go @@ -0,0 +1,123 @@ +package server + +import ( + "context" + "fmt" + "net" + + fw "github.com/netbirdio/netbird/client/firewall/manager" + "github.com/netbirdio/netbird/client/firewall/uspfilter" + "github.com/netbirdio/netbird/client/proto" +) + +type packetTracer interface { + TracePacketFromBuilder(builder *uspfilter.PacketBuilder) (*uspfilter.PacketTrace, error) +} + +func (s *Server) TracePacket(_ context.Context, req *proto.TracePacketRequest) (*proto.TracePacketResponse, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.connectClient == nil { + return nil, fmt.Errorf("connect client not initialized") + } + engine := s.connectClient.Engine() + if engine == nil { + return nil, fmt.Errorf("engine not initialized") + } + + fwManager := engine.GetFirewallManager() + if fwManager == nil { + return nil, fmt.Errorf("firewall manager not initialized") + } + + tracer, ok := fwManager.(packetTracer) + if !ok { + return nil, fmt.Errorf("firewall manager does not support packet tracing") + } + + srcIP := net.ParseIP(req.GetSourceIp()) + if req.GetSourceIp() == "self" { + srcIP = engine.GetWgAddr() + } + + dstIP := net.ParseIP(req.GetDestinationIp()) + if req.GetDestinationIp() == "self" { + dstIP = engine.GetWgAddr() + } + + if srcIP == nil || dstIP == nil { + return nil, fmt.Errorf("invalid IP address") + } + + var tcpState *uspfilter.TCPState + if flags := req.GetTcpFlags(); flags != nil { + tcpState = &uspfilter.TCPState{ + SYN: flags.GetSyn(), + ACK: flags.GetAck(), + FIN: flags.GetFin(), + RST: flags.GetRst(), + PSH: flags.GetPsh(), + URG: flags.GetUrg(), + } + } + + var dir fw.RuleDirection + switch req.GetDirection() { + case "in": + dir = fw.RuleDirectionIN + case "out": + dir = fw.RuleDirectionOUT + default: + return nil, fmt.Errorf("invalid direction") + } + + var protocol fw.Protocol + switch req.GetProtocol() { + case "tcp": + protocol = fw.ProtocolTCP + case "udp": + protocol = fw.ProtocolUDP + case "icmp": + protocol = fw.ProtocolICMP + default: + return nil, fmt.Errorf("invalid protocol") + } + + builder := &uspfilter.PacketBuilder{ + SrcIP: srcIP, + DstIP: dstIP, + Protocol: protocol, + SrcPort: uint16(req.GetSourcePort()), + DstPort: uint16(req.GetDestinationPort()), + Direction: dir, + TCPState: tcpState, + ICMPType: uint8(req.GetIcmpType()), + ICMPCode: uint8(req.GetIcmpCode()), + } + trace, err := tracer.TracePacketFromBuilder(builder) + if err != nil { + return nil, fmt.Errorf("trace packet: %w", err) + } + + resp := &proto.TracePacketResponse{} + + for _, result := range
trace.Results { + stage := &proto.TraceStage{ + Name: result.Stage.String(), + Message: result.Message, + Allowed: result.Allowed, + } + if result.ForwarderAction != nil { + details := fmt.Sprintf("%s to %s", result.ForwarderAction.Action, result.ForwarderAction.RemoteAddr) + stage.ForwardingDetails = &details + } + resp.Stages = append(resp.Stages, stage) + } + + if len(trace.Results) > 0 { + resp.FinalDisposition = trace.Results[len(trace.Results)-1].Allowed + } + + return resp, nil +} diff --git a/client/ui/client_ui.go b/client/ui/client_ui.go index f22ee377b..9ed40b0be 100644 --- a/client/ui/client_ui.go +++ b/client/ui/client_ui.go @@ -21,6 +21,7 @@ import ( "fyne.io/fyne/v2" "fyne.io/fyne/v2/app" "fyne.io/fyne/v2/dialog" + "fyne.io/fyne/v2/theme" "fyne.io/fyne/v2/widget" "fyne.io/systray" "github.com/cenkalti/backoff/v4" @@ -33,6 +34,7 @@ import ( "github.com/netbirdio/netbird/client/internal" "github.com/netbirdio/netbird/client/proto" "github.com/netbirdio/netbird/client/system" + "github.com/netbirdio/netbird/client/ui/event" "github.com/netbirdio/netbird/util" "github.com/netbirdio/netbird/version" ) @@ -82,7 +84,7 @@ func main() { } a := app.NewWithID("NetBird") - a.SetIcon(fyne.NewStaticResource("netbird", iconDisconnectedPNG)) + a.SetIcon(fyne.NewStaticResource("netbird", iconDisconnected)) if errorMSG != "" { showErrorMSG(errorMSG) @@ -90,6 +92,14 @@ func main() { } client := newServiceClient(daemonAddr, a, showSettings, showRoutes) + settingsChangeChan := make(chan fyne.Settings) + a.Settings().AddChangeListener(settingsChangeChan) + go func() { + for range settingsChangeChan { + client.updateIcon() + } + }() + if showSettings || showRoutes { a.Run() } else { @@ -106,46 +116,36 @@ func main() { } } -//go:embed netbird-systemtray-connected.ico -var iconConnectedICO []byte +//go:embed netbird-systemtray-connected-macos.png +var iconConnectedMacOS []byte -//go:embed netbird-systemtray-connected.png -var iconConnectedPNG []byte +//go:embed netbird-systemtray-disconnected-macos.png +var iconDisconnectedMacOS []byte -//go:embed netbird-systemtray-disconnected.ico -var iconDisconnectedICO []byte +//go:embed netbird-systemtray-update-disconnected-macos.png +var iconUpdateDisconnectedMacOS []byte -//go:embed netbird-systemtray-disconnected.png -var iconDisconnectedPNG []byte +//go:embed netbird-systemtray-update-connected-macos.png +var iconUpdateConnectedMacOS []byte -//go:embed netbird-systemtray-update-disconnected.ico -var iconUpdateDisconnectedICO []byte +//go:embed netbird-systemtray-connecting-macos.png +var iconConnectingMacOS []byte -//go:embed netbird-systemtray-update-disconnected.png -var iconUpdateDisconnectedPNG []byte - -//go:embed netbird-systemtray-update-connected.ico -var iconUpdateConnectedICO []byte - -//go:embed netbird-systemtray-update-connected.png -var iconUpdateConnectedPNG []byte - -//go:embed netbird-systemtray-update-cloud.ico -var iconUpdateCloudICO []byte - -//go:embed netbird-systemtray-update-cloud.png -var iconUpdateCloudPNG []byte +//go:embed netbird-systemtray-error-macos.png +var iconErrorMacOS []byte type serviceClient struct { ctx context.Context addr string conn proto.DaemonServiceClient + icAbout []byte icConnected []byte icDisconnected []byte icUpdateConnected []byte icUpdateDisconnected []byte - icUpdateCloud []byte + icConnecting []byte + icError []byte // systray menu items mStatus *systray.MenuItem @@ -162,6 +162,7 @@ type serviceClient struct { mAllowSSH *systray.MenuItem mAutoConnect *systray.MenuItem mEnableRosenpass 
*systray.MenuItem + mNotifications *systray.MenuItem mAdvancedSettings *systray.MenuItem // application with main windows. @@ -197,6 +198,8 @@ type serviceClient struct { isUpdateIconActive bool showRoutes bool wRoutes fyne.Window + + eventManager *event.Manager } // newServiceClient instance constructor @@ -214,20 +217,7 @@ func newServiceClient(addr string, a fyne.App, showSettings bool, showRoutes boo update: version.NewUpdate(), } - if runtime.GOOS == "windows" { - s.icConnected = iconConnectedICO - s.icDisconnected = iconDisconnectedICO - s.icUpdateConnected = iconUpdateConnectedICO - s.icUpdateDisconnected = iconUpdateDisconnectedICO - s.icUpdateCloud = iconUpdateCloudICO - - } else { - s.icConnected = iconConnectedPNG - s.icDisconnected = iconDisconnectedPNG - s.icUpdateConnected = iconUpdateConnectedPNG - s.icUpdateDisconnected = iconUpdateDisconnectedPNG - s.icUpdateCloud = iconUpdateCloudPNG - } + s.setNewIcons() if showSettings { s.showSettingsUI() @@ -239,6 +229,44 @@ func newServiceClient(addr string, a fyne.App, showSettings bool, showRoutes boo return s } +func (s *serviceClient) setNewIcons() { + s.icAbout = iconAbout + if s.app.Settings().ThemeVariant() == theme.VariantDark { + s.icConnected = iconConnectedDark + s.icDisconnected = iconDisconnected + s.icUpdateConnected = iconUpdateConnectedDark + s.icUpdateDisconnected = iconUpdateDisconnectedDark + s.icConnecting = iconConnectingDark + s.icError = iconErrorDark + } else { + s.icConnected = iconConnected + s.icDisconnected = iconDisconnected + s.icUpdateConnected = iconUpdateConnected + s.icUpdateDisconnected = iconUpdateDisconnected + s.icConnecting = iconConnecting + s.icError = iconError + } +} + +func (s *serviceClient) updateIcon() { + s.setNewIcons() + s.updateIndicationLock.Lock() + if s.connected { + if s.isUpdateIconActive { + systray.SetTemplateIcon(iconUpdateConnectedMacOS, s.icUpdateConnected) + } else { + systray.SetTemplateIcon(iconConnectedMacOS, s.icConnected) + } + } else { + if s.isUpdateIconActive { + systray.SetTemplateIcon(iconUpdateDisconnectedMacOS, s.icUpdateDisconnected) + } else { + systray.SetTemplateIcon(iconDisconnectedMacOS, s.icDisconnected) + } + } + s.updateIndicationLock.Unlock() +} + func (s *serviceClient) showSettingsUI() { // add settings window UI elements. 
s.wSettings = s.app.NewWindow("NetBird Settings") @@ -376,8 +404,10 @@ func (s *serviceClient) login() error { } func (s *serviceClient) menuUpClick() error { + systray.SetTemplateIcon(iconConnectingMacOS, s.icConnecting) conn, err := s.getSrvClient(defaultFailTimeout) if err != nil { + systray.SetTemplateIcon(iconErrorMacOS, s.icError) log.Errorf("get client: %v", err) return err } @@ -403,10 +433,12 @@ func (s *serviceClient) menuUpClick() error { log.Errorf("up service: %v", err) return err } + return nil } func (s *serviceClient) menuDownClick() error { + systray.SetTemplateIcon(iconConnectingMacOS, s.icConnecting) conn, err := s.getSrvClient(defaultFailTimeout) if err != nil { log.Errorf("get client: %v", err) @@ -458,9 +490,9 @@ func (s *serviceClient) updateStatus() error { s.connected = true s.sendNotification = true if s.isUpdateIconActive { - systray.SetIcon(s.icUpdateConnected) + systray.SetTemplateIcon(iconUpdateConnectedMacOS, s.icUpdateConnected) } else { - systray.SetIcon(s.icConnected) + systray.SetTemplateIcon(iconConnectedMacOS, s.icConnected) } systray.SetTooltip("NetBird (Connected)") s.mStatus.SetTitle("Connected") @@ -482,11 +514,9 @@ func (s *serviceClient) updateStatus() error { s.isUpdateIconActive = s.update.SetDaemonVersion(status.DaemonVersion) if !s.isUpdateIconActive { if systrayIconState { - systray.SetIcon(s.icConnected) - s.mAbout.SetIcon(s.icConnected) + systray.SetTemplateIcon(iconConnectedMacOS, s.icConnected) } else { - systray.SetIcon(s.icDisconnected) - s.mAbout.SetIcon(s.icDisconnected) + systray.SetTemplateIcon(iconDisconnectedMacOS, s.icDisconnected) } } @@ -506,7 +536,6 @@ func (s *serviceClient) updateStatus() error { Stop: backoff.Stop, Clock: backoff.SystemClock, }) - if err != nil { return err } @@ -517,9 +546,9 @@ func (s *serviceClient) updateStatus() error { func (s *serviceClient) setDisconnectedStatus() { s.connected = false if s.isUpdateIconActive { - systray.SetIcon(s.icUpdateDisconnected) + systray.SetTemplateIcon(iconUpdateDisconnectedMacOS, s.icUpdateDisconnected) } else { - systray.SetIcon(s.icDisconnected) + systray.SetTemplateIcon(iconDisconnectedMacOS, s.icDisconnected) } systray.SetTooltip("NetBird (Disconnected)") s.mStatus.SetTitle("Disconnected") @@ -529,7 +558,7 @@ func (s *serviceClient) setDisconnectedStatus() { } func (s *serviceClient) onTrayReady() { - systray.SetIcon(s.icDisconnected) + systray.SetTemplateIcon(iconDisconnectedMacOS, s.icDisconnected) systray.SetTooltip("NetBird") // setup systray menu items @@ -546,6 +575,7 @@ func (s *serviceClient) onTrayReady() { s.mAllowSSH = s.mSettings.AddSubMenuItemCheckbox("Allow SSH", "Allow SSH connections", false) s.mAutoConnect = s.mSettings.AddSubMenuItemCheckbox("Connect on Startup", "Connect automatically when the service starts", false) s.mEnableRosenpass = s.mSettings.AddSubMenuItemCheckbox("Enable Quantum-Resistance", "Enable post-quantum security via Rosenpass", false) + s.mNotifications = s.mSettings.AddSubMenuItemCheckbox("Notifications", "Enable notifications", true) s.mAdvancedSettings = s.mSettings.AddSubMenuItem("Advanced Settings", "Advanced settings of the application") s.loadSettings() @@ -554,7 +584,7 @@ func (s *serviceClient) onTrayReady() { systray.AddSeparator() s.mAbout = systray.AddMenuItem("About", "About") - s.mAbout.SetIcon(s.icDisconnected) + s.mAbout.SetIcon(s.icAbout) versionString := normalizedVersion(version.NetbirdVersion()) s.mVersionUI = s.mAbout.AddSubMenuItem(fmt.Sprintf("GUI: %s", versionString), fmt.Sprintf("GUI Version: %s", 
versionString)) s.mVersionUI.Disable() @@ -582,6 +612,10 @@ func (s *serviceClient) onTrayReady() { } }() + s.eventManager = event.NewManager(s.app, s.addr) + s.eventManager.SetNotificationsEnabled(s.mNotifications.Checked()) + go s.eventManager.Start(s.ctx) + go func() { var err error for { @@ -616,7 +650,6 @@ func (s *serviceClient) onTrayReady() { } if err := s.updateConfig(); err != nil { log.Errorf("failed to update config: %v", err) - return } case <-s.mAutoConnect.ClickedCh: if s.mAutoConnect.Checked() { @@ -626,7 +659,6 @@ func (s *serviceClient) onTrayReady() { } if err := s.updateConfig(); err != nil { log.Errorf("failed to update config: %v", err) - return } case <-s.mEnableRosenpass.ClickedCh: if s.mEnableRosenpass.Checked() { @@ -636,7 +668,6 @@ func (s *serviceClient) onTrayReady() { } if err := s.updateConfig(); err != nil { log.Errorf("failed to update config: %v", err) - return } case <-s.mAdvancedSettings.ClickedCh: s.mAdvancedSettings.Disable() @@ -659,7 +690,20 @@ func (s *serviceClient) onTrayReady() { defer s.mRoutes.Enable() s.runSelfCommand("networks", "true") }() + case <-s.mNotifications.ClickedCh: + if s.mNotifications.Checked() { + s.mNotifications.Uncheck() + } else { + s.mNotifications.Check() + } + if s.eventManager != nil { + s.eventManager.SetNotificationsEnabled(s.mNotifications.Checked()) + } + if err := s.updateConfig(); err != nil { + log.Errorf("failed to update config: %v", err) + } } + if err != nil { log.Errorf("process connection: %v", err) } @@ -759,8 +803,20 @@ func (s *serviceClient) getSrvConfig() { if !cfg.RosenpassEnabled { s.sRosenpassPermissive.Disable() } - } + + if s.mNotifications == nil { + return + } + if cfg.DisableNotifications { + s.mNotifications.Uncheck() + } else { + s.mNotifications.Check() + } + if s.eventManager != nil { + s.eventManager.SetNotificationsEnabled(s.mNotifications.Checked()) + } + } func (s *serviceClient) onUpdateAvailable() { @@ -771,9 +827,9 @@ func (s *serviceClient) onUpdateAvailable() { s.isUpdateIconActive = true if s.connected { - systray.SetIcon(s.icUpdateConnected) + systray.SetTemplateIcon(iconUpdateConnectedMacOS, s.icUpdateConnected) } else { - systray.SetIcon(s.icUpdateDisconnected) + systray.SetTemplateIcon(iconUpdateDisconnectedMacOS, s.icUpdateDisconnected) } } @@ -825,6 +881,15 @@ func (s *serviceClient) loadSettings() { } else { s.mEnableRosenpass.Uncheck() } + + if cfg.DisableNotifications { + s.mNotifications.Uncheck() + } else { + s.mNotifications.Check() + } + if s.eventManager != nil { + s.eventManager.SetNotificationsEnabled(s.mNotifications.Checked()) + } } // updateConfig updates the configuration parameters @@ -833,12 +898,14 @@ func (s *serviceClient) updateConfig() error { disableAutoStart := !s.mAutoConnect.Checked() sshAllowed := s.mAllowSSH.Checked() rosenpassEnabled := s.mEnableRosenpass.Checked() + notificationsDisabled := !s.mNotifications.Checked() loginRequest := proto.LoginRequest{ IsLinuxDesktopClient: runtime.GOOS == "linux", ServerSSHAllowed: &sshAllowed, RosenpassEnabled: &rosenpassEnabled, DisableAutoConnect: &disableAutoStart, + DisableNotifications: ¬ificationsDisabled, } if err := s.restartClient(&loginRequest); err != nil { @@ -851,17 +918,20 @@ func (s *serviceClient) updateConfig() error { // restartClient restarts the client connection. 
func (s *serviceClient) restartClient(loginRequest *proto.LoginRequest) error { + ctx, cancel := context.WithTimeout(s.ctx, defaultFailTimeout) + defer cancel() + client, err := s.getSrvClient(failFastTimeout) if err != nil { return err } - _, err = client.Login(s.ctx, loginRequest) + _, err = client.Login(ctx, loginRequest) if err != nil { return err } - _, err = client.Up(s.ctx, &proto.UpRequest{}) + _, err = client.Up(ctx, &proto.UpRequest{}) if err != nil { return err } diff --git a/client/ui/event/event.go b/client/ui/event/event.go new file mode 100644 index 000000000..7925ee4d3 --- /dev/null +++ b/client/ui/event/event.go @@ -0,0 +1,151 @@ +package event + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "fyne.io/fyne/v2" + "github.com/cenkalti/backoff/v4" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/netbirdio/netbird/client/proto" + "github.com/netbirdio/netbird/client/system" +) + +type Manager struct { + app fyne.App + addr string + + mu sync.Mutex + ctx context.Context + cancel context.CancelFunc + enabled bool +} + +func NewManager(app fyne.App, addr string) *Manager { + return &Manager{ + app: app, + addr: addr, + } +} + +func (e *Manager) Start(ctx context.Context) { + e.mu.Lock() + e.ctx, e.cancel = context.WithCancel(ctx) + e.mu.Unlock() + + expBackOff := backoff.WithContext(&backoff.ExponentialBackOff{ + InitialInterval: time.Second, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: 10 * time.Second, + MaxElapsedTime: 0, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + }, ctx) + + if err := backoff.Retry(e.streamEvents, expBackOff); err != nil { + log.Errorf("event stream ended: %v", err) + } +} + +func (e *Manager) streamEvents() error { + e.mu.Lock() + ctx := e.ctx + e.mu.Unlock() + + client, err := getClient(e.addr) + if err != nil { + return fmt.Errorf("create client: %w", err) + } + + stream, err := client.SubscribeEvents(ctx, &proto.SubscribeRequest{}) + if err != nil { + return fmt.Errorf("failed to subscribe to events: %w", err) + } + + log.Info("subscribed to daemon events") + defer func() { + log.Info("unsubscribed from daemon events") + }() + + for { + event, err := stream.Recv() + if err != nil { + return fmt.Errorf("error receiving event: %w", err) + } + e.handleEvent(event) + } +} + +func (e *Manager) Stop() { + e.mu.Lock() + defer e.mu.Unlock() + if e.cancel != nil { + e.cancel() + } +} + +func (e *Manager) SetNotificationsEnabled(enabled bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.enabled = enabled +} + +func (e *Manager) handleEvent(event *proto.SystemEvent) { + e.mu.Lock() + enabled := e.enabled + e.mu.Unlock() + + if !enabled { + return + } + + title := e.getEventTitle(event) + e.app.SendNotification(fyne.NewNotification(title, event.UserMessage)) +} + +func (e *Manager) getEventTitle(event *proto.SystemEvent) string { + var prefix string + switch event.Severity { + case proto.SystemEvent_ERROR, proto.SystemEvent_CRITICAL: + prefix = "Error" + case proto.SystemEvent_WARNING: + prefix = "Warning" + default: + prefix = "Info" + } + + var category string + switch event.Category { + case proto.SystemEvent_DNS: + category = "DNS" + case proto.SystemEvent_NETWORK: + category = "Network" + case proto.SystemEvent_AUTHENTICATION: + category = "Authentication" + case proto.SystemEvent_CONNECTIVITY: + category = "Connectivity" + default: + category = "System" + } + + return fmt.Sprintf("%s: %s", 
prefix, category) +} + +func getClient(addr string) (proto.DaemonServiceClient, error) { + conn, err := grpc.NewClient( + strings.TrimPrefix(addr, "tcp://"), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithUserAgent(system.GetDesktopUIUserAgent()), + ) + if err != nil { + return nil, err + } + return proto.NewDaemonServiceClient(conn), nil +} diff --git a/client/ui/icons.go b/client/ui/icons.go new file mode 100644 index 000000000..6f3a9dbc9 --- /dev/null +++ b/client/ui/icons.go @@ -0,0 +1,43 @@ +//go:build !(linux && 386) && !windows + +package main + +import ( + _ "embed" +) + +//go:embed netbird.png +var iconAbout []byte + +//go:embed netbird-systemtray-connected.png +var iconConnected []byte + +//go:embed netbird-systemtray-connected-dark.png +var iconConnectedDark []byte + +//go:embed netbird-systemtray-disconnected.png +var iconDisconnected []byte + +//go:embed netbird-systemtray-update-disconnected.png +var iconUpdateDisconnected []byte + +//go:embed netbird-systemtray-update-disconnected-dark.png +var iconUpdateDisconnectedDark []byte + +//go:embed netbird-systemtray-update-connected.png +var iconUpdateConnected []byte + +//go:embed netbird-systemtray-update-connected-dark.png +var iconUpdateConnectedDark []byte + +//go:embed netbird-systemtray-connecting.png +var iconConnecting []byte + +//go:embed netbird-systemtray-connecting-dark.png +var iconConnectingDark []byte + +//go:embed netbird-systemtray-error.png +var iconError []byte + +//go:embed netbird-systemtray-error-dark.png +var iconErrorDark []byte diff --git a/client/ui/icons_windows.go b/client/ui/icons_windows.go new file mode 100644 index 000000000..a2a924763 --- /dev/null +++ b/client/ui/icons_windows.go @@ -0,0 +1,41 @@ +package main + +import ( + _ "embed" +) + +//go:embed netbird.ico +var iconAbout []byte + +//go:embed netbird-systemtray-connected.ico +var iconConnected []byte + +//go:embed netbird-systemtray-connected-dark.ico +var iconConnectedDark []byte + +//go:embed netbird-systemtray-disconnected.ico +var iconDisconnected []byte + +//go:embed netbird-systemtray-update-disconnected.ico +var iconUpdateDisconnected []byte + +//go:embed netbird-systemtray-update-disconnected-dark.ico +var iconUpdateDisconnectedDark []byte + +//go:embed netbird-systemtray-update-connected.ico +var iconUpdateConnected []byte + +//go:embed netbird-systemtray-update-connected-dark.ico +var iconUpdateConnectedDark []byte + +//go:embed netbird-systemtray-connecting.ico +var iconConnecting []byte + +//go:embed netbird-systemtray-connecting-dark.ico +var iconConnectingDark []byte + +//go:embed netbird-systemtray-error.ico +var iconError []byte + +//go:embed netbird-systemtray-error-dark.ico +var iconErrorDark []byte diff --git a/client/ui/netbird-systemtray-connected-dark.ico b/client/ui/netbird-systemtray-connected-dark.ico new file mode 100644 index 000000000..0db8a0862 Binary files /dev/null and b/client/ui/netbird-systemtray-connected-dark.ico differ diff --git a/client/ui/netbird-systemtray-connected-dark.png b/client/ui/netbird-systemtray-connected-dark.png new file mode 100644 index 000000000..f18a929a0 Binary files /dev/null and b/client/ui/netbird-systemtray-connected-dark.png differ diff --git a/client/ui/netbird-systemtray-connected-macos.png b/client/ui/netbird-systemtray-connected-macos.png new file mode 100644 index 000000000..ead210250 Binary files /dev/null and b/client/ui/netbird-systemtray-connected-macos.png differ diff --git a/client/ui/netbird-systemtray-connected.ico 
b/client/ui/netbird-systemtray-connected.ico index 80550aa37..c16bec3f5 100644 Binary files a/client/ui/netbird-systemtray-connected.ico and b/client/ui/netbird-systemtray-connected.ico differ diff --git a/client/ui/netbird-systemtray-connected.png b/client/ui/netbird-systemtray-connected.png index f4d156da8..4258a5c1c 100644 Binary files a/client/ui/netbird-systemtray-connected.png and b/client/ui/netbird-systemtray-connected.png differ diff --git a/client/ui/netbird-systemtray-connecting-dark.ico b/client/ui/netbird-systemtray-connecting-dark.ico new file mode 100644 index 000000000..615d40f07 Binary files /dev/null and b/client/ui/netbird-systemtray-connecting-dark.ico differ diff --git a/client/ui/netbird-systemtray-connecting-dark.png b/client/ui/netbird-systemtray-connecting-dark.png new file mode 100644 index 000000000..a665eb61c Binary files /dev/null and b/client/ui/netbird-systemtray-connecting-dark.png differ diff --git a/client/ui/netbird-systemtray-connecting-macos.png b/client/ui/netbird-systemtray-connecting-macos.png new file mode 100644 index 000000000..0fe7fa0db Binary files /dev/null and b/client/ui/netbird-systemtray-connecting-macos.png differ diff --git a/client/ui/netbird-systemtray-connecting.ico b/client/ui/netbird-systemtray-connecting.ico new file mode 100644 index 000000000..4e4c3a9b1 Binary files /dev/null and b/client/ui/netbird-systemtray-connecting.ico differ diff --git a/client/ui/netbird-systemtray-connecting.png b/client/ui/netbird-systemtray-connecting.png new file mode 100644 index 000000000..4f607c997 Binary files /dev/null and b/client/ui/netbird-systemtray-connecting.png differ diff --git a/client/ui/netbird-systemtray-disconnected-macos.png b/client/ui/netbird-systemtray-disconnected-macos.png new file mode 100644 index 000000000..36b9a488f Binary files /dev/null and b/client/ui/netbird-systemtray-disconnected-macos.png differ diff --git a/client/ui/netbird-systemtray-disconnected.ico b/client/ui/netbird-systemtray-disconnected.ico index aa75268b0..dcb9f4bf8 100644 Binary files a/client/ui/netbird-systemtray-disconnected.ico and b/client/ui/netbird-systemtray-disconnected.ico differ diff --git a/client/ui/netbird-systemtray-disconnected.png b/client/ui/netbird-systemtray-disconnected.png index 3aae73231..a92e9ed4c 100644 Binary files a/client/ui/netbird-systemtray-disconnected.png and b/client/ui/netbird-systemtray-disconnected.png differ diff --git a/client/ui/netbird-systemtray-error-dark.ico b/client/ui/netbird-systemtray-error-dark.ico new file mode 100644 index 000000000..083816188 Binary files /dev/null and b/client/ui/netbird-systemtray-error-dark.ico differ diff --git a/client/ui/netbird-systemtray-error-dark.png b/client/ui/netbird-systemtray-error-dark.png new file mode 100644 index 000000000..969554b16 Binary files /dev/null and b/client/ui/netbird-systemtray-error-dark.png differ diff --git a/client/ui/netbird-systemtray-error-macos.png b/client/ui/netbird-systemtray-error-macos.png new file mode 100644 index 000000000..9a9998bcf Binary files /dev/null and b/client/ui/netbird-systemtray-error-macos.png differ diff --git a/client/ui/netbird-systemtray-error.ico b/client/ui/netbird-systemtray-error.ico new file mode 100644 index 000000000..1abc45c2a Binary files /dev/null and b/client/ui/netbird-systemtray-error.ico differ diff --git a/client/ui/netbird-systemtray-error.png b/client/ui/netbird-systemtray-error.png new file mode 100644 index 000000000..722342989 Binary files /dev/null and b/client/ui/netbird-systemtray-error.png differ diff 
--git a/client/ui/netbird-systemtray-update-cloud.ico b/client/ui/netbird-systemtray-update-cloud.ico deleted file mode 100644 index b87c6f4b5..000000000 Binary files a/client/ui/netbird-systemtray-update-cloud.ico and /dev/null differ diff --git a/client/ui/netbird-systemtray-update-cloud.png b/client/ui/netbird-systemtray-update-cloud.png deleted file mode 100644 index e9d0b8035..000000000 Binary files a/client/ui/netbird-systemtray-update-cloud.png and /dev/null differ diff --git a/client/ui/netbird-systemtray-update-connected-dark.ico b/client/ui/netbird-systemtray-update-connected-dark.ico new file mode 100644 index 000000000..b11bb5492 Binary files /dev/null and b/client/ui/netbird-systemtray-update-connected-dark.ico differ diff --git a/client/ui/netbird-systemtray-update-connected-dark.png b/client/ui/netbird-systemtray-update-connected-dark.png new file mode 100644 index 000000000..52ae621ac Binary files /dev/null and b/client/ui/netbird-systemtray-update-connected-dark.png differ diff --git a/client/ui/netbird-systemtray-update-connected-macos.png b/client/ui/netbird-systemtray-update-connected-macos.png new file mode 100644 index 000000000..8a6b2f2db Binary files /dev/null and b/client/ui/netbird-systemtray-update-connected-macos.png differ diff --git a/client/ui/netbird-systemtray-update-connected.ico b/client/ui/netbird-systemtray-update-connected.ico index cc056e68e..d3ce2f0f3 100644 Binary files a/client/ui/netbird-systemtray-update-connected.ico and b/client/ui/netbird-systemtray-update-connected.ico differ diff --git a/client/ui/netbird-systemtray-update-connected.png b/client/ui/netbird-systemtray-update-connected.png index a0c453340..90bb0b7f1 100644 Binary files a/client/ui/netbird-systemtray-update-connected.png and b/client/ui/netbird-systemtray-update-connected.png differ diff --git a/client/ui/netbird-systemtray-update-disconnected-dark.ico b/client/ui/netbird-systemtray-update-disconnected-dark.ico new file mode 100644 index 000000000..123237f66 Binary files /dev/null and b/client/ui/netbird-systemtray-update-disconnected-dark.ico differ diff --git a/client/ui/netbird-systemtray-update-disconnected-dark.png b/client/ui/netbird-systemtray-update-disconnected-dark.png new file mode 100644 index 000000000..9e05351f1 Binary files /dev/null and b/client/ui/netbird-systemtray-update-disconnected-dark.png differ diff --git a/client/ui/netbird-systemtray-update-disconnected-macos.png b/client/ui/netbird-systemtray-update-disconnected-macos.png new file mode 100644 index 000000000..8b190034e Binary files /dev/null and b/client/ui/netbird-systemtray-update-disconnected-macos.png differ diff --git a/client/ui/netbird-systemtray-update-disconnected.ico b/client/ui/netbird-systemtray-update-disconnected.ico index 04c35b058..968dc4105 100644 Binary files a/client/ui/netbird-systemtray-update-disconnected.ico and b/client/ui/netbird-systemtray-update-disconnected.ico differ diff --git a/client/ui/netbird-systemtray-update-disconnected.png b/client/ui/netbird-systemtray-update-disconnected.png index 3fbe88953..3adc39034 100644 Binary files a/client/ui/netbird-systemtray-update-disconnected.png and b/client/ui/netbird-systemtray-update-disconnected.png differ diff --git a/client/ui/netbird.png b/client/ui/netbird.png new file mode 100644 index 000000000..a92e9ed4c Binary files /dev/null and b/client/ui/netbird.png differ diff --git a/go.mod b/go.mod index 8fce5aa20..6f6045cb7 100644 --- a/go.mod +++ b/go.mod @@ -102,6 +102,7 @@ require ( gorm.io/driver/postgres v1.5.7 
gorm.io/driver/sqlite v1.5.7 gorm.io/gorm v1.25.12 + gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1 ) require ( @@ -237,7 +238,6 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect - gvisor.dev/gvisor v0.0.0-20231020174304-db3d49b921f9 // indirect k8s.io/apimachinery v0.26.2 // indirect ) @@ -245,7 +245,7 @@ replace github.com/kardianos/service => github.com/netbirdio/service v0.0.0-2024 replace github.com/getlantern/systray => github.com/netbirdio/systray v0.0.0-20231030152038-ef1ed2a27949 -replace golang.zx2c4.com/wireguard => github.com/netbirdio/wireguard-go v0.0.0-20241125150134-f9cdce5e32e9 +replace golang.zx2c4.com/wireguard => github.com/netbirdio/wireguard-go v0.0.0-20241230120307-6a676aebaaf6 replace github.com/cloudflare/circl => github.com/cunicu/circl v0.0.0-20230801113412-fec58fc7b5f6 diff --git a/go.sum b/go.sum index 3ae3aaa58..c0685caa9 100644 --- a/go.sum +++ b/go.sum @@ -535,8 +535,8 @@ github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502 h1:3tHlFmhTdX9ax github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20241010133937-e0df50df217d h1:bRq5TKgC7Iq20pDiuC54yXaWnAVeS5PdGpSokFTlR28= github.com/netbirdio/signal-dispatcher/dispatcher v0.0.0-20241010133937-e0df50df217d/go.mod h1:5/sjFmLb8O96B5737VCqhHyGRzNFIaN/Bu7ZodXc3qQ= -github.com/netbirdio/wireguard-go v0.0.0-20241125150134-f9cdce5e32e9 h1:Pu/7EukijT09ynHUOzQYW7cC3M/BKU8O4qyN/TvTGoY= -github.com/netbirdio/wireguard-go v0.0.0-20241125150134-f9cdce5e32e9/go.mod h1:tkCQ4FQXmpAgYVh++1cq16/dH4QJtmvpRv19DWGAHSA= +github.com/netbirdio/wireguard-go v0.0.0-20241230120307-6a676aebaaf6 h1:X5h5QgP7uHAv78FWgHV8+WYLjHxK9v3ilkVXT1cpCrQ= +github.com/netbirdio/wireguard-go v0.0.0-20241230120307-6a676aebaaf6/go.mod h1:tkCQ4FQXmpAgYVh++1cq16/dH4QJtmvpRv19DWGAHSA= github.com/nicksnyder/go-i18n/v2 v2.4.0 h1:3IcvPOAvnCKwNm0TB0dLDTuawWEj+ax/RERNC+diLMM= github.com/nicksnyder/go-i18n/v2 v2.4.0/go.mod h1:nxYSZE9M0bf3Y70gPQjN9ha7XNHX7gMc814+6wVyEI4= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -1250,8 +1250,8 @@ gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -gvisor.dev/gvisor v0.0.0-20231020174304-db3d49b921f9 h1:sCEaoA7ZmkuFwa2IR61pl4+RYZPwCJOiaSYT0k+BRf8= -gvisor.dev/gvisor v0.0.0-20231020174304-db3d49b921f9/go.mod h1:8hmigyCdYtw5xJGfQDJzSH5Ju8XEIDBnpyi8+O6GRt8= +gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1 h1:qDCwdCWECGnwQSQC01Dpnp09fRHxJs9PbktotUqG+hs= +gvisor.dev/gvisor v0.0.0-20231020174304-b8a429915ff1/go.mod h1:8hmigyCdYtw5xJGfQDJzSH5Ju8XEIDBnpyi8+O6GRt8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/infrastructure_files/docker-compose.yml.tmpl.traefik b/infrastructure_files/docker-compose.yml.tmpl.traefik index 71471c3ef..dcd3f955c 100644 --- 
a/infrastructure_files/docker-compose.yml.tmpl.traefik +++ b/infrastructure_files/docker-compose.yml.tmpl.traefik @@ -67,6 +67,10 @@ services: options: max-size: "500m" max-file: "2" + labels: + - traefik.enable=true + - traefik.http.routers.netbird-relay.rule=Host(`$NETBIRD_DOMAIN`) && PathPrefix(`/relay`) + - traefik.http.services.netbird-relay.loadbalancer.server.port=$NETBIRD_RELAY_PORT # Management management: diff --git a/management/README.md b/management/README.md index f0eb0cb70..1122a9e76 100644 --- a/management/README.md +++ b/management/README.md @@ -111,4 +111,3 @@ Generate gRpc code: #!/bin/bash protoc -I proto/ proto/management.proto --go_out=. --go-grpc_out=. ``` - diff --git a/management/client/client_test.go b/management/client/client_test.go index d2d68b3fb..8afa8c263 100644 --- a/management/client/client_test.go +++ b/management/client/client_test.go @@ -259,8 +259,11 @@ func TestClient_Sync(t *testing.T) { ch := make(chan *mgmtProto.SyncResponse, 1) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { - err = client.Sync(context.Background(), info, func(msg *mgmtProto.SyncResponse) error { + err = client.Sync(ctx, info, func(msg *mgmtProto.SyncResponse) error { ch <- msg return nil }) diff --git a/management/client/rest/accounts_test.go b/management/client/rest/accounts_test.go index 3c1925fbc..621228261 100644 --- a/management/client/rest/accounts_test.go +++ b/management/client/rest/accounts_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -33,7 +38,7 @@ var ( ) func TestAccounts_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/accounts", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Account{testAccount}) _, err := w.Write(retBytes) @@ -47,7 +52,7 @@ func TestAccounts_List_200(t *testing.T) { } func TestAccounts_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/accounts", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -62,7 +67,7 @@ func TestAccounts_List_Err(t *testing.T) { } func TestAccounts_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/accounts/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -87,7 +92,7 @@ func TestAccounts_Update_200(t *testing.T) { } func TestAccounts_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/accounts/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ 
-106,7 +111,7 @@ func TestAccounts_Update_Err(t *testing.T) { } func TestAccounts_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/accounts/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -117,7 +122,7 @@ func TestAccounts_Delete_200(t *testing.T) { } func TestAccounts_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/accounts/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -131,7 +136,7 @@ func TestAccounts_Delete_Err(t *testing.T) { } func TestAccounts_Integration_List(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { accounts, err := c.Accounts.List(context.Background()) require.NoError(t, err) assert.Len(t, accounts, 1) @@ -141,7 +146,7 @@ func TestAccounts_Integration_List(t *testing.T) { } func TestAccounts_Integration_Update(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { accounts, err := c.Accounts.List(context.Background()) require.NoError(t, err) assert.Len(t, accounts, 1) @@ -157,7 +162,7 @@ func TestAccounts_Integration_Update(t *testing.T) { // Account deletion on MySQL and PostgreSQL databases causes unknown errors // func TestAccounts_Integration_Delete(t *testing.T) { -// withBlackBoxServer(t, func(c *Client) { +// withBlackBoxServer(t, func(c *rest.Client) { // accounts, err := c.Accounts.List(context.Background()) // require.NoError(t, err) // assert.Len(t, accounts, 1) diff --git a/management/client/rest/client_test.go b/management/client/rest/client_test.go index a42b12fa3..70e6c73e1 100644 --- a/management/client/rest/client_test.go +++ b/management/client/rest/client_test.go @@ -1,18 +1,22 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "net/http" "net/http/httptest" "testing" + "github.com/netbirdio/netbird/management/client/rest" "github.com/netbirdio/netbird/management/server/http/testing/testing_tools" ) -func withMockClient(callback func(*Client, *http.ServeMux)) { +func withMockClient(callback func(*rest.Client, *http.ServeMux)) { mux := &http.ServeMux{} server := httptest.NewServer(mux) defer server.Close() - c := New(server.URL, "ABC") + c := rest.New(server.URL, "ABC") callback(c, mux) } @@ -20,11 +24,11 @@ func ptr[T any, PT *T](x T) PT { return &x } -func withBlackBoxServer(t *testing.T, callback func(*Client)) { +func withBlackBoxServer(t *testing.T, callback func(*rest.Client)) { t.Helper() handler, _, _ := testing_tools.BuildApiBlackBoxWithDBState(t, "../../server/testdata/store.sql", nil, false) server := httptest.NewServer(handler) defer server.Close() - c := New(server.URL, "nbp_apTmlmUXHSC4PKmHwtIZNaGr8eqcVI2gMURp") + c := rest.New(server.URL, "nbp_apTmlmUXHSC4PKmHwtIZNaGr8eqcVI2gMURp") callback(c) } diff --git a/management/client/rest/dns_test.go b/management/client/rest/dns_test.go index d2c00549c..b2e0a0bee 100644 --- a/management/client/rest/dns_test.go +++ b/management/client/rest/dns_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - 
"github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -25,7 +30,7 @@ var ( ) func TestDNSNameserverGroup_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.NameserverGroup{testNameserverGroup}) _, err := w.Write(retBytes) @@ -39,7 +44,7 @@ func TestDNSNameserverGroup_List_200(t *testing.T) { } func TestDNSNameserverGroup_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -54,7 +59,7 @@ func TestDNSNameserverGroup_List_Err(t *testing.T) { } func TestDNSNameserverGroup_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testNameserverGroup) _, err := w.Write(retBytes) @@ -67,7 +72,7 @@ func TestDNSNameserverGroup_Get_200(t *testing.T) { } func TestDNSNameserverGroup_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -82,7 +87,7 @@ func TestDNSNameserverGroup_Get_Err(t *testing.T) { } func TestDNSNameserverGroup_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -104,7 +109,7 @@ func TestDNSNameserverGroup_Create_200(t *testing.T) { } func TestDNSNameserverGroup_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -121,7 +126,7 @@ func TestDNSNameserverGroup_Create_Err(t *testing.T) { } func TestDNSNameserverGroup_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -143,7 +148,7 @@ func TestDNSNameserverGroup_Update_200(t *testing.T) { } func TestDNSNameserverGroup_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers/Test", func(w 
http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -160,7 +165,7 @@ func TestDNSNameserverGroup_Update_Err(t *testing.T) { } func TestDNSNameserverGroup_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -171,7 +176,7 @@ func TestDNSNameserverGroup_Delete_200(t *testing.T) { } func TestDNSNameserverGroup_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/nameservers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -185,7 +190,7 @@ func TestDNSNameserverGroup_Delete_Err(t *testing.T) { } func TestDNSSettings_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/settings", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testSettings) _, err := w.Write(retBytes) @@ -198,7 +203,7 @@ func TestDNSSettings_Get_200(t *testing.T) { } func TestDNSSettings_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/settings", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -213,7 +218,7 @@ func TestDNSSettings_Get_Err(t *testing.T) { } func TestDNSSettings_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/settings", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -235,7 +240,7 @@ func TestDNSSettings_Update_200(t *testing.T) { } func TestDNSSettings_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/dns/settings", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -255,6 +260,7 @@ func TestDNS_Integration(t *testing.T) { nsGroupReq := api.NameserverGroupRequest{ Description: "Test", Enabled: true, + Domains: []string{}, Groups: []string{"cs1tnh0hhcjnqoiuebeg"}, Name: "test", Nameservers: []api.Nameserver{ @@ -267,7 +273,7 @@ func TestDNS_Integration(t *testing.T) { Primary: true, SearchDomainsEnabled: false, } - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { // Create nsGroup, err := c.DNS.CreateNameserverGroup(context.Background(), nsGroupReq) require.NoError(t, err) diff --git a/management/client/rest/events_test.go b/management/client/rest/events_test.go index 515c227e6..2589193a2 100644 --- a/management/client/rest/events_test.go +++ b/management/client/rest/events_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -6,10 +9,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - 
"github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -20,7 +25,7 @@ var ( ) func TestEvents_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/events", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Event{testEvent}) _, err := w.Write(retBytes) @@ -34,7 +39,7 @@ func TestEvents_List_200(t *testing.T) { } func TestEvents_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/events", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -49,7 +54,7 @@ func TestEvents_List_Err(t *testing.T) { } func TestEvents_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { // Do something that would trigger any event _, err := c.SetupKeys.Create(context.Background(), api.CreateSetupKeyRequest{ Ephemeral: ptr(true), diff --git a/management/client/rest/geo_test.go b/management/client/rest/geo_test.go index dd42ecba8..d24405094 100644 --- a/management/client/rest/geo_test.go +++ b/management/client/rest/geo_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -6,10 +9,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -25,7 +30,7 @@ var ( ) func TestGeo_ListCountries_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/locations/countries", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Country{testCountry}) _, err := w.Write(retBytes) @@ -39,7 +44,7 @@ func TestGeo_ListCountries_200(t *testing.T) { } func TestGeo_ListCountries_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/locations/countries", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -54,7 +59,7 @@ func TestGeo_ListCountries_Err(t *testing.T) { } func TestGeo_ListCountryCities_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/locations/countries/Test/cities", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.City{testCity}) _, err := w.Write(retBytes) @@ -68,7 +73,7 @@ func TestGeo_ListCountryCities_200(t *testing.T) { } func TestGeo_ListCountryCities_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux 
*http.ServeMux) { mux.HandleFunc("/api/locations/countries/Test/cities", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -84,7 +89,7 @@ func TestGeo_ListCountryCities_Err(t *testing.T) { func TestGeo_Integration(t *testing.T) { // Blackbox is initialized with empty GeoLocations - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { countries, err := c.GeoLocation.ListCountries(context.Background()) require.NoError(t, err) assert.Empty(t, countries) diff --git a/management/client/rest/groups_test.go b/management/client/rest/groups_test.go index ac534437d..d6a5410e0 100644 --- a/management/client/rest/groups_test.go +++ b/management/client/rest/groups_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -22,7 +27,7 @@ var ( ) func TestGroups_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Group{testGroup}) _, err := w.Write(retBytes) @@ -36,7 +41,7 @@ func TestGroups_List_200(t *testing.T) { } func TestGroups_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -51,7 +56,7 @@ func TestGroups_List_Err(t *testing.T) { } func TestGroups_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testGroup) _, err := w.Write(retBytes) @@ -64,7 +69,7 @@ func TestGroups_Get_200(t *testing.T) { } func TestGroups_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -79,7 +84,7 @@ func TestGroups_Get_Err(t *testing.T) { } func TestGroups_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -101,7 +106,7 @@ func TestGroups_Create_200(t *testing.T) { } func TestGroups_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", 
Code: 400}) w.WriteHeader(400) @@ -118,7 +123,7 @@ func TestGroups_Create_Err(t *testing.T) { } func TestGroups_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -140,7 +145,7 @@ func TestGroups_Update_200(t *testing.T) { } func TestGroups_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -157,7 +162,7 @@ func TestGroups_Update_Err(t *testing.T) { } func TestGroups_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -168,7 +173,7 @@ func TestGroups_Delete_200(t *testing.T) { } func TestGroups_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/groups/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -182,7 +187,7 @@ func TestGroups_Delete_Err(t *testing.T) { } func TestGroups_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { groups, err := c.Groups.List(context.Background()) require.NoError(t, err) assert.Len(t, groups, 1) diff --git a/management/client/rest/networks_test.go b/management/client/rest/networks_test.go index 934c55380..0772d7540 100644 --- a/management/client/rest/networks_test.go +++ b/management/client/rest/networks_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -30,7 +35,7 @@ var ( ) func TestNetworks_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Network{testNetwork}) _, err := w.Write(retBytes) @@ -44,7 +49,7 @@ func TestNetworks_List_200(t *testing.T) { } func TestNetworks_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -59,7 +64,7 @@ func TestNetworks_List_Err(t *testing.T) { } func TestNetworks_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + 
withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testNetwork) _, err := w.Write(retBytes) @@ -72,7 +77,7 @@ func TestNetworks_Get_200(t *testing.T) { } func TestNetworks_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -87,7 +92,7 @@ func TestNetworks_Get_Err(t *testing.T) { } func TestNetworks_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -109,7 +114,7 @@ func TestNetworks_Create_200(t *testing.T) { } func TestNetworks_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -126,7 +131,7 @@ func TestNetworks_Create_Err(t *testing.T) { } func TestNetworks_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -148,7 +153,7 @@ func TestNetworks_Update_200(t *testing.T) { } func TestNetworks_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -165,7 +170,7 @@ func TestNetworks_Update_Err(t *testing.T) { } func TestNetworks_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -176,7 +181,7 @@ func TestNetworks_Delete_200(t *testing.T) { } func TestNetworks_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -190,7 +195,7 @@ func TestNetworks_Delete_Err(t *testing.T) { } func TestNetworks_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { network, err := c.Networks.Create(context.Background(), api.NetworkRequest{ Description: ptr("TestNetwork"), Name: "Test", @@ -216,7 +221,7 @@ func TestNetworks_Integration(t *testing.T) { } func TestNetworkResources_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources", func(w 
http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.NetworkResource{testNetworkResource}) _, err := w.Write(retBytes) @@ -230,7 +235,7 @@ func TestNetworkResources_List_200(t *testing.T) { } func TestNetworkResources_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -245,7 +250,7 @@ func TestNetworkResources_List_Err(t *testing.T) { } func TestNetworkResources_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testNetworkResource) _, err := w.Write(retBytes) @@ -258,7 +263,7 @@ func TestNetworkResources_Get_200(t *testing.T) { } func TestNetworkResources_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -273,7 +278,7 @@ func TestNetworkResources_Get_Err(t *testing.T) { } func TestNetworkResources_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -295,7 +300,7 @@ func TestNetworkResources_Create_200(t *testing.T) { } func TestNetworkResources_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -312,7 +317,7 @@ func TestNetworkResources_Create_Err(t *testing.T) { } func TestNetworkResources_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -334,7 +339,7 @@ func TestNetworkResources_Update_200(t *testing.T) { } func TestNetworkResources_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -351,7 +356,7 @@ func TestNetworkResources_Update_Err(t *testing.T) { } func TestNetworkResources_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -362,7 +367,7 @@ func TestNetworkResources_Delete_200(t 
*testing.T) { } func TestNetworkResources_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/resources/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -376,7 +381,7 @@ func TestNetworkResources_Delete_Err(t *testing.T) { } func TestNetworkResources_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { _, err := c.Networks.Resources("TestNetwork").Create(context.Background(), api.NetworkResourceRequest{ Address: "test.com", Description: ptr("Description"), @@ -403,7 +408,7 @@ func TestNetworkResources_Integration(t *testing.T) { } func TestNetworkRouters_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.NetworkRouter{testNetworkRouter}) _, err := w.Write(retBytes) @@ -417,7 +422,7 @@ func TestNetworkRouters_List_200(t *testing.T) { } func TestNetworkRouters_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -432,7 +437,7 @@ func TestNetworkRouters_List_Err(t *testing.T) { } func TestNetworkRouters_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testNetworkRouter) _, err := w.Write(retBytes) @@ -445,7 +450,7 @@ func TestNetworkRouters_Get_200(t *testing.T) { } func TestNetworkRouters_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -460,7 +465,7 @@ func TestNetworkRouters_Get_Err(t *testing.T) { } func TestNetworkRouters_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -482,7 +487,7 @@ func TestNetworkRouters_Create_200(t *testing.T) { } func TestNetworkRouters_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -499,7 +504,7 @@ func TestNetworkRouters_Create_Err(t *testing.T) { } func TestNetworkRouters_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers/Test", func(w 
http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -521,7 +526,7 @@ func TestNetworkRouters_Update_200(t *testing.T) { } func TestNetworkRouters_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -538,7 +543,7 @@ func TestNetworkRouters_Update_Err(t *testing.T) { } func TestNetworkRouters_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -549,7 +554,7 @@ func TestNetworkRouters_Delete_200(t *testing.T) { } func TestNetworkRouters_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/networks/Meow/routers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -563,7 +568,7 @@ func TestNetworkRouters_Delete_Err(t *testing.T) { } func TestNetworkRouters_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { _, err := c.Networks.Routers("TestNetwork").Create(context.Background(), api.NetworkRouterRequest{ Enabled: false, Masquerade: false, diff --git a/management/client/rest/peers_test.go b/management/client/rest/peers_test.go index 216ee990c..4c5cd1e60 100644 --- a/management/client/rest/peers_test.go +++ b/management/client/rest/peers_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -24,7 +29,7 @@ var ( ) func TestPeers_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Peer{testPeer}) _, err := w.Write(retBytes) @@ -38,7 +43,7 @@ func TestPeers_List_200(t *testing.T) { } func TestPeers_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -53,7 +58,7 @@ func TestPeers_List_Err(t *testing.T) { } func TestPeers_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testPeer) _, err := w.Write(retBytes) @@ 
-66,7 +71,7 @@ func TestPeers_Get_200(t *testing.T) { } func TestPeers_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -81,7 +86,7 @@ func TestPeers_Get_Err(t *testing.T) { } func TestPeers_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -103,7 +108,7 @@ func TestPeers_Update_200(t *testing.T) { } func TestPeers_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -120,7 +125,7 @@ func TestPeers_Update_Err(t *testing.T) { } func TestPeers_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -131,7 +136,7 @@ func TestPeers_Delete_200(t *testing.T) { } func TestPeers_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -145,7 +150,7 @@ func TestPeers_Delete_Err(t *testing.T) { } func TestPeers_ListAccessiblePeers_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test/accessible-peers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Peer{testPeer}) _, err := w.Write(retBytes) @@ -159,7 +164,7 @@ func TestPeers_ListAccessiblePeers_200(t *testing.T) { } func TestPeers_ListAccessiblePeers_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/peers/Test/accessible-peers", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -174,7 +179,7 @@ func TestPeers_ListAccessiblePeers_Err(t *testing.T) { } func TestPeers_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { peers, err := c.Peers.List(context.Background()) require.NoError(t, err) require.NotEmpty(t, peers) diff --git a/management/client/rest/policies_test.go b/management/client/rest/policies_test.go index f7fc6ff10..5792048df 100644 --- a/management/client/rest/policies_test.go +++ b/management/client/rest/policies_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - 
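The *_Integration tests in these files share a second pattern: withBlackBoxServer (also part of the shared scaffolding, not shown here) starts the prepared blackbox management server and hands the callback a ready *rest.Client, so the test exercises the real HTTP API end to end. Condensed from TestGroups_Integration and TestPeers_Integration in this patch, and assuming a file header like the sketch earlier, such a test looks roughly like this:

// TestGroupsAndPeers_Integration runs against the blackbox management server started
// by the withBlackBoxServer helper.
func TestGroupsAndPeers_Integration(t *testing.T) {
    withBlackBoxServer(t, func(c *rest.Client) {
        groups, err := c.Groups.List(context.Background())
        require.NoError(t, err)
        assert.NotEmpty(t, groups) // the blackbox fixture is seeded with a default group

        peers, err := c.Peers.List(context.Background())
        require.NoError(t, err)
        require.NotEmpty(t, peers)
    })
}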
"github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -22,7 +27,7 @@ var ( ) func TestPolicies_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.Policy{testPolicy}) _, err := w.Write(retBytes) @@ -36,7 +41,7 @@ func TestPolicies_List_200(t *testing.T) { } func TestPolicies_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -51,7 +56,7 @@ func TestPolicies_List_Err(t *testing.T) { } func TestPolicies_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testPolicy) _, err := w.Write(retBytes) @@ -64,7 +69,7 @@ func TestPolicies_Get_200(t *testing.T) { } func TestPolicies_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -79,7 +84,7 @@ func TestPolicies_Get_Err(t *testing.T) { } func TestPolicies_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -101,7 +106,7 @@ func TestPolicies_Create_200(t *testing.T) { } func TestPolicies_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -118,7 +123,7 @@ func TestPolicies_Create_Err(t *testing.T) { } func TestPolicies_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -140,7 +145,7 @@ func TestPolicies_Update_200(t *testing.T) { } func TestPolicies_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -157,7 +162,7 @@ func TestPolicies_Update_Err(t *testing.T) { } func TestPolicies_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + 
withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -168,7 +173,7 @@ func TestPolicies_Delete_200(t *testing.T) { } func TestPolicies_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/policies/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -182,7 +187,7 @@ func TestPolicies_Delete_Err(t *testing.T) { } func TestPolicies_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { policies, err := c.Policies.List(context.Background()) require.NoError(t, err) require.NotEmpty(t, policies) diff --git a/management/client/rest/posturechecks_test.go b/management/client/rest/posturechecks_test.go index 6fefc0140..a891d6ac9 100644 --- a/management/client/rest/posturechecks_test.go +++ b/management/client/rest/posturechecks_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -21,7 +26,7 @@ var ( ) func TestPostureChecks_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.PostureCheck{testPostureCheck}) _, err := w.Write(retBytes) @@ -35,7 +40,7 @@ func TestPostureChecks_List_200(t *testing.T) { } func TestPostureChecks_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -50,7 +55,7 @@ func TestPostureChecks_List_Err(t *testing.T) { } func TestPostureChecks_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testPostureCheck) _, err := w.Write(retBytes) @@ -63,7 +68,7 @@ func TestPostureChecks_Get_200(t *testing.T) { } func TestPostureChecks_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -78,7 +83,7 @@ func TestPostureChecks_Get_Err(t *testing.T) { } func TestPostureChecks_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { 
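withMockClient itself is not touched by this patch beyond its callback now receiving a *rest.Client, and its definition is not shown here. For orientation only, a helper compatible with the call sites in these tests could look roughly like the following; rest.New(baseURL, token) is assumed to be the client constructor, the token value is a placeholder, and the snippet additionally needs net/http and net/http/httptest:

// Hypothetical sketch of the withMockClient test helper; not part of this patch.
func withMockClient(callback func(*rest.Client, *http.ServeMux)) {
    mux := &http.ServeMux{}           // handlers are registered by each test
    server := httptest.NewServer(mux) // throwaway HTTP server backing the client
    defer server.Close()

    client := rest.New(server.URL, "dummy-token") // assumed constructor signature
    callback(client, mux)
}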
mux.HandleFunc("/api/posture-checks", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -100,7 +105,7 @@ func TestPostureChecks_Create_200(t *testing.T) { } func TestPostureChecks_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -117,7 +122,7 @@ func TestPostureChecks_Create_Err(t *testing.T) { } func TestPostureChecks_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -139,7 +144,7 @@ func TestPostureChecks_Update_200(t *testing.T) { } func TestPostureChecks_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -156,7 +161,7 @@ func TestPostureChecks_Update_Err(t *testing.T) { } func TestPostureChecks_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -167,7 +172,7 @@ func TestPostureChecks_Delete_200(t *testing.T) { } func TestPostureChecks_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/posture-checks/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -181,7 +186,7 @@ func TestPostureChecks_Delete_Err(t *testing.T) { } func TestPostureChecks_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { check, err := c.PostureChecks.Create(context.Background(), api.PostureCheckUpdate{ Name: "Test", Description: "Testing", diff --git a/management/client/rest/routes_test.go b/management/client/rest/routes_test.go index 123bd41d4..1c698a7fb 100644 --- a/management/client/rest/routes_test.go +++ b/management/client/rest/routes_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -21,7 +26,7 @@ var ( ) func TestRoutes_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes", func(w http.ResponseWriter, r *http.Request) { 
retBytes, _ := json.Marshal([]api.Route{testRoute}) _, err := w.Write(retBytes) @@ -35,7 +40,7 @@ func TestRoutes_List_200(t *testing.T) { } func TestRoutes_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -50,7 +55,7 @@ func TestRoutes_List_Err(t *testing.T) { } func TestRoutes_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testRoute) _, err := w.Write(retBytes) @@ -63,7 +68,7 @@ func TestRoutes_Get_200(t *testing.T) { } func TestRoutes_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -78,7 +83,7 @@ func TestRoutes_Get_Err(t *testing.T) { } func TestRoutes_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -100,7 +105,7 @@ func TestRoutes_Create_200(t *testing.T) { } func TestRoutes_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -117,7 +122,7 @@ func TestRoutes_Create_Err(t *testing.T) { } func TestRoutes_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -139,7 +144,7 @@ func TestRoutes_Update_200(t *testing.T) { } func TestRoutes_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -156,7 +161,7 @@ func TestRoutes_Update_Err(t *testing.T) { } func TestRoutes_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -167,7 +172,7 @@ func TestRoutes_Delete_200(t *testing.T) { } func TestRoutes_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/routes/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -181,7 +186,7 @@ func 
TestRoutes_Delete_Err(t *testing.T) { } func TestRoutes_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { route, err := c.Routes.Create(context.Background(), api.RouteRequest{ Description: "Meow", Enabled: false, diff --git a/management/client/rest/setupkeys_test.go b/management/client/rest/setupkeys_test.go index 82c3d1fc8..8edce8428 100644 --- a/management/client/rest/setupkeys_test.go +++ b/management/client/rest/setupkeys_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -7,10 +10,12 @@ import ( "net/http" "testing" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -31,7 +36,7 @@ var ( ) func TestSetupKeys_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.SetupKey{testSetupKey}) _, err := w.Write(retBytes) @@ -45,7 +50,7 @@ func TestSetupKeys_List_200(t *testing.T) { } func TestSetupKeys_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -60,7 +65,7 @@ func TestSetupKeys_List_Err(t *testing.T) { } func TestSetupKeys_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testSetupKey) _, err := w.Write(retBytes) @@ -73,7 +78,7 @@ func TestSetupKeys_Get_200(t *testing.T) { } func TestSetupKeys_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -88,7 +93,7 @@ func TestSetupKeys_Get_Err(t *testing.T) { } func TestSetupKeys_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -110,7 +115,7 @@ func TestSetupKeys_Create_200(t *testing.T) { } func TestSetupKeys_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -127,7 +132,7 @@ func TestSetupKeys_Create_Err(t *testing.T) { } func TestSetupKeys_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c 
*rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -149,7 +154,7 @@ func TestSetupKeys_Update_200(t *testing.T) { } func TestSetupKeys_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -166,7 +171,7 @@ func TestSetupKeys_Update_Err(t *testing.T) { } func TestSetupKeys_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -177,7 +182,7 @@ func TestSetupKeys_Delete_200(t *testing.T) { } func TestSetupKeys_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/setup-keys/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -191,7 +196,7 @@ func TestSetupKeys_Delete_Err(t *testing.T) { } func TestSetupKeys_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { group, err := c.Groups.Create(context.Background(), api.GroupRequest{ Name: "Test", }) diff --git a/management/client/rest/tokens_test.go b/management/client/rest/tokens_test.go index 478fae93e..eea55d22f 100644 --- a/management/client/rest/tokens_test.go +++ b/management/client/rest/tokens_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -8,10 +11,12 @@ import ( "testing" "time" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -31,7 +36,7 @@ var ( ) func TestTokens_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.PersonalAccessToken{testToken}) _, err := w.Write(retBytes) @@ -45,7 +50,7 @@ func TestTokens_List_200(t *testing.T) { } func TestTokens_List_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -60,7 +65,7 @@ func TestTokens_List_Err(t *testing.T) { } func TestTokens_Get_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(testToken) _, err := 
w.Write(retBytes) @@ -73,7 +78,7 @@ func TestTokens_Get_200(t *testing.T) { } func TestTokens_Get_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -88,7 +93,7 @@ func TestTokens_Get_Err(t *testing.T) { } func TestTokens_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -110,7 +115,7 @@ func TestTokens_Create_200(t *testing.T) { } func TestTokens_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -127,7 +132,7 @@ func TestTokens_Create_Err(t *testing.T) { } func TestTokens_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -138,7 +143,7 @@ func TestTokens_Delete_200(t *testing.T) { } func TestTokens_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/meow/tokens/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -152,7 +157,7 @@ func TestTokens_Delete_Err(t *testing.T) { } func TestTokens_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c *rest.Client) { tokenClear, err := c.Tokens.Create(context.Background(), "a23efe53-63fb-11ec-90d6-0242ac120003", api.PersonalAccessTokenRequest{ Name: "Test", ExpiresIn: 365, diff --git a/management/client/rest/users_test.go b/management/client/rest/users_test.go index aaec3bf42..2ff8a0327 100644 --- a/management/client/rest/users_test.go +++ b/management/client/rest/users_test.go @@ -1,4 +1,7 @@ -package rest +//go:build integration +// +build integration + +package rest_test import ( "context" @@ -8,10 +11,12 @@ import ( "testing" "time" - "github.com/netbirdio/netbird/management/server/http/api" - "github.com/netbirdio/netbird/management/server/http/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netbirdio/netbird/management/client/rest" + "github.com/netbirdio/netbird/management/server/http/api" + "github.com/netbirdio/netbird/management/server/http/util" ) var ( @@ -34,7 +39,7 @@ var ( ) func TestUsers_List_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal([]api.User{testUser}) _, err := w.Write(retBytes) @@ -48,7 +53,7 @@ func TestUsers_List_200(t *testing.T) { } func TestUsers_List_Err(t *testing.T) { - 
withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -63,7 +68,7 @@ func TestUsers_List_Err(t *testing.T) { } func TestUsers_Create_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -85,7 +90,7 @@ func TestUsers_Create_200(t *testing.T) { } func TestUsers_Create_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -102,7 +107,7 @@ func TestUsers_Create_Err(t *testing.T) { } func TestUsers_Update_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "PUT", r.Method) reqBytes, err := io.ReadAll(r.Body) @@ -125,7 +130,7 @@ func TestUsers_Update_200(t *testing.T) { } func TestUsers_Update_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "No", Code: 400}) w.WriteHeader(400) @@ -142,7 +147,7 @@ func TestUsers_Update_Err(t *testing.T) { } func TestUsers_Delete_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/Test", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "DELETE", r.Method) w.WriteHeader(200) @@ -153,7 +158,7 @@ func TestUsers_Delete_200(t *testing.T) { } func TestUsers_Delete_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/Test", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -167,7 +172,7 @@ func TestUsers_Delete_Err(t *testing.T) { } func TestUsers_ResendInvitation_200(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/Test/invite", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "POST", r.Method) w.WriteHeader(200) @@ -178,7 +183,7 @@ func TestUsers_ResendInvitation_200(t *testing.T) { } func TestUsers_ResendInvitation_Err(t *testing.T) { - withMockClient(func(c *Client, mux *http.ServeMux) { + withMockClient(func(c *rest.Client, mux *http.ServeMux) { mux.HandleFunc("/api/users/Test/invite", func(w http.ResponseWriter, r *http.Request) { retBytes, _ := json.Marshal(util.ErrorResponse{Message: "Not found", Code: 404}) w.WriteHeader(404) @@ -192,7 +197,7 @@ func TestUsers_ResendInvitation_Err(t *testing.T) { } func TestUsers_Integration(t *testing.T) { - withBlackBoxServer(t, func(c *Client) { + withBlackBoxServer(t, func(c 
*rest.Client) { user, err := c.Users.Create(context.Background(), api.UserCreateRequest{ AutoGroups: []string{}, Email: ptr("test@example.com"), diff --git a/management/server/account.go b/management/server/account.go index d87b90244..1fce8bba3 100644 --- a/management/server/account.go +++ b/management/server/account.go @@ -68,7 +68,7 @@ type AccountManager interface { SaveSetupKey(ctx context.Context, accountID string, key *types.SetupKey, userID string) (*types.SetupKey, error) CreateUser(ctx context.Context, accountID, initiatorUserID string, key *types.UserInfo) (*types.UserInfo, error) DeleteUser(ctx context.Context, accountID, initiatorUserID string, targetUserID string) error - DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string) error + DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error InviteUser(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error ListSetupKeys(ctx context.Context, accountID, userID string) ([]*types.SetupKey, error) SaveUser(ctx context.Context, accountID, initiatorUserID string, update *types.User) (*types.UserInfo, error) @@ -80,7 +80,7 @@ type AccountManager interface { GetAccountIDByUserID(ctx context.Context, userID, domain string) (string, error) GetAccountIDFromToken(ctx context.Context, claims jwtclaims.AuthorizationClaims) (string, string, error) CheckUserAccessByJWTGroups(ctx context.Context, claims jwtclaims.AuthorizationClaims) error - GetAccountFromPAT(ctx context.Context, pat string) (*types.Account, *types.User, *types.PersonalAccessToken, error) + GetPATInfo(ctx context.Context, token string) (*types.User, *types.PersonalAccessToken, string, string, error) DeleteAccount(ctx context.Context, accountID, userID string) error MarkPATUsed(ctx context.Context, tokenID string) error GetUserByID(ctx context.Context, id string) (*types.User, error) @@ -97,7 +97,7 @@ type AccountManager interface { DeletePAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenID string) error GetPAT(ctx context.Context, accountID string, initiatorUserID string, targetUserID string, tokenID string) (*types.PersonalAccessToken, error) GetAllPATs(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) ([]*types.PersonalAccessToken, error) - GetUsersFromAccount(ctx context.Context, accountID, userID string) ([]*types.UserInfo, error) + GetUsersFromAccount(ctx context.Context, accountID, userID string) (map[string]*types.UserInfo, error) GetGroup(ctx context.Context, accountId, groupID, userID string) (*types.Group, error) GetAllGroups(ctx context.Context, accountID, userID string) ([]*types.Group, error) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) @@ -150,6 +150,7 @@ type AccountManager interface { GetAccountSettings(ctx context.Context, accountID string, userID string) (*types.Settings, error) DeleteSetupKey(ctx context.Context, accountID, userID, keyID string) error UpdateAccountPeers(ctx context.Context, accountID string) + BuildUserInfosForAccount(ctx context.Context, accountID, initiatorUserID string, accountUsers []*types.User) (map[string]*types.UserInfo, error) } type DefaultAccountManager struct { @@ -622,6 +623,12 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u if user.Role != types.UserRoleOwner { return status.Errorf(status.PermissionDenied, "user is 
not allowed to delete account. Only account owner can delete account") } + + userInfosMap, err := am.BuildUserInfosForAccount(ctx, accountID, userID, maps.Values(account.Users)) + if err != nil { + return status.Errorf(status.Internal, "failed to build user infos for account %s: %v", accountID, err) + } + for _, otherUser := range account.Users { if otherUser.IsServiceUser { continue @@ -631,13 +638,23 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u continue } - deleteUserErr := am.deleteRegularUser(ctx, account, userID, otherUser.Id) + userInfo, ok := userInfosMap[otherUser.Id] + if !ok { + return status.Errorf(status.NotFound, "user info not found for user %s", otherUser.Id) + } + + _, deleteUserErr := am.deleteRegularUser(ctx, accountID, userID, userInfo) if deleteUserErr != nil { return deleteUserErr } } - err = am.deleteRegularUser(ctx, account, userID, userID) + userInfo, ok := userInfosMap[userID] + if !ok { + return status.Errorf(status.NotFound, "user info not found for user %s", userID) + } + + _, err = am.deleteRegularUser(ctx, accountID, userID, userInfo) if err != nil { log.WithContext(ctx).Errorf("failed deleting user %s. error: %s", userID, err) return err @@ -694,20 +711,8 @@ func isNil(i idp.Manager) bool { // addAccountIDToIDPAppMeta update user's app metadata in idp manager func (am *DefaultAccountManager) addAccountIDToIDPAppMeta(ctx context.Context, userID string, accountID string) error { if !isNil(am.idpManager) { - accountUsers, err := am.Store.GetAccountUsers(ctx, store.LockingStrengthShare, accountID) - if err != nil { - return err - } - cachedAccount := &types.Account{ - Id: accountID, - Users: make(map[string]*types.User), - } - for _, user := range accountUsers { - cachedAccount.Users[user.Id] = user - } - // user can be nil if it wasn't found (e.g., just created) - user, err := am.lookupUserInCache(ctx, userID, cachedAccount) + user, err := am.lookupUserInCache(ctx, userID, accountID) if err != nil { return err } @@ -783,10 +788,15 @@ func (am *DefaultAccountManager) lookupUserInCacheByEmail(ctx context.Context, e } // lookupUserInCache looks up user in the IdP cache and returns it. 
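// DeleteAccount above feeds maps.Values(account.Users) into
// BuildUserInfosForAccount, which expects a []*types.User. That works with the
// golang.org/x/exp/maps helper, whose Values returns a slice; the Go 1.23
// standard-library maps.Values returns an iterator instead. A small sketch of
// the difference, assuming the x/exp import is the one in use here:
package main

import (
	"fmt"
	"maps"   // stdlib: Values returns iter.Seq[V]
	"slices"

	xmaps "golang.org/x/exp/maps" // Values returns []V
)

func main() {
	users := map[string]string{"u1": "alice", "u2": "bob"}

	asSlice := xmaps.Values(users)                  // []string, ready to pass on
	collected := slices.Collect(maps.Values(users)) // stdlib needs an extra step

	fmt.Println(len(asSlice), len(collected)) // 2 2
}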
If the user wasn't found, the function returns nil -func (am *DefaultAccountManager) lookupUserInCache(ctx context.Context, userID string, account *types.Account) (*idp.UserData, error) { - users := make(map[string]userLoggedInOnce, len(account.Users)) +func (am *DefaultAccountManager) lookupUserInCache(ctx context.Context, userID string, accountID string) (*idp.UserData, error) { + accountUsers, err := am.Store.GetAccountUsers(ctx, store.LockingStrengthShare, accountID) + if err != nil { + return nil, err + } + + users := make(map[string]userLoggedInOnce, len(accountUsers)) // ignore service users and users provisioned by integrations than are never logged in - for _, user := range account.Users { + for _, user := range accountUsers { if user.IsServiceUser { continue } @@ -795,8 +805,8 @@ func (am *DefaultAccountManager) lookupUserInCache(ctx context.Context, userID s } users[user.Id] = userLoggedInOnce(!user.GetLastLogin().IsZero()) } - log.WithContext(ctx).Debugf("looking up user %s of account %s in cache", userID, account.Id) - userData, err := am.lookupCache(ctx, users, account.Id) + log.WithContext(ctx).Debugf("looking up user %s of account %s in cache", userID, accountID) + userData, err := am.lookupCache(ctx, users, accountID) if err != nil { return nil, err } @@ -809,13 +819,13 @@ func (am *DefaultAccountManager) lookupUserInCache(ctx context.Context, userID s // add extra check on external cache manager. We may get to this point when the user is not yet findable in IDP, // or it didn't have its metadata updated with am.addAccountIDToIDPAppMeta - user, err := account.FindUser(userID) + user, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, userID) if err != nil { - log.WithContext(ctx).Errorf("failed finding user %s in account %s", userID, account.Id) + log.WithContext(ctx).Errorf("failed finding user %s in account %s", userID, accountID) return nil, err } - key := user.IntegrationReference.CacheKey(account.Id, userID) + key := user.IntegrationReference.CacheKey(accountID, userID) ud, err := am.externalCacheManager.Get(am.ctx, key) if err != nil { log.WithContext(ctx).Debugf("failed to get externalCache for key: %s, error: %s", key, err) @@ -1055,9 +1065,9 @@ func (am *DefaultAccountManager) addNewUserToDomainAccount(ctx context.Context, unlockAccount := am.Store.AcquireWriteLockByUID(ctx, domainAccountID) defer unlockAccount() - usersMap := make(map[string]*types.User) - usersMap[claims.UserId] = types.NewRegularUser(claims.UserId) - err := am.Store.SaveUsers(domainAccountID, usersMap) + newUser := types.NewRegularUser(claims.UserId) + newUser.AccountID = domainAccountID + err := am.Store.SaveUser(ctx, store.LockingStrengthUpdate, newUser) if err != nil { return "", err } @@ -1080,12 +1090,7 @@ func (am *DefaultAccountManager) redeemInvite(ctx context.Context, accountID str return nil } - account, err := am.Store.GetAccount(ctx, accountID) - if err != nil { - return err - } - - user, err := am.lookupUserInCache(ctx, userID, account) + user, err := am.lookupUserInCache(ctx, userID, accountID) if err != nil { return err } @@ -1095,17 +1100,17 @@ func (am *DefaultAccountManager) redeemInvite(ctx context.Context, accountID str } if user.AppMetadata.WTPendingInvite != nil && *user.AppMetadata.WTPendingInvite { - log.WithContext(ctx).Infof("redeeming invite for user %s account %s", userID, account.Id) + log.WithContext(ctx).Infof("redeeming invite for user %s account %s", userID, accountID) // User has already logged in, meaning that IdP should have set 
wt_pending_invite to false. // Our job is to just reload cache. go func() { - _, err = am.refreshCache(ctx, account.Id) + _, err = am.refreshCache(ctx, accountID) if err != nil { - log.WithContext(ctx).Warnf("failed reloading cache when redeeming user %s under account %s", userID, account.Id) + log.WithContext(ctx).Warnf("failed reloading cache when redeeming user %s under account %s", userID, accountID) return } - log.WithContext(ctx).Debugf("user %s of account %s redeemed invite", user.ID, account.Id) - am.StoreEvent(ctx, userID, userID, account.Id, activity.UserJoined, nil) + log.WithContext(ctx).Debugf("user %s of account %s redeemed invite", user.ID, accountID) + am.StoreEvent(ctx, userID, userID, accountID, activity.UserJoined, nil) }() } @@ -1114,33 +1119,7 @@ func (am *DefaultAccountManager) redeemInvite(ctx context.Context, accountID str // MarkPATUsed marks a personal access token as used func (am *DefaultAccountManager) MarkPATUsed(ctx context.Context, tokenID string) error { - - user, err := am.Store.GetUserByTokenID(ctx, tokenID) - if err != nil { - return err - } - - account, err := am.Store.GetAccountByUser(ctx, user.Id) - if err != nil { - return err - } - - unlock := am.Store.AcquireWriteLockByUID(ctx, account.Id) - defer unlock() - - account, err = am.Store.GetAccountByUser(ctx, user.Id) - if err != nil { - return err - } - - pat, ok := account.Users[user.Id].PATs[tokenID] - if !ok { - return fmt.Errorf("token not found") - } - - pat.LastUsed = util.ToPtr(time.Now().UTC()) - - return am.Store.SaveAccount(ctx, account) + return am.Store.MarkPATUsed(ctx, store.LockingStrengthUpdate, tokenID) } // GetAccount returns an account associated with this account ID. @@ -1148,52 +1127,64 @@ func (am *DefaultAccountManager) GetAccount(ctx context.Context, accountID strin return am.Store.GetAccount(ctx, accountID) } -// GetAccountFromPAT returns Account and User associated with a personal access token -func (am *DefaultAccountManager) GetAccountFromPAT(ctx context.Context, token string) (*types.Account, *types.User, *types.PersonalAccessToken, error) { +// GetPATInfo retrieves user, personal access token, domain, and category details from a personal access token. +func (am *DefaultAccountManager) GetPATInfo(ctx context.Context, token string) (user *types.User, pat *types.PersonalAccessToken, domain string, category string, err error) { + user, pat, err = am.extractPATFromToken(ctx, token) + if err != nil { + return nil, nil, "", "", err + } + + domain, category, err = am.Store.GetAccountDomainAndCategory(ctx, store.LockingStrengthShare, user.AccountID) + if err != nil { + return nil, nil, "", "", err + } + + return user, pat, domain, category, nil +} + +// extractPATFromToken validates the token structure and retrieves associated User and PAT. 
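// GetPATInfo above replaces GetAccountFromPAT: instead of loading the whole
// account, callers get the user, the PAT and the account's domain/category
// directly, and read the account ID off the user. A hedged before/after sketch
// of a caller migrating to the new signature (error handling trimmed):
//
//	// before
//	account, user, pat, err := manager.GetAccountFromPAT(ctx, token)
//	accountID, domain, category := account.Id, account.Domain, account.DomainCategory
//
//	// after
//	user, pat, domain, category, err := manager.GetPATInfo(ctx, token)
//	accountID := user.AccountID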
+func (am *DefaultAccountManager) extractPATFromToken(ctx context.Context, token string) (*types.User, *types.PersonalAccessToken, error) { if len(token) != types.PATLength { - return nil, nil, nil, fmt.Errorf("token has wrong length") + return nil, nil, fmt.Errorf("token has incorrect length") } prefix := token[:len(types.PATPrefix)] if prefix != types.PATPrefix { - return nil, nil, nil, fmt.Errorf("token has wrong prefix") + return nil, nil, fmt.Errorf("token has wrong prefix") } secret := token[len(types.PATPrefix) : len(types.PATPrefix)+types.PATSecretLength] encodedChecksum := token[len(types.PATPrefix)+types.PATSecretLength : len(types.PATPrefix)+types.PATSecretLength+types.PATChecksumLength] verificationChecksum, err := base62.Decode(encodedChecksum) if err != nil { - return nil, nil, nil, fmt.Errorf("token checksum decoding failed: %w", err) + return nil, nil, fmt.Errorf("token checksum decoding failed: %w", err) } secretChecksum := crc32.ChecksumIEEE([]byte(secret)) if secretChecksum != verificationChecksum { - return nil, nil, nil, fmt.Errorf("token checksum does not match") + return nil, nil, fmt.Errorf("token checksum does not match") } hashedToken := sha256.Sum256([]byte(token)) encodedHashedToken := b64.StdEncoding.EncodeToString(hashedToken[:]) - tokenID, err := am.Store.GetTokenIDByHashedToken(ctx, encodedHashedToken) + + var user *types.User + var pat *types.PersonalAccessToken + + err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error { + pat, err = transaction.GetPATByHashedToken(ctx, store.LockingStrengthShare, encodedHashedToken) + if err != nil { + return err + } + + user, err = transaction.GetUserByPATID(ctx, store.LockingStrengthShare, pat.ID) + return err + }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - user, err := am.Store.GetUserByTokenID(ctx, tokenID) - if err != nil { - return nil, nil, nil, err - } - - account, err := am.Store.GetAccountByUser(ctx, user.Id) - if err != nil { - return nil, nil, nil, err - } - - pat := user.PATs[tokenID] - if pat == nil { - return nil, nil, nil, fmt.Errorf("personal access token not found") - } - - return account, user, pat, nil + return user, pat, nil } // GetAccountByID returns an account associated with this account ID. 
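// extractPATFromToken splits the token into <prefix><secret><checksum>,
// verifies crc32(secret) against the base62-encoded checksum, and only then
// hashes the full token for the store lookup. A self-contained sketch of the
// same shape; the prefix, lengths and base62 alphabet below are illustrative
// stand-ins for the real types.PAT* constants and base62 helper.
package main

import (
	"crypto/sha256"
	b64 "encoding/base64"
	"errors"
	"fmt"
	"hash/crc32"
	"strings"
)

const (
	patPrefix         = "nbp_" // assumed for illustration only
	patSecretLength   = 30
	patChecksumLength = 6
)

func decodeBase62(s string) (uint32, error) {
	const alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	var n uint64
	for _, c := range s {
		idx := strings.IndexRune(alphabet, c)
		if idx < 0 {
			return 0, errors.New("invalid base62 character")
		}
		n = n*62 + uint64(idx)
	}
	return uint32(n), nil
}

// validatePAT returns the base64(sha256(token)) key used for the store lookup.
func validatePAT(token string) (string, error) {
	if len(token) != len(patPrefix)+patSecretLength+patChecksumLength {
		return "", errors.New("token has incorrect length")
	}
	if !strings.HasPrefix(token, patPrefix) {
		return "", errors.New("token has wrong prefix")
	}
	secret := token[len(patPrefix) : len(patPrefix)+patSecretLength]
	checksum, err := decodeBase62(token[len(patPrefix)+patSecretLength:])
	if err != nil {
		return "", fmt.Errorf("token checksum decoding failed: %w", err)
	}
	if crc32.ChecksumIEEE([]byte(secret)) != checksum {
		return "", errors.New("token checksum does not match")
	}
	hashed := sha256.Sum256([]byte(token))
	return b64.StdEncoding.EncodeToString(hashed[:]), nil
}

func main() {
	if _, err := validatePAT("nbp_too-short"); err != nil {
		fmt.Println(err) // token has incorrect length
	}
}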
@@ -1339,7 +1330,7 @@ func (am *DefaultAccountManager) syncJWTGroups(ctx context.Context, accountID st return fmt.Errorf("error getting user peers: %w", err) } - updatedGroups, err := am.updateUserPeersInGroups(groupsMap, peers, addNewGroups, removeOldGroups) + updatedGroups, err := updateUserPeersInGroups(groupsMap, peers, addNewGroups, removeOldGroups) if err != nil { return fmt.Errorf("error modifying user peers in groups: %w", err) } diff --git a/management/server/account_test.go b/management/server/account_test.go index accbf7689..475073c21 100644 --- a/management/server/account_test.go +++ b/management/server/account_test.go @@ -733,6 +733,7 @@ func TestAccountManager_GetAccountFromPAT(t *testing.T) { PATs: map[string]*types.PersonalAccessToken{ "tokenId": { ID: "tokenId", + UserID: "someUser", HashedToken: encodedHashedToken, }, }, @@ -746,14 +747,14 @@ func TestAccountManager_GetAccountFromPAT(t *testing.T) { Store: store, } - account, user, pat, err := am.GetAccountFromPAT(context.Background(), token) + user, pat, _, _, err := am.GetPATInfo(context.Background(), token) if err != nil { t.Fatalf("Error when getting Account from PAT: %s", err) } - assert.Equal(t, "account_id", account.Id) + assert.Equal(t, "account_id", user.AccountID) assert.Equal(t, "someUser", user.Id) - assert.Equal(t, account.Users["someUser"].PATs["tokenId"], pat) + assert.Equal(t, account.Users["someUser"].PATs["tokenId"].ID, pat.ID) } func TestDefaultAccountManager_MarkPATUsed(t *testing.T) { @@ -3018,11 +3019,11 @@ func BenchmarkSyncAndMarkPeer(b *testing.B) { minMsPerOpCICD float64 maxMsPerOpCICD float64 }{ - {"Small", 50, 5, 1, 5, 3, 19}, - {"Medium", 500, 100, 7, 22, 10, 90}, - {"Large", 5000, 200, 65, 110, 60, 240}, + {"Small", 50, 5, 1, 5, 3, 24}, + {"Medium", 500, 100, 7, 22, 10, 135}, + {"Large", 5000, 200, 65, 110, 60, 320}, {"Small single", 50, 10, 1, 4, 3, 80}, - {"Medium single", 500, 10, 7, 13, 10, 37}, + {"Medium single", 500, 10, 7, 13, 10, 43}, {"Large 5", 5000, 15, 65, 80, 60, 220}, } @@ -3087,8 +3088,8 @@ func BenchmarkLoginPeer_ExistingPeer(b *testing.B) { maxMsPerOpCICD float64 }{ {"Small", 50, 5, 2, 10, 3, 35}, - {"Medium", 500, 100, 5, 40, 20, 110}, - {"Large", 5000, 200, 60, 100, 120, 260}, + {"Medium", 500, 100, 5, 40, 20, 140}, + {"Large", 5000, 200, 60, 100, 120, 320}, {"Small single", 50, 10, 2, 10, 5, 40}, {"Medium single", 500, 10, 5, 40, 10, 60}, {"Large 5", 5000, 15, 60, 100, 60, 180}, @@ -3163,9 +3164,9 @@ func BenchmarkLoginPeer_NewPeer(b *testing.B) { }{ {"Small", 50, 5, 7, 20, 10, 80}, {"Medium", 500, 100, 5, 40, 30, 140}, - {"Large", 5000, 200, 80, 120, 140, 300}, + {"Large", 5000, 200, 80, 120, 140, 390}, {"Small single", 50, 10, 7, 20, 10, 80}, - {"Medium single", 500, 10, 5, 40, 20, 60}, + {"Medium single", 500, 10, 5, 40, 20, 85}, {"Large 5", 5000, 15, 80, 120, 80, 200}, } diff --git a/management/server/activity/sqlite/sqlite.go b/management/server/activity/sqlite/sqlite.go index 823e0b4ac..ffb863de9 100644 --- a/management/server/activity/sqlite/sqlite.go +++ b/management/server/activity/sqlite/sqlite.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "path/filepath" + "runtime" "time" _ "github.com/mattn/go-sqlite3" @@ -95,6 +96,7 @@ func NewSQLiteStore(ctx context.Context, dataDir string, encryptionKey string) ( if err != nil { return nil, err } + db.SetMaxOpenConns(runtime.NumCPU()) crypt, err := NewFieldEncrypt(encryptionKey) if err != nil { diff --git a/management/server/dns_test.go b/management/server/dns_test.go index 429f430b6..3318dbaed 100644 --- 
a/management/server/dns_test.go +++ b/management/server/dns_test.go @@ -43,7 +43,7 @@ func TestGetDNSSettings(t *testing.T) { account, err := initTestDNSAccount(t, am) if err != nil { - t.Fatal("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } dnsSettings, err := am.GetDNSSettings(context.Background(), account.Id, dnsAdminUserID) @@ -125,12 +125,12 @@ func TestSaveDNSSettings(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { am, err := createDNSManager(t) if err != nil { - t.Error("failed to create account manager") + t.Fatalf("failed to create account manager") } account, err := initTestDNSAccount(t, am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %v", err) } err = am.SaveDNSSettings(context.Background(), account.Id, testCase.userID, testCase.inputSettings) @@ -157,22 +157,22 @@ func TestGetNetworkMap_DNSConfigSync(t *testing.T) { am, err := createDNSManager(t) if err != nil { - t.Error("failed to create account manager") + t.Fatalf("failed to create account manager: %s", err) } account, err := initTestDNSAccount(t, am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } peer1, err := account.FindPeerByPubKey(dnsPeer1Key) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } peer2, err := account.FindPeerByPubKey(dnsPeer2Key) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } newAccountDNSConfig, err := am.GetNetworkMap(context.Background(), peer1.ID) diff --git a/management/server/geolocation/database.go b/management/server/geolocation/database.go index 21ae93b9d..97ab398fb 100644 --- a/management/server/geolocation/database.go +++ b/management/server/geolocation/database.go @@ -123,7 +123,6 @@ func importCsvToSqlite(dataDir string, csvFile string, geonamesdbFile string) er db, err := gorm.Open(sqlite.Open(path.Join(dataDir, geonamesdbFile)), &gorm.Config{ Logger: logger.Default.LogMode(logger.Silent), CreateBatchSize: 1000, - PrepareStmt: true, }) if err != nil { return err diff --git a/management/server/geolocation/store.go b/management/server/geolocation/store.go index 1f94bf47e..5af8276b5 100644 --- a/management/server/geolocation/store.go +++ b/management/server/geolocation/store.go @@ -132,8 +132,7 @@ func connectDB(ctx context.Context, filePath string) (*gorm.DB, error) { } db, err := gorm.Open(sqlite.Open(storeStr), &gorm.Config{ - Logger: logger.Default.LogMode(logger.Silent), - PrepareStmt: true, + Logger: logger.Default.LogMode(logger.Silent), }) if err != nil { return nil, err diff --git a/management/server/group_test.go b/management/server/group_test.go index cc90f187b..b21b5e834 100644 --- a/management/server/group_test.go +++ b/management/server/group_test.go @@ -29,7 +29,7 @@ func TestDefaultAccountManager_CreateGroup(t *testing.T) { _, account, err := initTestGroupAccount(am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } for _, group := range account.Groups { group.Issued = types.GroupIssuedIntegration @@ -59,12 +59,12 @@ func TestDefaultAccountManager_CreateGroup(t *testing.T) { func TestDefaultAccountManager_DeleteGroup(t *testing.T) { am, err := createManager(t) if err != nil { - t.Error("failed to create account manager") + t.Fatalf("failed to create account manager: %s", err) } _, account, err := 
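// Two storage tweaks appear above: the activity SQLite store now caps its
// connection pool at runtime.NumCPU(), and the geolocation stores drop
// PrepareStmt from their gorm config. A hedged sketch of where those knobs
// live on a gorm handle; the DSN and the sqlite driver import are illustrative
// and may differ from the ones used in this repository.
package main

import (
	"log"
	"runtime"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

func main() {
	db, err := gorm.Open(sqlite.Open("geonames.db"), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
		// PrepareStmt left at its default (false), matching the change above
	})
	if err != nil {
		log.Fatalf("failed to open database: %v", err)
	}

	sqlDB, err := db.DB() // underlying *sql.DB
	if err != nil {
		log.Fatalf("failed to get sql.DB: %v", err)
	}
	sqlDB.SetMaxOpenConns(runtime.NumCPU()) // bound concurrent connections
}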
initTestGroupAccount(am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } testCases := []struct { diff --git a/management/server/grpcserver.go b/management/server/grpcserver.go index 7a0303ce6..905c42144 100644 --- a/management/server/grpcserver.go +++ b/management/server/grpcserver.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "github.com/netbirdio/netbird/encryption" @@ -114,6 +115,18 @@ func NewServer( } func (s *GRPCServer) GetServerKey(ctx context.Context, req *proto.Empty) (*proto.ServerKeyResponse, error) { + ip := "" + p, ok := peer.FromContext(ctx) + if ok { + ip = p.Addr.String() + } + + log.WithContext(ctx).Tracef("GetServerKey request from %s", ip) + start := time.Now() + defer func() { + log.WithContext(ctx).Tracef("GetServerKey from %s took %v", ip, time.Since(start)) + }() + // todo introduce something more meaningful with the key expiration/rotation if s.appMetrics != nil { s.appMetrics.GRPCMetrics().CountGetKeyRequest() @@ -725,6 +738,12 @@ func (s *GRPCServer) sendInitialSync(ctx context.Context, peerKey wgtypes.Key, p // This is used for initiating an Oauth 2 device authorization grant flow // which will be used by our clients to Login func (s *GRPCServer) GetDeviceAuthorizationFlow(ctx context.Context, req *proto.EncryptedMessage) (*proto.EncryptedMessage, error) { + log.WithContext(ctx).Tracef("GetDeviceAuthorizationFlow request for pubKey: %s", req.WgPubKey) + start := time.Now() + defer func() { + log.WithContext(ctx).Tracef("GetDeviceAuthorizationFlow for pubKey: %s took %v", req.WgPubKey, time.Since(start)) + }() + peerKey, err := wgtypes.ParseKey(req.GetWgPubKey()) if err != nil { errMSG := fmt.Sprintf("error while parsing peer's Wireguard public key %s on GetDeviceAuthorizationFlow request.", req.WgPubKey) @@ -777,6 +796,12 @@ func (s *GRPCServer) GetDeviceAuthorizationFlow(ctx context.Context, req *proto. 
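// GetServerKey and GetDeviceAuthorizationFlow above gain trace logging of the
// caller and the elapsed time via a start := time.Now() plus a deferred
// log.Tracef(..., time.Since(start)). A hedged sketch of the same pattern
// factored into a reusable helper; the helper name is illustrative and not
// part of this PR.
package main

import (
	"context"
	"time"

	log "github.com/sirupsen/logrus"
)

// traceDuration logs the start of a handler and, when the returned func runs,
// how long it took. Use it as: defer traceDuration(ctx, "GetServerKey", ip)().
func traceDuration(ctx context.Context, method, caller string) func() {
	start := time.Now()
	log.WithContext(ctx).Tracef("%s request from %s", method, caller)
	return func() {
		log.WithContext(ctx).Tracef("%s from %s took %v", method, caller, time.Since(start))
	}
}

func main() {
	log.SetLevel(log.TraceLevel)
	defer traceDuration(context.Background(), "GetServerKey", "203.0.113.10:51820")()
	time.Sleep(10 * time.Millisecond) // stand-in for the real work
}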
// This is used for initiating an Oauth 2 pkce authorization grant flow // which will be used by our clients to Login func (s *GRPCServer) GetPKCEAuthorizationFlow(ctx context.Context, req *proto.EncryptedMessage) (*proto.EncryptedMessage, error) { + log.WithContext(ctx).Tracef("GetPKCEAuthorizationFlow request for pubKey: %s", req.WgPubKey) + start := time.Now() + defer func() { + log.WithContext(ctx).Tracef("GetPKCEAuthorizationFlow for pubKey %s took %v", req.WgPubKey, time.Since(start)) + }() + peerKey, err := wgtypes.ParseKey(req.GetWgPubKey()) if err != nil { errMSG := fmt.Sprintf("error while parsing peer's Wireguard public key %s on GetPKCEAuthorizationFlow request.", req.WgPubKey) diff --git a/management/server/http/handler.go b/management/server/http/handler.go index a082f50df..eb1cfb5dd 100644 --- a/management/server/http/handler.go +++ b/management/server/http/handler.go @@ -46,7 +46,7 @@ func NewAPIHandler(ctx context.Context, accountManager s.AccountManager, network ) authMiddleware := middleware.NewAuthMiddleware( - accountManager.GetAccountFromPAT, + accountManager.GetPATInfo, jwtValidator.ValidateAndParse, accountManager.MarkPATUsed, accountManager.CheckUserAccessByJWTGroups, diff --git a/management/server/http/handlers/events/events_handler_test.go b/management/server/http/handlers/events/events_handler_test.go index 17478aba3..fd603f289 100644 --- a/management/server/http/handlers/events/events_handler_test.go +++ b/management/server/http/handlers/events/events_handler_test.go @@ -32,8 +32,8 @@ func initEventsTestData(account string, events ...*activity.Event) *handler { GetAccountIDFromTokenFunc: func(_ context.Context, claims jwtclaims.AuthorizationClaims) (string, string, error) { return claims.AccountId, claims.UserId, nil }, - GetUsersFromAccountFunc: func(_ context.Context, accountID, userID string) ([]*types.UserInfo, error) { - return make([]*types.UserInfo, 0), nil + GetUsersFromAccountFunc: func(_ context.Context, accountID, userID string) (map[string]*types.UserInfo, error) { + return make(map[string]*types.UserInfo), nil }, }, claimsExtractor: jwtclaims.NewClaimsExtractor( diff --git a/management/server/http/handlers/networks/handler.go b/management/server/http/handlers/networks/handler.go index 316b93611..f716348d6 100644 --- a/management/server/http/handlers/networks/handler.go +++ b/management/server/http/handlers/networks/handler.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" s "github.com/netbirdio/netbird/management/server" "github.com/netbirdio/netbird/management/server/groups" @@ -281,7 +282,12 @@ func (h *handler) collectIDsInNetwork(ctx context.Context, accountID, userID, ne } if len(router.PeerGroups) > 0 { for _, groupID := range router.PeerGroups { - peerCounter += len(groups[groupID].Peers) + group, ok := groups[groupID] + if !ok { + log.WithContext(ctx).Warnf("group %s not found", groupID) + continue + } + peerCounter += len(group.Peers) } } } diff --git a/management/server/http/handlers/users/users_handler_test.go b/management/server/http/handlers/users/users_handler_test.go index 90081830a..ff77cedff 100644 --- a/management/server/http/handlers/users/users_handler_test.go +++ b/management/server/http/handlers/users/users_handler_test.go @@ -52,7 +52,7 @@ var usersTestAccount = &types.Account{ Issued: types.UserIssuedAPI, }, nonDeletableServiceUserID: { - Id: serviceUserID, + Id: nonDeletableServiceUserID, Role: "admin", IsServiceUser: true, NonDeletable: true, @@ -70,10 +70,10 @@ func 
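// collectIDsInNetwork above now checks the groups map before dereferencing the
// entry; indexing the map directly can panic on a dangling group ID when the
// map holds *Group pointers, because the missing key yields nil. A minimal
// illustration of the guarded lookup:
package main

import "fmt"

type group struct{ Peers []string }

func countPeers(groups map[string]*group, ids []string) int {
	total := 0
	for _, id := range ids {
		g, ok := groups[id]
		if !ok {
			fmt.Printf("group %s not found, skipping\n", id)
			continue // len(groups[id].Peers) would panic here on a nil *group
		}
		total += len(g.Peers)
	}
	return total
}

func main() {
	groups := map[string]*group{"a": {Peers: []string{"p1", "p2"}}}
	fmt.Println(countPeers(groups, []string{"a", "missing"})) // prints 2
}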
initUsersTestData() *handler { GetUserByIDFunc: func(ctx context.Context, id string) (*types.User, error) { return usersTestAccount.Users[id], nil }, - GetUsersFromAccountFunc: func(_ context.Context, accountID, userID string) ([]*types.UserInfo, error) { - users := make([]*types.UserInfo, 0) + GetUsersFromAccountFunc: func(_ context.Context, accountID, userID string) (map[string]*types.UserInfo, error) { + usersInfos := make(map[string]*types.UserInfo) for _, v := range usersTestAccount.Users { - users = append(users, &types.UserInfo{ + usersInfos[v.Id] = &types.UserInfo{ ID: v.Id, Role: string(v.Role), Name: "", @@ -81,9 +81,9 @@ func initUsersTestData() *handler { IsServiceUser: v.IsServiceUser, NonDeletable: v.NonDeletable, Issued: v.Issued, - }) + } } - return users, nil + return usersInfos, nil }, CreateUserFunc: func(_ context.Context, accountID, userID string, key *types.UserInfo) (*types.UserInfo, error) { if userID != existingUserID { diff --git a/management/server/http/middleware/auth_middleware.go b/management/server/http/middleware/auth_middleware.go index 182c30cf6..dcf73259a 100644 --- a/management/server/http/middleware/auth_middleware.go +++ b/management/server/http/middleware/auth_middleware.go @@ -19,8 +19,8 @@ import ( "github.com/netbirdio/netbird/management/server/types" ) -// GetAccountFromPATFunc function -type GetAccountFromPATFunc func(ctx context.Context, token string) (*types.Account, *types.User, *types.PersonalAccessToken, error) +// GetAccountInfoFromPATFunc function +type GetAccountInfoFromPATFunc func(ctx context.Context, token string) (user *types.User, pat *types.PersonalAccessToken, domain string, category string, err error) // ValidateAndParseTokenFunc function type ValidateAndParseTokenFunc func(ctx context.Context, token string) (*jwt.Token, error) @@ -33,7 +33,7 @@ type CheckUserAccessByJWTGroupsFunc func(ctx context.Context, claims jwtclaims.A // AuthMiddleware middleware to verify personal access tokens (PAT) and JWT tokens type AuthMiddleware struct { - getAccountFromPAT GetAccountFromPATFunc + getAccountInfoFromPAT GetAccountInfoFromPATFunc validateAndParseToken ValidateAndParseTokenFunc markPATUsed MarkPATUsedFunc checkUserAccessByJWTGroups CheckUserAccessByJWTGroupsFunc @@ -47,7 +47,7 @@ const ( ) // NewAuthMiddleware instance constructor -func NewAuthMiddleware(getAccountFromPAT GetAccountFromPATFunc, validateAndParseToken ValidateAndParseTokenFunc, +func NewAuthMiddleware(getAccountInfoFromPAT GetAccountInfoFromPATFunc, validateAndParseToken ValidateAndParseTokenFunc, markPATUsed MarkPATUsedFunc, checkUserAccessByJWTGroups CheckUserAccessByJWTGroupsFunc, claimsExtractor *jwtclaims.ClaimsExtractor, audience string, userIdClaim string) *AuthMiddleware { if userIdClaim == "" { @@ -55,7 +55,7 @@ func NewAuthMiddleware(getAccountFromPAT GetAccountFromPATFunc, validateAndParse } return &AuthMiddleware{ - getAccountFromPAT: getAccountFromPAT, + getAccountInfoFromPAT: getAccountInfoFromPAT, validateAndParseToken: validateAndParseToken, markPATUsed: markPATUsed, checkUserAccessByJWTGroups: checkUserAccessByJWTGroups, @@ -151,13 +151,11 @@ func (m *AuthMiddleware) verifyUserAccess(ctx context.Context, validatedToken *j // CheckPATFromRequest checks if the PAT is valid func (m *AuthMiddleware) checkPATFromRequest(w http.ResponseWriter, r *http.Request, auth []string) error { token, err := getTokenFromPATRequest(auth) - - // If an error occurs, call the error handler and return an error if err != nil { - return fmt.Errorf("Error extracting token: %w", 
err) + return fmt.Errorf("error extracting token: %w", err) } - account, user, pat, err := m.getAccountFromPAT(r.Context(), token) + user, pat, accDomain, accCategory, err := m.getAccountInfoFromPAT(r.Context(), token) if err != nil { return fmt.Errorf("invalid Token: %w", err) } @@ -172,9 +170,9 @@ func (m *AuthMiddleware) checkPATFromRequest(w http.ResponseWriter, r *http.Requ claimMaps := jwt.MapClaims{} claimMaps[m.userIDClaim] = user.Id - claimMaps[m.audience+jwtclaims.AccountIDSuffix] = account.Id - claimMaps[m.audience+jwtclaims.DomainIDSuffix] = account.Domain - claimMaps[m.audience+jwtclaims.DomainCategorySuffix] = account.DomainCategory + claimMaps[m.audience+jwtclaims.AccountIDSuffix] = user.AccountID + claimMaps[m.audience+jwtclaims.DomainIDSuffix] = accDomain + claimMaps[m.audience+jwtclaims.DomainCategorySuffix] = accCategory claimMaps[jwtclaims.IsToken] = true jwtToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claimMaps) newRequest := r.WithContext(context.WithValue(r.Context(), jwtclaims.TokenUserProperty, jwtToken)) //nolint diff --git a/management/server/http/middleware/auth_middleware_test.go b/management/server/http/middleware/auth_middleware_test.go index 41bdb7fc5..c1686ed44 100644 --- a/management/server/http/middleware/auth_middleware_test.go +++ b/management/server/http/middleware/auth_middleware_test.go @@ -34,7 +34,8 @@ var testAccount = &types.Account{ Domain: domain, Users: map[string]*types.User{ userID: { - Id: userID, + Id: userID, + AccountID: accountID, PATs: map[string]*types.PersonalAccessToken{ tokenID: { ID: tokenID, @@ -50,11 +51,11 @@ var testAccount = &types.Account{ }, } -func mockGetAccountFromPAT(_ context.Context, token string) (*types.Account, *types.User, *types.PersonalAccessToken, error) { +func mockGetAccountInfoFromPAT(_ context.Context, token string) (user *types.User, pat *types.PersonalAccessToken, domain string, category string, err error) { if token == PAT { - return testAccount, testAccount.Users[userID], testAccount.Users[userID].PATs[tokenID], nil + return testAccount.Users[userID], testAccount.Users[userID].PATs[tokenID], testAccount.Domain, testAccount.DomainCategory, nil } - return nil, nil, nil, fmt.Errorf("PAT invalid") + return nil, nil, "", "", fmt.Errorf("PAT invalid") } func mockValidateAndParseToken(_ context.Context, token string) (*jwt.Token, error) { @@ -166,7 +167,7 @@ func TestAuthMiddleware_Handler(t *testing.T) { ) authMiddleware := NewAuthMiddleware( - mockGetAccountFromPAT, + mockGetAccountInfoFromPAT, mockValidateAndParseToken, mockMarkPATUsed, mockCheckUserAccessByJWTGroups, diff --git a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go index 549a51c0e..0baf76328 100644 --- a/management/server/http/testing/benchmarks/users_handler_benchmark_test.go +++ b/management/server/http/testing/benchmarks/users_handler_benchmark_test.go @@ -35,14 +35,14 @@ var benchCasesUsers = map[string]testing_tools.BenchmarkCase{ func BenchmarkUpdateUser(b *testing.B) { var expectedMetrics = map[string]testing_tools.PerformanceMetrics{ - "Users - XS": {MinMsPerOpLocal: 700, MaxMsPerOpLocal: 1000, MinMsPerOpCICD: 1300, MaxMsPerOpCICD: 8000}, - "Users - S": {MinMsPerOpLocal: 1, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 4, MaxMsPerOpCICD: 50}, - "Users - M": {MinMsPerOpLocal: 20, MaxMsPerOpLocal: 40, MinMsPerOpCICD: 30, MaxMsPerOpCICD: 250}, - "Users - L": {MinMsPerOpLocal: 60, MaxMsPerOpLocal: 100, MinMsPerOpCICD: 90, MaxMsPerOpCICD: 700}, 
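// After a PAT is validated, checkPATFromRequest above fabricates a JWT-shaped
// claims object from the PAT info so downstream handlers can keep using the
// existing claims extractor. A hedged sketch of that idea; the audience and
// claim-key suffixes below are illustrative, the real ones come from the
// jwtclaims package.
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	const (
		audience    = "https://example-audience"
		userIDClaim = "sub"
	)

	claims := jwt.MapClaims{
		userIDClaim:                   "user-id",
		audience + "/account_id":      "account-id",
		audience + "/domain":          "example.com",
		audience + "/domain_category": "private",
		"is_token":                    true,
	}

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	fmt.Println(len(token.Claims.(jwt.MapClaims))) // 5 claims carried in the request context
}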
- "Peers - L": {MinMsPerOpLocal: 300, MaxMsPerOpLocal: 500, MinMsPerOpCICD: 550, MaxMsPerOpCICD: 2400}, - "Groups - L": {MinMsPerOpLocal: 400, MaxMsPerOpLocal: 600, MinMsPerOpCICD: 750, MaxMsPerOpCICD: 5000}, - "Setup Keys - L": {MinMsPerOpLocal: 50, MaxMsPerOpLocal: 200, MinMsPerOpCICD: 130, MaxMsPerOpCICD: 1000}, - "Users - XL": {MinMsPerOpLocal: 350, MaxMsPerOpLocal: 550, MinMsPerOpCICD: 650, MaxMsPerOpCICD: 3500}, + "Users - XS": {MinMsPerOpLocal: 100, MaxMsPerOpLocal: 160, MinMsPerOpCICD: 100, MaxMsPerOpCICD: 310}, + "Users - S": {MinMsPerOpLocal: 0.3, MaxMsPerOpLocal: 3, MinMsPerOpCICD: 1, MaxMsPerOpCICD: 15}, + "Users - M": {MinMsPerOpLocal: 1, MaxMsPerOpLocal: 10, MinMsPerOpCICD: 3, MaxMsPerOpCICD: 20}, + "Users - L": {MinMsPerOpLocal: 5, MaxMsPerOpLocal: 20, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 50}, + "Peers - L": {MinMsPerOpLocal: 80, MaxMsPerOpLocal: 150, MinMsPerOpCICD: 80, MaxMsPerOpCICD: 310}, + "Groups - L": {MinMsPerOpLocal: 10, MaxMsPerOpLocal: 50, MinMsPerOpCICD: 20, MaxMsPerOpCICD: 120}, + "Setup Keys - L": {MinMsPerOpLocal: 5, MaxMsPerOpLocal: 20, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 50}, + "Users - XL": {MinMsPerOpLocal: 30, MaxMsPerOpLocal: 100, MinMsPerOpCICD: 60, MaxMsPerOpCICD: 280}, } log.SetOutput(io.Discard) @@ -118,14 +118,14 @@ func BenchmarkGetOneUser(b *testing.B) { func BenchmarkGetAllUsers(b *testing.B) { var expectedMetrics = map[string]testing_tools.PerformanceMetrics{ - "Users - XS": {MinMsPerOpLocal: 50, MaxMsPerOpLocal: 90, MinMsPerOpCICD: 60, MaxMsPerOpCICD: 180}, - "Users - S": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 2, MinMsPerOpCICD: 0, MaxMsPerOpCICD: 30}, - "Users - M": {MinMsPerOpLocal: 5, MaxMsPerOpLocal: 12, MinMsPerOpCICD: 0, MaxMsPerOpCICD: 30}, - "Users - L": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 2, MinMsPerOpCICD: 0, MaxMsPerOpCICD: 30}, - "Peers - L": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 2, MinMsPerOpCICD: 0, MaxMsPerOpCICD: 30}, - "Groups - L": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 2, MinMsPerOpCICD: 0, MaxMsPerOpCICD: 30}, - "Setup Keys - L": {MinMsPerOpLocal: 40, MaxMsPerOpLocal: 140, MinMsPerOpCICD: 60, MaxMsPerOpCICD: 200}, - "Users - XL": {MinMsPerOpLocal: 15, MaxMsPerOpLocal: 40, MinMsPerOpCICD: 20, MaxMsPerOpCICD: 90}, + "Users - XS": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 2, MinMsPerOpCICD: 0, MaxMsPerOpCICD: 10}, + "Users - S": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 2, MinMsPerOpCICD: 0, MaxMsPerOpCICD: 10}, + "Users - M": {MinMsPerOpLocal: 3, MaxMsPerOpLocal: 10, MinMsPerOpCICD: 5, MaxMsPerOpCICD: 15}, + "Users - L": {MinMsPerOpLocal: 10, MaxMsPerOpLocal: 20, MinMsPerOpCICD: 20, MaxMsPerOpCICD: 50}, + "Peers - L": {MinMsPerOpLocal: 15, MaxMsPerOpLocal: 25, MinMsPerOpCICD: 20, MaxMsPerOpCICD: 55}, + "Groups - L": {MinMsPerOpLocal: 15, MaxMsPerOpLocal: 25, MinMsPerOpCICD: 25, MaxMsPerOpCICD: 55}, + "Setup Keys - L": {MinMsPerOpLocal: 15, MaxMsPerOpLocal: 25, MinMsPerOpCICD: 25, MaxMsPerOpCICD: 55}, + "Users - XL": {MinMsPerOpLocal: 80, MaxMsPerOpLocal: 120, MinMsPerOpCICD: 100, MaxMsPerOpCICD: 300}, } log.SetOutput(io.Discard) @@ -141,7 +141,7 @@ func BenchmarkGetAllUsers(b *testing.B) { b.ResetTimer() start := time.Now() for i := 0; i < b.N; i++ { - req := testing_tools.BuildRequest(b, nil, http.MethodGet, "/api/setup-keys", testing_tools.TestAdminId) + req := testing_tools.BuildRequest(b, nil, http.MethodGet, "/api/users", testing_tools.TestAdminId) apiHandler.ServeHTTP(recorder, req) } @@ -152,14 +152,14 @@ func BenchmarkGetAllUsers(b *testing.B) { func BenchmarkDeleteUsers(b *testing.B) { var expectedMetrics = 
map[string]testing_tools.PerformanceMetrics{ - "Users - XS": {MinMsPerOpLocal: 1000, MaxMsPerOpLocal: 1600, MinMsPerOpCICD: 1900, MaxMsPerOpCICD: 11000}, - "Users - S": {MinMsPerOpLocal: 15, MaxMsPerOpLocal: 40, MinMsPerOpCICD: 30, MaxMsPerOpCICD: 200}, - "Users - M": {MinMsPerOpLocal: 15, MaxMsPerOpLocal: 70, MinMsPerOpCICD: 15, MaxMsPerOpCICD: 230}, - "Users - L": {MinMsPerOpLocal: 15, MaxMsPerOpLocal: 45, MinMsPerOpCICD: 30, MaxMsPerOpCICD: 190}, - "Peers - L": {MinMsPerOpLocal: 400, MaxMsPerOpLocal: 600, MinMsPerOpCICD: 650, MaxMsPerOpCICD: 1800}, - "Groups - L": {MinMsPerOpLocal: 600, MaxMsPerOpLocal: 800, MinMsPerOpCICD: 1200, MaxMsPerOpCICD: 7500}, - "Setup Keys - L": {MinMsPerOpLocal: 20, MaxMsPerOpLocal: 200, MinMsPerOpCICD: 40, MaxMsPerOpCICD: 600}, - "Users - XL": {MinMsPerOpLocal: 50, MaxMsPerOpLocal: 150, MinMsPerOpCICD: 80, MaxMsPerOpCICD: 400}, + "Users - XS": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, + "Users - S": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, + "Users - M": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, + "Users - L": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, + "Peers - L": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, + "Groups - L": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, + "Setup Keys - L": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, + "Users - XL": {MinMsPerOpLocal: 0, MaxMsPerOpLocal: 5, MinMsPerOpCICD: 2, MaxMsPerOpCICD: 15}, } log.SetOutput(io.Discard) diff --git a/management/server/management_suite_test.go b/management/server/management_suite_test.go deleted file mode 100644 index cc99624a0..000000000 --- a/management/server/management_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package server_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestManagement(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Management Service Suite") -} diff --git a/management/server/management_test.go b/management/server/management_test.go index 03b87c33c..ae75acc0f 100644 --- a/management/server/management_test.go +++ b/management/server/management_test.go @@ -6,13 +6,13 @@ import ( "net" "os" "runtime" - sync2 "sync" + "sync" + "testing" "time" pb "github.com/golang/protobuf/proto" //nolint - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -31,424 +31,77 @@ import ( const ( ValidSetupKey = "A2C8E62B-38F5-4553-B31E-DD66C696CEBB" - AccountKey = "bf1c8084-ba50-4ce7-9439-34653001fc3b" ) -var _ = Describe("Management service", func() { - var ( - addr string - s *grpc.Server - dataDir string - client mgmtProto.ManagementServiceClient - serverPubKey wgtypes.Key - conn *grpc.ClientConn - ) - - BeforeEach(func() { - level, _ := log.ParseLevel("Debug") - log.SetLevel(level) - var err error - dataDir, err = os.MkdirTemp("", "netbird_mgmt_test_tmp_*") - Expect(err).NotTo(HaveOccurred()) - - var listener net.Listener - - config := &server.Config{} - _, err = util.ReadJson("testdata/management.json", config) - Expect(err).NotTo(HaveOccurred()) - config.Datadir = dataDir - - s, listener = startServer(config, dataDir, "testdata/store.sql") - addr = listener.Addr().String() - client, conn = createRawClient(addr) - - // s public key - resp, err := client.GetServerKey(context.TODO(), &mgmtProto.Empty{}) - Expect(err).NotTo(HaveOccurred()) - serverPubKey, err = wgtypes.ParseKey(resp.Key) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - s.Stop() - err := conn.Close() - Expect(err).NotTo(HaveOccurred()) - os.RemoveAll(dataDir) - }) - - Context("when calling IsHealthy endpoint", func() { - Specify("a non-error result is returned", func() { - healthy, err := client.IsHealthy(context.TODO(), &mgmtProto.Empty{}) - - Expect(err).NotTo(HaveOccurred()) - Expect(healthy).ToNot(BeNil()) - }) - }) - - Context("when calling Sync endpoint", func() { - Context("when there is a new peer registered", func() { - Specify("a proper configuration is returned", func() { - key, _ := wgtypes.GenerateKey() - loginPeerWithValidSetupKey(serverPubKey, key, client) - - syncReq := &mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}} - encryptedBytes, err := encryption.EncryptMessage(serverPubKey, key, syncReq) - Expect(err).NotTo(HaveOccurred()) - - sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ - WgPubKey: key.PublicKey().String(), - Body: encryptedBytes, - }) - Expect(err).NotTo(HaveOccurred()) - - encryptedResponse := &mgmtProto.EncryptedMessage{} - err = sync.RecvMsg(encryptedResponse) - Expect(err).NotTo(HaveOccurred()) - - resp := &mgmtProto.SyncResponse{} - err = encryption.DecryptMessage(serverPubKey, key, encryptedResponse.Body, resp) - Expect(err).NotTo(HaveOccurred()) - - expectedSignalConfig := &mgmtProto.HostConfig{ - Uri: "signal.netbird.io:10000", - Protocol: mgmtProto.HostConfig_HTTP, - } - expectedStunsConfig := &mgmtProto.HostConfig{ - Uri: "stun:stun.netbird.io:3468", - Protocol: mgmtProto.HostConfig_UDP, - } - expectedTRUNHost := &mgmtProto.HostConfig{ - Uri: "turn:stun.netbird.io:3468", - Protocol: mgmtProto.HostConfig_UDP, - } - - Expect(resp.NetbirdConfig.Signal).To(BeEquivalentTo(expectedSignalConfig)) - Expect(resp.NetbirdConfig.Stuns).To(ConsistOf(expectedStunsConfig)) - // TURN validation is special because credentials are dynamically generated - Expect(resp.NetbirdConfig.Turns).To(HaveLen(1)) - actualTURN := resp.NetbirdConfig.Turns[0] - Expect(len(actualTURN.User) > 0).To(BeTrue()) - Expect(actualTURN.HostConfig).To(BeEquivalentTo(expectedTRUNHost)) - Expect(len(resp.NetworkMap.OfflinePeers) == 0).To(BeTrue()) - }) - }) - - Context("when there are 3 peers registered under one account", 
func() { - Specify("a list containing other 2 peers is returned", func() { - key, _ := wgtypes.GenerateKey() - key1, _ := wgtypes.GenerateKey() - key2, _ := wgtypes.GenerateKey() - loginPeerWithValidSetupKey(serverPubKey, key, client) - loginPeerWithValidSetupKey(serverPubKey, key1, client) - loginPeerWithValidSetupKey(serverPubKey, key2, client) - - messageBytes, err := pb.Marshal(&mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}}) - Expect(err).NotTo(HaveOccurred()) - encryptedBytes, err := encryption.Encrypt(messageBytes, serverPubKey, key) - Expect(err).NotTo(HaveOccurred()) - - sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ - WgPubKey: key.PublicKey().String(), - Body: encryptedBytes, - }) - Expect(err).NotTo(HaveOccurred()) - - encryptedResponse := &mgmtProto.EncryptedMessage{} - err = sync.RecvMsg(encryptedResponse) - Expect(err).NotTo(HaveOccurred()) - decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, serverPubKey, key) - Expect(err).NotTo(HaveOccurred()) - - resp := &mgmtProto.SyncResponse{} - err = pb.Unmarshal(decryptedBytes, resp) - Expect(err).NotTo(HaveOccurred()) - - Expect(resp.GetRemotePeers()).To(HaveLen(2)) - peers := []string{resp.GetRemotePeers()[0].WgPubKey, resp.GetRemotePeers()[1].WgPubKey} - Expect(peers).To(ContainElements(key1.PublicKey().String(), key2.PublicKey().String())) - }) - }) - - Context("when there is a new peer registered", func() { - Specify("an update is returned", func() { - // register only a single peer - key, _ := wgtypes.GenerateKey() - loginPeerWithValidSetupKey(serverPubKey, key, client) - - messageBytes, err := pb.Marshal(&mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}}) - Expect(err).NotTo(HaveOccurred()) - encryptedBytes, err := encryption.Encrypt(messageBytes, serverPubKey, key) - Expect(err).NotTo(HaveOccurred()) - - sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ - WgPubKey: key.PublicKey().String(), - Body: encryptedBytes, - }) - Expect(err).NotTo(HaveOccurred()) - - // after the initial sync call we have 0 peer updates - encryptedResponse := &mgmtProto.EncryptedMessage{} - err = sync.RecvMsg(encryptedResponse) - Expect(err).NotTo(HaveOccurred()) - decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, serverPubKey, key) - Expect(err).NotTo(HaveOccurred()) - resp := &mgmtProto.SyncResponse{} - err = pb.Unmarshal(decryptedBytes, resp) - Expect(resp.GetRemotePeers()).To(HaveLen(0)) - - wg := sync2.WaitGroup{} - wg.Add(1) - - // continue listening on updates for a peer - go func() { - err = sync.RecvMsg(encryptedResponse) - - decryptedBytes, err = encryption.Decrypt(encryptedResponse.Body, serverPubKey, key) - Expect(err).NotTo(HaveOccurred()) - resp = &mgmtProto.SyncResponse{} - err = pb.Unmarshal(decryptedBytes, resp) - wg.Done() - }() - - // register a new peer - key1, _ := wgtypes.GenerateKey() - loginPeerWithValidSetupKey(serverPubKey, key1, client) - - wg.Wait() - - Expect(err).NotTo(HaveOccurred()) - Expect(resp.GetRemotePeers()).To(HaveLen(1)) - Expect(resp.GetRemotePeers()[0].WgPubKey).To(BeEquivalentTo(key1.PublicKey().String())) - }) - }) - }) - - Context("when calling GetServerKey endpoint", func() { - Specify("a public Wireguard key of the service is returned", func() { - resp, err := client.GetServerKey(context.TODO(), &mgmtProto.Empty{}) - - Expect(err).NotTo(HaveOccurred()) - Expect(resp).ToNot(BeNil()) - Expect(resp.Key).ToNot(BeNil()) - Expect(resp.ExpiresAt).ToNot(BeNil()) - - // check if the key is a valid Wireguard key - key, err 
:= wgtypes.ParseKey(resp.Key) - Expect(err).NotTo(HaveOccurred()) - Expect(key).ToNot(BeNil()) - }) - }) - - Context("when calling Login endpoint", func() { - Context("with an invalid setup key", func() { - Specify("an error is returned", func() { - key, _ := wgtypes.GenerateKey() - message, err := encryption.EncryptMessage(serverPubKey, key, &mgmtProto.LoginRequest{SetupKey: "invalid setup key", - Meta: &mgmtProto.PeerSystemMeta{}}) - Expect(err).NotTo(HaveOccurred()) - - resp, err := client.Login(context.TODO(), &mgmtProto.EncryptedMessage{ - WgPubKey: key.PublicKey().String(), - Body: message, - }) - - Expect(err).To(HaveOccurred()) - Expect(resp).To(BeNil()) - }) - }) - - Context("with a valid setup key", func() { - It("a non error result is returned", func() { - key, _ := wgtypes.GenerateKey() - resp := loginPeerWithValidSetupKey(serverPubKey, key, client) - - Expect(resp).ToNot(BeNil()) - }) - }) - - Context("with a registered peer", func() { - It("a non error result is returned", func() { - key, _ := wgtypes.GenerateKey() - regResp := loginPeerWithValidSetupKey(serverPubKey, key, client) - Expect(regResp).NotTo(BeNil()) - - // just login without registration - message, err := encryption.EncryptMessage(serverPubKey, key, &mgmtProto.LoginRequest{Meta: &mgmtProto.PeerSystemMeta{}}) - Expect(err).NotTo(HaveOccurred()) - loginResp, err := client.Login(context.TODO(), &mgmtProto.EncryptedMessage{ - WgPubKey: key.PublicKey().String(), - Body: message, - }) - - Expect(err).NotTo(HaveOccurred()) - - decryptedResp := &mgmtProto.LoginResponse{} - err = encryption.DecryptMessage(serverPubKey, key, loginResp.Body, decryptedResp) - Expect(err).NotTo(HaveOccurred()) - - expectedSignalConfig := &mgmtProto.HostConfig{ - Uri: "signal.netbird.io:10000", - Protocol: mgmtProto.HostConfig_HTTP, - } - expectedStunsConfig := &mgmtProto.HostConfig{ - Uri: "stun:stun.netbird.io:3468", - Protocol: mgmtProto.HostConfig_UDP, - } - expectedTurnsConfig := &mgmtProto.ProtectedHostConfig{ - HostConfig: &mgmtProto.HostConfig{ - Uri: "turn:stun.netbird.io:3468", - Protocol: mgmtProto.HostConfig_UDP, - }, - User: "some_user", - Password: "some_password", - } - - Expect(decryptedResp.GetNetbirdConfig().Signal).To(BeEquivalentTo(expectedSignalConfig)) - Expect(decryptedResp.GetNetbirdConfig().Stuns).To(ConsistOf(expectedStunsConfig)) - Expect(decryptedResp.GetNetbirdConfig().Turns).To(ConsistOf(expectedTurnsConfig)) - }) - }) - }) - - Context("when there are 10 peers registered under one account", func() { - Context("when there are 10 more peers registered under the same account", func() { - Specify("all of the 10 peers will get updates of 10 newly registered peers", func() { - initialPeers := 10 - additionalPeers := 10 - - var peers []wgtypes.Key - for i := 0; i < initialPeers; i++ { - key, _ := wgtypes.GenerateKey() - loginPeerWithValidSetupKey(serverPubKey, key, client) - peers = append(peers, key) - } - - wg := sync2.WaitGroup{} - wg.Add(initialPeers + initialPeers*additionalPeers) - - var clients []mgmtProto.ManagementService_SyncClient - for _, peer := range peers { - messageBytes, err := pb.Marshal(&mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}}) - Expect(err).NotTo(HaveOccurred()) - encryptedBytes, err := encryption.Encrypt(messageBytes, serverPubKey, peer) - Expect(err).NotTo(HaveOccurred()) - - // open stream - sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ - WgPubKey: peer.PublicKey().String(), - Body: encryptedBytes, - }) - Expect(err).NotTo(HaveOccurred()) - clients = 
append(clients, sync) - - // receive stream - peer := peer - go func() { - for { - encryptedResponse := &mgmtProto.EncryptedMessage{} - err = sync.RecvMsg(encryptedResponse) - if err != nil { - break - } - decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, serverPubKey, peer) - Expect(err).NotTo(HaveOccurred()) - - resp := &mgmtProto.SyncResponse{} - err = pb.Unmarshal(decryptedBytes, resp) - Expect(err).NotTo(HaveOccurred()) - if len(resp.GetRemotePeers()) > 0 { - // only consider peer updates - wg.Done() - } - } - }() - } - - time.Sleep(1 * time.Second) - for i := 0; i < additionalPeers; i++ { - key, _ := wgtypes.GenerateKey() - loginPeerWithValidSetupKey(serverPubKey, key, client) - r := rand.New(rand.NewSource(time.Now().UnixNano())) - n := r.Intn(200) - time.Sleep(time.Duration(n) * time.Millisecond) - } - - wg.Wait() - - for _, syncClient := range clients { - err := syncClient.CloseSend() - Expect(err).NotTo(HaveOccurred()) - } - }) - }) - }) - - Context("when there are peers registered under one account concurrently", func() { - Specify("then there are no duplicate IPs", func() { - initialPeers := 30 - - ipChannel := make(chan string, 20) - for i := 0; i < initialPeers; i++ { - go func() { - defer GinkgoRecover() - key, _ := wgtypes.GenerateKey() - loginPeerWithValidSetupKey(serverPubKey, key, client) - syncReq := &mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}} - encryptedBytes, err := encryption.EncryptMessage(serverPubKey, key, syncReq) - Expect(err).NotTo(HaveOccurred()) - - // open stream - sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ - WgPubKey: key.PublicKey().String(), - Body: encryptedBytes, - }) - Expect(err).NotTo(HaveOccurred()) - encryptedResponse := &mgmtProto.EncryptedMessage{} - err = sync.RecvMsg(encryptedResponse) - Expect(err).NotTo(HaveOccurred()) - - resp := &mgmtProto.SyncResponse{} - err = encryption.DecryptMessage(serverPubKey, key, encryptedResponse.Body, resp) - Expect(err).NotTo(HaveOccurred()) - - ipChannel <- resp.GetPeerConfig().Address - }() - } - - ips := make(map[string]struct{}) - for ip := range ipChannel { - if _, ok := ips[ip]; ok { - Fail("found duplicate IP: " + ip) - } - ips[ip] = struct{}{} - if len(ips) == initialPeers { - break - } - } - close(ipChannel) - }) - }) - - Context("after login two peers", func() { - Specify("then they receive the same network", func() { - key, _ := wgtypes.GenerateKey() - firstLogin := loginPeerWithValidSetupKey(serverPubKey, key, client) - key, _ = wgtypes.GenerateKey() - secondLogin := loginPeerWithValidSetupKey(serverPubKey, key, client) - - _, firstLoginNetwork, err := net.ParseCIDR(firstLogin.GetPeerConfig().GetAddress()) - Expect(err).NotTo(HaveOccurred()) - _, secondLoginNetwork, err := net.ParseCIDR(secondLogin.GetPeerConfig().GetAddress()) - Expect(err).NotTo(HaveOccurred()) - - Expect(secondLoginNetwork.String()).To(BeEquivalentTo(firstLoginNetwork.String())) - }) - }) -}) - -func loginPeerWithValidSetupKey(serverPubKey wgtypes.Key, key wgtypes.Key, client mgmtProto.ManagementServiceClient) *mgmtProto.LoginResponse { - defer GinkgoRecover() - +type testSuite struct { + t *testing.T + addr string + grpcServer *grpc.Server + dataDir string + client mgmtProto.ManagementServiceClient + serverPubKey wgtypes.Key + conn *grpc.ClientConn +} + +func setupTest(t *testing.T) *testSuite { + t.Helper() + level, _ := log.ParseLevel("Debug") + log.SetLevel(level) + + ts := &testSuite{t: t} + + var err error + ts.dataDir, err = os.MkdirTemp("", 
"netbird_mgmt_test_tmp_*") + if err != nil { + t.Fatalf("failed to create temp directory: %v", err) + } + + config := &server.Config{} + _, err = util.ReadJson("testdata/management.json", config) + if err != nil { + t.Fatalf("failed to read management.json: %v", err) + } + config.Datadir = ts.dataDir + + var listener net.Listener + ts.grpcServer, listener = startServer(t, config, ts.dataDir, "testdata/store.sql") + ts.addr = listener.Addr().String() + + ts.client, ts.conn = createRawClient(t, ts.addr) + + resp, err := ts.client.GetServerKey(context.TODO(), &mgmtProto.Empty{}) + if err != nil { + t.Fatalf("failed to get server key: %v", err) + } + + serverKey, err := wgtypes.ParseKey(resp.Key) + if err != nil { + t.Fatalf("failed to parse server key: %v", err) + } + ts.serverPubKey = serverKey + + return ts +} + +func tearDownTest(t *testing.T, ts *testSuite) { + t.Helper() + ts.grpcServer.Stop() + if err := ts.conn.Close(); err != nil { + t.Fatalf("failed to close client connection: %v", err) + } + time.Sleep(100 * time.Millisecond) + if err := os.RemoveAll(ts.dataDir); err != nil { + t.Fatalf("failed to remove data directory %s: %v", ts.dataDir, err) + } +} + +func loginPeerWithValidSetupKey( + t *testing.T, + serverPubKey wgtypes.Key, + key wgtypes.Key, + client mgmtProto.ManagementServiceClient, +) *mgmtProto.LoginResponse { + t.Helper() meta := &mgmtProto.PeerSystemMeta{ Hostname: key.PublicKey().String(), GoOS: runtime.GOOS, @@ -458,23 +111,30 @@ func loginPeerWithValidSetupKey(serverPubKey wgtypes.Key, key wgtypes.Key, clien Kernel: "kernel", NetbirdVersion: "", } - message, err := encryption.EncryptMessage(serverPubKey, key, &mgmtProto.LoginRequest{SetupKey: ValidSetupKey, Meta: meta}) - Expect(err).NotTo(HaveOccurred()) + msgToEncrypt := &mgmtProto.LoginRequest{SetupKey: ValidSetupKey, Meta: meta} + message, err := encryption.EncryptMessage(serverPubKey, key, msgToEncrypt) + if err != nil { + t.Fatalf("failed to encrypt login request: %v", err) + } resp, err := client.Login(context.TODO(), &mgmtProto.EncryptedMessage{ WgPubKey: key.PublicKey().String(), Body: message, }) - - Expect(err).NotTo(HaveOccurred()) + if err != nil { + t.Fatalf("login request failed: %v", err) + } loginResp := &mgmtProto.LoginResponse{} err = encryption.DecryptMessage(serverPubKey, key, resp.Body, loginResp) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + t.Fatalf("failed to decrypt login response: %v", err) + } return loginResp } -func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.ClientConn) { +func createRawClient(t *testing.T, addr string) (mgmtProto.ManagementServiceClient, *grpc.ClientConn) { + t.Helper() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -485,17 +145,27 @@ func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.Clie Time: 10 * time.Second, Timeout: 2 * time.Second, })) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + t.Fatalf("failed to dial gRPC server: %v", err) + } return mgmtProto.NewManagementServiceClient(conn), conn } -func startServer(config *server.Config, dataDir string, testFile string) (*grpc.Server, net.Listener) { +func startServer( + t *testing.T, + config *server.Config, + dataDir string, + testFile string, +) (*grpc.Server, net.Listener) { + t.Helper() lis, err := net.Listen("tcp", ":0") - Expect(err).NotTo(HaveOccurred()) + if err != nil { + t.Fatalf("failed to listen on a random port: %v", err) + } s := grpc.NewServer() - store, _, err := 
store.NewTestStoreFromSQL(context.Background(), testFile, dataDir) + str, _, err := store.NewTestStoreFromSQL(context.Background(), testFile, dataDir) if err != nil { log.Fatalf("failed creating a store: %s: %v", config.Datadir, err) } @@ -505,23 +175,534 @@ func startServer(config *server.Config, dataDir string, testFile string) (*grpc. metrics, err := telemetry.NewDefaultAppMetrics(context.Background()) if err != nil { - log.Fatalf("failed creating metrics: %v", err) + t.Fatalf("failed creating metrics: %v", err) } - accountManager, err := server.BuildManager(context.Background(), store, peersUpdateManager, nil, "", "netbird.selfhosted", eventStore, nil, false, server.MocIntegratedValidator{}, metrics, port_forwarding.NewControllerMock()) + accountManager, err := server.BuildManager( + context.Background(), + str, + peersUpdateManager, + nil, + "", + "netbird.selfhosted", + eventStore, + nil, + false, + server.MocIntegratedValidator{}, + metrics, + ) if err != nil { - log.Fatalf("failed creating a manager: %v", err) + t.Fatalf("failed creating an account manager: %v", err) } secretsManager := server.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig, config.Relay) - mgmtServer, err := server.NewServer(context.Background(), config, accountManager, settings.NewManager(store), peersUpdateManager, secretsManager, nil, nil) - Expect(err).NotTo(HaveOccurred()) + mgmtServer, err := server.NewServer( + context.Background(), + config, + accountManager, + settings.NewManager(str), + peersUpdateManager, + secretsManager, + nil, + nil, + ) + if err != nil { + t.Fatalf("failed creating management server: %v", err) + } + mgmtProto.RegisterManagementServiceServer(s, mgmtServer) + go func() { if err := s.Serve(lis); err != nil { - Expect(err).NotTo(HaveOccurred()) + t.Errorf("failed to serve gRPC: %v", err) + return } }() return s, lis } + +func TestIsHealthy(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + healthy, err := ts.client.IsHealthy(context.TODO(), &mgmtProto.Empty{}) + if err != nil { + t.Fatalf("IsHealthy call returned an error: %v", err) + } + if healthy == nil { + t.Fatal("IsHealthy returned a nil response") + } +} + +func TestSyncNewPeerConfiguration(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + peerKey, _ := wgtypes.GenerateKey() + loginPeerWithValidSetupKey(t, ts.serverPubKey, peerKey, ts.client) + + syncReq := &mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}} + encryptedBytes, err := encryption.EncryptMessage(ts.serverPubKey, peerKey, syncReq) + if err != nil { + t.Fatalf("failed to encrypt sync request: %v", err) + } + + syncStream, err := ts.client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ + WgPubKey: peerKey.PublicKey().String(), + Body: encryptedBytes, + }) + if err != nil { + t.Fatalf("failed to call Sync: %v", err) + } + + encryptedResponse := &mgmtProto.EncryptedMessage{} + err = syncStream.RecvMsg(encryptedResponse) + if err != nil { + t.Fatalf("failed to receive sync response message: %v", err) + } + + resp := &mgmtProto.SyncResponse{} + err = encryption.DecryptMessage(ts.serverPubKey, peerKey, encryptedResponse.Body, resp) + if err != nil { + t.Fatalf("failed to decrypt sync response: %v", err) + } + + expectedSignalConfig := &mgmtProto.HostConfig{ + Uri: "signal.netbird.io:10000", + Protocol: mgmtProto.HostConfig_HTTP, + } + expectedStunsConfig := &mgmtProto.HostConfig{ + Uri: "stun:stun.netbird.io:3468", + Protocol: mgmtProto.HostConfig_UDP, + } + expectedTRUNHost := &mgmtProto.HostConfig{ + Uri: 
"turn:stun.netbird.io:3468", + Protocol: mgmtProto.HostConfig_UDP, + } + + assert.NotNil(t, resp.NetbirdConfig) + assert.Equal(t, resp.NetbirdConfig.Signal, expectedSignalConfig) + assert.Contains(t, resp.NetbirdConfig.Stuns, expectedStunsConfig) + assert.Equal(t, len(resp.NetbirdConfig.Turns), 1) + actualTURN := resp.NetbirdConfig.Turns[0] + assert.Greater(t, len(actualTURN.User), 0) + assert.Equal(t, actualTURN.HostConfig, expectedTRUNHost) + assert.Equal(t, len(resp.NetworkMap.OfflinePeers), 0) +} + +func TestSyncThreePeers(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + peerKey, _ := wgtypes.GenerateKey() + peerKey1, _ := wgtypes.GenerateKey() + peerKey2, _ := wgtypes.GenerateKey() + + loginPeerWithValidSetupKey(t, ts.serverPubKey, peerKey, ts.client) + loginPeerWithValidSetupKey(t, ts.serverPubKey, peerKey1, ts.client) + loginPeerWithValidSetupKey(t, ts.serverPubKey, peerKey2, ts.client) + + syncReq := &mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}} + syncBytes, err := pb.Marshal(syncReq) + if err != nil { + t.Fatalf("failed to marshal sync request: %v", err) + } + encryptedBytes, err := encryption.Encrypt(syncBytes, ts.serverPubKey, peerKey) + if err != nil { + t.Fatalf("failed to encrypt sync request: %v", err) + } + + syncStream, err := ts.client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ + WgPubKey: peerKey.PublicKey().String(), + Body: encryptedBytes, + }) + if err != nil { + t.Fatalf("failed to call Sync: %v", err) + } + + encryptedResponse := &mgmtProto.EncryptedMessage{} + err = syncStream.RecvMsg(encryptedResponse) + if err != nil { + t.Fatalf("failed to receive sync response: %v", err) + } + + decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, ts.serverPubKey, peerKey) + if err != nil { + t.Fatalf("failed to decrypt sync response: %v", err) + } + + resp := &mgmtProto.SyncResponse{} + err = pb.Unmarshal(decryptedBytes, resp) + if err != nil { + t.Fatalf("failed to unmarshal sync response: %v", err) + } + + if len(resp.GetRemotePeers()) != 2 { + t.Fatalf("expected 2 remote peers, got %d", len(resp.GetRemotePeers())) + } + + var found1, found2 bool + for _, rp := range resp.GetRemotePeers() { + if rp.WgPubKey == peerKey1.PublicKey().String() { + found1 = true + } else if rp.WgPubKey == peerKey2.PublicKey().String() { + found2 = true + } + } + if !found1 || !found2 { + t.Fatalf("did not find the expected peer keys %s, %s among %v", + peerKey1.PublicKey().String(), + peerKey2.PublicKey().String(), + resp.GetRemotePeers()) + } +} + +func TestSyncNewPeerUpdate(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + peerKey, _ := wgtypes.GenerateKey() + loginPeerWithValidSetupKey(t, ts.serverPubKey, peerKey, ts.client) + + syncReq := &mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}} + syncBytes, err := pb.Marshal(syncReq) + if err != nil { + t.Fatalf("failed to marshal sync request: %v", err) + } + + encryptedBytes, err := encryption.Encrypt(syncBytes, ts.serverPubKey, peerKey) + if err != nil { + t.Fatalf("failed to encrypt sync request: %v", err) + } + + syncStream, err := ts.client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ + WgPubKey: peerKey.PublicKey().String(), + Body: encryptedBytes, + }) + if err != nil { + t.Fatalf("failed to call Sync: %v", err) + } + + encryptedResponse := &mgmtProto.EncryptedMessage{} + err = syncStream.RecvMsg(encryptedResponse) + if err != nil { + t.Fatalf("failed to receive first sync response: %v", err) + } + + decryptedBytes, err := 
encryption.Decrypt(encryptedResponse.Body, ts.serverPubKey, peerKey) + if err != nil { + t.Fatalf("failed to decrypt first sync response: %v", err) + } + + resp := &mgmtProto.SyncResponse{} + if err := pb.Unmarshal(decryptedBytes, resp); err != nil { + t.Fatalf("failed to unmarshal first sync response: %v", err) + } + + if len(resp.GetRemotePeers()) != 0 { + t.Fatalf("expected 0 remote peers at first sync, got %d", len(resp.GetRemotePeers())) + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + encryptedResponse := &mgmtProto.EncryptedMessage{} + err = syncStream.RecvMsg(encryptedResponse) + if err != nil { + t.Errorf("failed to receive second sync response: %v", err) + return + } + + decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, ts.serverPubKey, peerKey) + if err != nil { + t.Errorf("failed to decrypt second sync response: %v", err) + return + } + err = pb.Unmarshal(decryptedBytes, resp) + if err != nil { + t.Errorf("failed to unmarshal second sync response: %v", err) + return + } + }() + + newPeerKey, _ := wgtypes.GenerateKey() + loginPeerWithValidSetupKey(t, ts.serverPubKey, newPeerKey, ts.client) + + wg.Wait() + + if len(resp.GetRemotePeers()) != 1 { + t.Fatalf("expected exactly 1 remote peer update, got %d", len(resp.GetRemotePeers())) + } + if resp.GetRemotePeers()[0].WgPubKey != newPeerKey.PublicKey().String() { + t.Fatalf("expected new peer key %s, got %s", + newPeerKey.PublicKey().String(), + resp.GetRemotePeers()[0].WgPubKey) + } +} + +func TestGetServerKey(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + resp, err := ts.client.GetServerKey(context.TODO(), &mgmtProto.Empty{}) + if err != nil { + t.Fatalf("GetServerKey returned error: %v", err) + } + if resp == nil { + t.Fatal("GetServerKey returned nil response") + } + if resp.Key == "" { + t.Fatal("GetServerKey returned empty key") + } + if resp.ExpiresAt.AsTime().IsZero() { + t.Fatal("GetServerKey returned 0 for ExpiresAt") + } + + _, err = wgtypes.ParseKey(resp.Key) + if err != nil { + t.Fatalf("GetServerKey returned an invalid WG key: %v", err) + } +} + +func TestLoginInvalidSetupKey(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + peerKey, _ := wgtypes.GenerateKey() + request := &mgmtProto.LoginRequest{ + SetupKey: "invalid setup key", + Meta: &mgmtProto.PeerSystemMeta{}, + } + encryptedMsg, err := encryption.EncryptMessage(ts.serverPubKey, peerKey, request) + if err != nil { + t.Fatalf("failed to encrypt login request: %v", err) + } + + resp, err := ts.client.Login(context.TODO(), &mgmtProto.EncryptedMessage{ + WgPubKey: peerKey.PublicKey().String(), + Body: encryptedMsg, + }) + if err == nil { + t.Fatal("expected error for invalid setup key but got nil") + } + if resp != nil { + t.Fatalf("expected nil response for invalid setup key but got: %+v", resp) + } +} + +func TestLoginValidSetupKey(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + peerKey, _ := wgtypes.GenerateKey() + resp := loginPeerWithValidSetupKey(t, ts.serverPubKey, peerKey, ts.client) + if resp == nil { + t.Fatal("loginPeerWithValidSetupKey returned nil, expected a valid response") + } +} + +func TestLoginRegisteredPeer(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + peerKey, _ := wgtypes.GenerateKey() + regResp := loginPeerWithValidSetupKey(t, ts.serverPubKey, peerKey, ts.client) + if regResp == nil { + t.Fatal("registration with valid setup key failed") + } + + loginReq := &mgmtProto.LoginRequest{Meta: &mgmtProto.PeerSystemMeta{}} + 
encryptedLogin, err := encryption.EncryptMessage(ts.serverPubKey, peerKey, loginReq) + if err != nil { + t.Fatalf("failed to encrypt login request: %v", err) + } + loginRespEnc, err := ts.client.Login(context.TODO(), &mgmtProto.EncryptedMessage{ + WgPubKey: peerKey.PublicKey().String(), + Body: encryptedLogin, + }) + if err != nil { + t.Fatalf("login call returned an error: %v", err) + } + + loginResp := &mgmtProto.LoginResponse{} + err = encryption.DecryptMessage(ts.serverPubKey, peerKey, loginRespEnc.Body, loginResp) + if err != nil { + t.Fatalf("failed to decrypt login response: %v", err) + } + + expectedSignalConfig := &mgmtProto.HostConfig{ + Uri: "signal.netbird.io:10000", + Protocol: mgmtProto.HostConfig_HTTP, + } + expectedStunsConfig := &mgmtProto.HostConfig{ + Uri: "stun:stun.netbird.io:3468", + Protocol: mgmtProto.HostConfig_UDP, + } + expectedTurnsConfig := &mgmtProto.ProtectedHostConfig{ + HostConfig: &mgmtProto.HostConfig{ + Uri: "turn:stun.netbird.io:3468", + Protocol: mgmtProto.HostConfig_UDP, + }, + User: "some_user", + Password: "some_password", + } + + assert.NotNil(t, loginResp.GetNetbirdConfig()) + assert.Equal(t, loginResp.GetNetbirdConfig().Signal, expectedSignalConfig) + assert.Contains(t, loginResp.GetNetbirdConfig().Stuns, expectedStunsConfig) + assert.Contains(t, loginResp.GetNetbirdConfig().Turns, expectedTurnsConfig) +} + +func TestSync10PeersGetUpdates(t *testing.T) { + ts := setupTest(t) + defer tearDownTest(t, ts) + + initialPeers := 10 + additionalPeers := 10 + + var peers []wgtypes.Key + for i := 0; i < initialPeers; i++ { + key, _ := wgtypes.GenerateKey() + loginPeerWithValidSetupKey(t, ts.serverPubKey, key, ts.client) + peers = append(peers, key) + } + + var wg sync.WaitGroup + wg.Add(initialPeers + initialPeers*additionalPeers) + + var syncClients []mgmtProto.ManagementService_SyncClient + for _, pk := range peers { + syncReq := &mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}} + msgBytes, err := pb.Marshal(syncReq) + if err != nil { + t.Fatalf("failed to marshal SyncRequest: %v", err) + } + encBytes, err := encryption.Encrypt(msgBytes, ts.serverPubKey, pk) + if err != nil { + t.Fatalf("failed to encrypt SyncRequest: %v", err) + } + + s, err := ts.client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{ + WgPubKey: pk.PublicKey().String(), + Body: encBytes, + }) + if err != nil { + t.Fatalf("failed to call Sync for peer: %v", err) + } + syncClients = append(syncClients, s) + + go func(pk wgtypes.Key, syncStream mgmtProto.ManagementService_SyncClient) { + for { + encMsg := &mgmtProto.EncryptedMessage{} + err := syncStream.RecvMsg(encMsg) + if err != nil { + return + } + decryptedBytes, decErr := encryption.Decrypt(encMsg.Body, ts.serverPubKey, pk) + if decErr != nil { + t.Errorf("failed to decrypt SyncResponse for peer %s: %v", pk.PublicKey().String(), decErr) + return + } + resp := &mgmtProto.SyncResponse{} + umErr := pb.Unmarshal(decryptedBytes, resp) + if umErr != nil { + t.Errorf("failed to unmarshal SyncResponse for peer %s: %v", pk.PublicKey().String(), umErr) + return + } + // We only count if there's a new peer update + if len(resp.GetRemotePeers()) > 0 { + wg.Done() + } + } + }(pk, s) + } + + time.Sleep(500 * time.Millisecond) + for i := 0; i < additionalPeers; i++ { + key, _ := wgtypes.GenerateKey() + loginPeerWithValidSetupKey(t, ts.serverPubKey, key, ts.client) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + n := r.Intn(200) + time.Sleep(time.Duration(n) * time.Millisecond) + } + + wg.Wait() + + for _, sc := range syncClients 
{
+        err := sc.CloseSend()
+        if err != nil {
+            t.Fatalf("failed to close sync client: %v", err)
+        }
+    }
+}
+
+func TestConcurrentPeersNoDuplicateIPs(t *testing.T) {
+    ts := setupTest(t)
+    defer tearDownTest(t, ts)
+
+    initialPeers := 30
+    ipChan := make(chan string, initialPeers)
+
+    var wg sync.WaitGroup
+    wg.Add(initialPeers)
+
+    for i := 0; i < initialPeers; i++ {
+        go func() {
+            defer wg.Done()
+            key, _ := wgtypes.GenerateKey()
+            loginPeerWithValidSetupKey(t, ts.serverPubKey, key, ts.client)
+
+            syncReq := &mgmtProto.SyncRequest{Meta: &mgmtProto.PeerSystemMeta{}}
+            encryptedBytes, err := encryption.EncryptMessage(ts.serverPubKey, key, syncReq)
+            if err != nil {
+                t.Errorf("failed to encrypt sync request: %v", err)
+                return
+            }
+
+            s, err := ts.client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{
+                WgPubKey: key.PublicKey().String(),
+                Body:     encryptedBytes,
+            })
+            if err != nil {
+                t.Errorf("failed to call Sync: %v", err)
+                return
+            }
+
+            encResp := &mgmtProto.EncryptedMessage{}
+            if err = s.RecvMsg(encResp); err != nil {
+                t.Errorf("failed to receive sync response: %v", err)
+                return
+            }
+
+            resp := &mgmtProto.SyncResponse{}
+            if err = encryption.DecryptMessage(ts.serverPubKey, key, encResp.Body, resp); err != nil {
+                t.Errorf("failed to decrypt sync response: %v", err)
+                return
+            }
+            ipChan <- resp.GetPeerConfig().Address
+        }()
+    }
+
+    wg.Wait()
+    close(ipChan)
+
+    ipMap := make(map[string]bool)
+    for ip := range ipChan {
+        if ipMap[ip] {
+            t.Fatalf("found duplicate IP: %s", ip)
+        }
+        ipMap[ip] = true
+    }
+
+    // Ensure we collected all peers
+    if len(ipMap) != initialPeers {
+        t.Fatalf("expected %d unique IPs, got %d", initialPeers, len(ipMap))
+    }
+}
diff --git a/management/server/migration/migration_test.go b/management/server/migration/migration_test.go
index a645ae325..e907d6853 100644
--- a/management/server/migration/migration_test.go
+++ b/management/server/migration/migration_test.go
@@ -21,9 +21,7 @@ import (
 
 func setupDatabase(t *testing.T) *gorm.DB {
     t.Helper()
-    db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{
-        PrepareStmt: true,
-    })
+    db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
     require.NoError(t, err, "Failed to open database")
 
     return db
diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go
index e7f2a3c13..f07d120b8 100644
--- a/management/server/mock_server/account_mock.go
+++ b/management/server/mock_server/account_mock.go
@@ -53,8 +53,8 @@ type MockAccountManager struct {
     SavePolicyFunc    func(ctx context.Context, accountID, userID string, policy *types.Policy) (*types.Policy, error)
     DeletePolicyFunc  func(ctx context.Context, accountID, policyID, userID string) error
     ListPoliciesFunc  func(ctx context.Context, accountID, userID string) ([]*types.Policy, error)
-    GetUsersFromAccountFunc func(ctx context.Context, accountID, userID string) ([]*types.UserInfo, error)
-    GetAccountFromPATFunc   func(ctx context.Context, pat string) (*types.Account, *types.User, *types.PersonalAccessToken, error)
+    GetUsersFromAccountFunc func(ctx context.Context, accountID, userID string) (map[string]*types.UserInfo, error)
+    GetPATInfoFunc          func(ctx context.Context, token
string) (*types.User, *types.PersonalAccessToken, string, string, error) MarkPATUsedFunc func(ctx context.Context, pat string) error UpdatePeerMetaFunc func(ctx context.Context, peerID string, meta nbpeer.PeerSystemMeta) error UpdatePeerFunc func(ctx context.Context, accountID, userID string, peer *nbpeer.Peer) (*nbpeer.Peer, error) @@ -69,7 +69,7 @@ type MockAccountManager struct { SaveOrAddUserFunc func(ctx context.Context, accountID, userID string, user *types.User, addIfNotExists bool) (*types.UserInfo, error) SaveOrAddUsersFunc func(ctx context.Context, accountID, initiatorUserID string, update []*types.User, addIfNotExists bool) ([]*types.UserInfo, error) DeleteUserFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error - DeleteRegularUsersFunc func(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string) error + DeleteRegularUsersFunc func(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error CreatePATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenName string, expiresIn int) (*types.PersonalAccessTokenGenerated, error) DeletePATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenID string) error GetPATFunc func(ctx context.Context, accountID string, initiatorUserID string, targetUserId string, tokenID string) (*types.PersonalAccessToken, error) @@ -110,6 +110,7 @@ type MockAccountManager struct { GetUserByIDFunc func(ctx context.Context, id string) (*types.User, error) GetAccountSettingsFunc func(ctx context.Context, accountID string, userID string) (*types.Settings, error) DeleteSetupKeyFunc func(ctx context.Context, accountID, userID, keyID string) error + BuildUserInfosForAccountFunc func(ctx context.Context, accountID, initiatorUserID string, accountUsers []*types.User) (map[string]*types.UserInfo, error) } func (am *MockAccountManager) UpdateAccountPeers(ctx context.Context, accountID string) { @@ -165,7 +166,7 @@ func (am *MockAccountManager) GetAllGroups(ctx context.Context, accountID, userI } // GetUsersFromAccount mock implementation of GetUsersFromAccount from server.AccountManager interface -func (am *MockAccountManager) GetUsersFromAccount(ctx context.Context, accountID string, userID string) ([]*types.UserInfo, error) { +func (am *MockAccountManager) GetUsersFromAccount(ctx context.Context, accountID string, userID string) (map[string]*types.UserInfo, error) { if am.GetUsersFromAccountFunc != nil { return am.GetUsersFromAccountFunc(ctx, accountID, userID) } @@ -238,12 +239,12 @@ func (am *MockAccountManager) MarkPeerConnected(ctx context.Context, peerKey str return status.Errorf(codes.Unimplemented, "method MarkPeerConnected is not implemented") } -// GetAccountFromPAT mock implementation of GetAccountFromPAT from server.AccountManager interface -func (am *MockAccountManager) GetAccountFromPAT(ctx context.Context, pat string) (*types.Account, *types.User, *types.PersonalAccessToken, error) { - if am.GetAccountFromPATFunc != nil { - return am.GetAccountFromPATFunc(ctx, pat) +// GetPATInfo mock implementation of GetPATInfo from server.AccountManager interface +func (am *MockAccountManager) GetPATInfo(ctx context.Context, pat string) (*types.User, *types.PersonalAccessToken, string, string, error) { + if am.GetPATInfoFunc != nil { + return am.GetPATInfoFunc(ctx, pat) } - return nil, nil, nil, status.Errorf(codes.Unimplemented, "method 
GetAccountFromPAT is not implemented") + return nil, nil, "", "", status.Errorf(codes.Unimplemented, "method GetPATInfo is not implemented") } // DeleteAccount mock implementation of DeleteAccount from server.AccountManager interface @@ -550,9 +551,9 @@ func (am *MockAccountManager) DeleteUser(ctx context.Context, accountID string, } // DeleteRegularUsers mocks DeleteRegularUsers of the AccountManager interface -func (am *MockAccountManager) DeleteRegularUsers(ctx context.Context, accountID string, initiatorUserID string, targetUserIDs []string) error { +func (am *MockAccountManager) DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error { if am.DeleteRegularUsersFunc != nil { - return am.DeleteRegularUsersFunc(ctx, accountID, initiatorUserID, targetUserIDs) + return am.DeleteRegularUsersFunc(ctx, accountID, initiatorUserID, targetUserIDs, userInfos) } return status.Errorf(codes.Unimplemented, "method DeleteRegularUsers is not implemented") } @@ -849,3 +850,11 @@ func (am *MockAccountManager) GetPeerGroups(ctx context.Context, accountID, peer } return nil, status.Errorf(codes.Unimplemented, "method GetPeerGroups is not implemented") } + +// BuildUserInfosForAccount mocks BuildUserInfosForAccount of the AccountManager interface +func (am *MockAccountManager) BuildUserInfosForAccount(ctx context.Context, accountID, initiatorUserID string, accountUsers []*types.User) (map[string]*types.UserInfo, error) { + if am.BuildUserInfosForAccountFunc != nil { + return am.BuildUserInfosForAccountFunc(ctx, accountID, initiatorUserID, accountUsers) + } + return nil, status.Errorf(codes.Unimplemented, "method BuildUserInfosForAccount is not implemented") +} diff --git a/management/server/nameserver_test.go b/management/server/nameserver_test.go index c699e1444..064a645d7 100644 --- a/management/server/nameserver_test.go +++ b/management/server/nameserver_test.go @@ -380,12 +380,12 @@ func TestCreateNameServerGroup(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { am, err := createNSManager(t) if err != nil { - t.Error("failed to create account manager") + t.Fatalf("failed to create account manager: %s", err) } account, err := initTestNSAccount(t, am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } outNSGroup, err := am.CreateNameServerGroup( @@ -608,12 +608,12 @@ func TestSaveNameServerGroup(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { am, err := createNSManager(t) if err != nil { - t.Error("failed to create account manager") + t.Fatalf("failed to create account manager: %s", err) } account, err := initTestNSAccount(t, am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } account.NameServerGroups[testCase.existingNSGroup.ID] = testCase.existingNSGroup @@ -707,7 +707,7 @@ func TestDeleteNameServerGroup(t *testing.T) { account, err := initTestNSAccount(t, am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } account.NameServerGroups[testingNSGroup.ID] = testingNSGroup @@ -742,7 +742,7 @@ func TestGetNameServerGroup(t *testing.T) { account, err := initTestNSAccount(t, am) if err != nil { - t.Error("failed to init testing account") + t.Fatalf("failed to init testing account: %s", err) } foundGroup, err := am.GetNameServerGroup(context.Background(), account.Id, testUserID, existingNSGroupID) @@ -762,6 +762,7 
@@ func TestGetNameServerGroup(t *testing.T) { func createNSManager(t *testing.T) (*DefaultAccountManager, error) { t.Helper() + store, err := createNSStore(t) if err != nil { return nil, err diff --git a/management/server/peer_test.go b/management/server/peer_test.go index e11f32076..9a8118282 100644 --- a/management/server/peer_test.go +++ b/management/server/peer_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + nbAccount "github.com/netbirdio/netbird/management/server/account" "github.com/rs/xid" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -29,7 +30,6 @@ import ( nbdns "github.com/netbirdio/netbird/dns" "github.com/netbirdio/netbird/management/domain" "github.com/netbirdio/netbird/management/proto" - nbAccount "github.com/netbirdio/netbird/management/server/account" "github.com/netbirdio/netbird/management/server/activity" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/posture" @@ -1577,7 +1577,8 @@ func TestPeerAccountPeersUpdate(t *testing.T) { // Adding peer to group linked with policy should update account peers and send peer update t.Run("adding peer to group linked with policy", func(t *testing.T) { _, err = manager.SavePolicy(context.Background(), account.Id, userID, &types.Policy{ - Enabled: true, + AccountID: account.Id, + Enabled: true, Rules: []*types.PolicyRule{ { Enabled: true, diff --git a/management/server/route_test.go b/management/server/route_test.go index e4585753f..c5a5f2040 100644 --- a/management/server/route_test.go +++ b/management/server/route_test.go @@ -13,13 +13,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netbirdio/netbird/management/domain" + "github.com/netbirdio/netbird/management/server/activity" "github.com/netbirdio/netbird/management/server/integrations/port_forwarding" resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types" routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types" networkTypes "github.com/netbirdio/netbird/management/server/networks/types" - - "github.com/netbirdio/netbird/management/domain" - "github.com/netbirdio/netbird/management/server/activity" nbpeer "github.com/netbirdio/netbird/management/server/peer" "github.com/netbirdio/netbird/management/server/store" "github.com/netbirdio/netbird/management/server/telemetry" diff --git a/management/server/status/error.go b/management/server/status/error.go index 7e384922d..96b103183 100644 --- a/management/server/status/error.go +++ b/management/server/status/error.go @@ -93,7 +93,7 @@ func NewPeerNotPartOfAccountError() error { // NewUserNotFoundError creates a new Error with NotFound type for a missing user func NewUserNotFoundError(userKey string) error { - return Errorf(NotFound, "user not found: %s", userKey) + return Errorf(NotFound, "user: %s not found", userKey) } // NewPeerNotRegisteredError creates a new Error with NotFound type for a missing peer @@ -191,3 +191,18 @@ func NewResourceNotPartOfNetworkError(resourceID, networkID string) error { func NewRouterNotPartOfNetworkError(routerID, networkID string) error { return Errorf(BadRequest, "router %s is not part of the network %s", routerID, networkID) } + +// NewServiceUserRoleInvalidError creates a new Error with InvalidArgument type for creating a service user with owner role +func NewServiceUserRoleInvalidError() error { + return Errorf(InvalidArgument, "can't create a service user with owner role") +} + +// 
NewOwnerDeletePermissionError creates a new Error with PermissionDenied type for attempting +// to delete a user with the owner role. +func NewOwnerDeletePermissionError() error { + return Errorf(PermissionDenied, "can't delete a user with the owner role") +} + +func NewPATNotFoundError(patID string) error { + return Errorf(NotFound, "PAT: %s not found", patID) +} diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go index b05b17755..6d86557df 100644 --- a/management/server/store/sql_store.go +++ b/management/server/store/sql_store.go @@ -15,6 +15,7 @@ import ( "sync" "time" + "github.com/netbirdio/netbird/management/server/util" log "github.com/sirupsen/logrus" "gorm.io/driver/mysql" "gorm.io/driver/postgres" @@ -414,24 +415,16 @@ func (s *SqlStore) SavePeerLocation(ctx context.Context, lockStrength LockingStr } // SaveUsers saves the given list of users to the database. -// It updates existing users if a conflict occurs. -func (s *SqlStore) SaveUsers(accountID string, users map[string]*types.User) error { - usersToSave := make([]types.User, 0, len(users)) - for _, user := range users { - user.AccountID = accountID - for id, pat := range user.PATs { - pat.ID = id - user.PATsG = append(user.PATsG, *pat) - } - usersToSave = append(usersToSave, *user) - } - err := s.db.Session(&gorm.Session{FullSaveAssociations: true}). - Clauses(clause.OnConflict{UpdateAll: true}). - Create(&usersToSave).Error - if err != nil { - return status.Errorf(status.Internal, "failed to save users to store: %v", err) +func (s *SqlStore) SaveUsers(ctx context.Context, lockStrength LockingStrength, users []*types.User) error { + if len(users) == 0 { + return nil } + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}, clause.OnConflict{UpdateAll: true}).Create(&users) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to save users to store: %s", result.Error) + return status.Errorf(status.Internal, "failed to save users to store") + } return nil } @@ -439,7 +432,8 @@ func (s *SqlStore) SaveUsers(accountID string, users map[string]*types.User) err func (s *SqlStore) SaveUser(ctx context.Context, lockStrength LockingStrength, user *types.User) error { result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).Save(user) if result.Error != nil { - return status.Errorf(status.Internal, "failed to save user to store: %v", result.Error) + log.WithContext(ctx).Errorf("failed to save user to store: %s", result.Error) + return status.Errorf(status.Internal, "failed to save user to store") } return nil } @@ -450,7 +444,7 @@ func (s *SqlStore) SaveGroups(ctx context.Context, lockStrength LockingStrength, return nil } - result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).Save(&groups) + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}, clause.OnConflict{UpdateAll: true}).Create(&groups) if result.Error != nil { return status.Errorf(status.Internal, "failed to save groups to store: %v", result.Error) } @@ -526,30 +520,17 @@ func (s *SqlStore) GetTokenIDByHashedToken(ctx context.Context, hashedToken stri return token.ID, nil } -func (s *SqlStore) GetUserByTokenID(ctx context.Context, tokenID string) (*types.User, error) { - var token types.PersonalAccessToken - result := s.db.First(&token, idQueryCondition, tokenID) +func (s *SqlStore) GetUserByPATID(ctx context.Context, lockStrength LockingStrength, patID string) (*types.User, error) { + var user types.User + result := s.db.Clauses(clause.Locking{Strength: 
string(lockStrength)}). + Joins("JOIN personal_access_tokens ON personal_access_tokens.user_id = users.id"). + Where("personal_access_tokens.id = ?", patID).First(&user) if result.Error != nil { if errors.Is(result.Error, gorm.ErrRecordNotFound) { - return nil, status.Errorf(status.NotFound, "account not found: index lookup failed") + return nil, status.NewPATNotFoundError(patID) } - log.WithContext(ctx).Errorf("error when getting token from the store: %s", result.Error) - return nil, status.NewGetAccountFromStoreError(result.Error) - } - - if token.UserID == "" { - return nil, status.Errorf(status.NotFound, "account not found: index lookup failed") - } - - var user types.User - result = s.db.Preload("PATsG").First(&user, idQueryCondition, token.UserID) - if result.Error != nil { - return nil, status.Errorf(status.NotFound, "account not found: index lookup failed") - } - - user.PATs = make(map[string]*types.PersonalAccessToken, len(user.PATsG)) - for _, pat := range user.PATsG { - user.PATs[pat.ID] = pat.Copy() + log.WithContext(ctx).Errorf("failed to get token user from the store: %s", result.Error) + return nil, status.NewGetUserFromStoreError() } return &user, nil @@ -557,8 +538,7 @@ func (s *SqlStore) GetUserByTokenID(ctx context.Context, tokenID string) (*types func (s *SqlStore) GetUserByUserID(ctx context.Context, lockStrength LockingStrength, userID string) (*types.User, error) { var user types.User - result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}). - Preload(clause.Associations).First(&user, idQueryCondition, userID) + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).First(&user, idQueryCondition, userID) if result.Error != nil { if errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, status.NewUserNotFoundError(userID) @@ -569,6 +549,25 @@ func (s *SqlStore) GetUserByUserID(ctx context.Context, lockStrength LockingStre return &user, nil } +func (s *SqlStore) DeleteUser(ctx context.Context, lockStrength LockingStrength, accountID, userID string) error { + err := s.db.Transaction(func(tx *gorm.DB) error { + result := tx.Clauses(clause.Locking{Strength: string(lockStrength)}). + Delete(&types.PersonalAccessToken{}, "user_id = ?", userID) + if result.Error != nil { + return result.Error + } + + return tx.Clauses(clause.Locking{Strength: string(lockStrength)}). + Delete(&types.User{}, accountAndIDQueryCondition, accountID, userID).Error + }) + if err != nil { + log.WithContext(ctx).Errorf("failed to delete user from the store: %s", err) + return status.Errorf(status.Internal, "failed to delete user from store") + } + + return nil +} + func (s *SqlStore) GetAccountUsers(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.User, error) { var users []*types.User result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).Find(&users, accountIDCondition, accountID) @@ -899,6 +898,20 @@ func (s *SqlStore) GetAccountSettings(ctx context.Context, lockStrength LockingS return accountSettings.Settings, nil } +func (s *SqlStore) GetAccountCreatedBy(ctx context.Context, lockStrength LockingStrength, accountID string) (string, error) { + var createdBy string + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).Model(&types.Account{}). 
+ Select("created_by").First(&createdBy, idQueryCondition, accountID) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return "", status.NewAccountNotFoundError(accountID) + } + return "", status.NewGetAccountFromStoreError(result.Error) + } + + return createdBy, nil +} + // SaveUserLastLogin stores the last login time for a user in DB. func (s *SqlStore) SaveUserLastLogin(ctx context.Context, accountID, userID string, lastLogin time.Time) error { var user types.User @@ -956,7 +969,7 @@ func NewSqliteStore(ctx context.Context, dataDir string, metrics telemetry.AppMe } file := filepath.Join(dataDir, storeStr) - db, err := gorm.Open(sqlite.Open(file), getGormConfig(SqliteStoreEngine)) + db, err := gorm.Open(sqlite.Open(file), getGormConfig()) if err != nil { return nil, err } @@ -966,7 +979,7 @@ func NewSqliteStore(ctx context.Context, dataDir string, metrics telemetry.AppMe // NewPostgresqlStore creates a new Postgres store. func NewPostgresqlStore(ctx context.Context, dsn string, metrics telemetry.AppMetrics) (*SqlStore, error) { - db, err := gorm.Open(postgres.Open(dsn), getGormConfig(PostgresStoreEngine)) + db, err := gorm.Open(postgres.Open(dsn), getGormConfig()) if err != nil { return nil, err } @@ -976,7 +989,7 @@ func NewPostgresqlStore(ctx context.Context, dsn string, metrics telemetry.AppMe // NewMysqlStore creates a new MySQL store. func NewMysqlStore(ctx context.Context, dsn string, metrics telemetry.AppMetrics) (*SqlStore, error) { - db, err := gorm.Open(mysql.Open(dsn+"?charset=utf8&parseTime=True&loc=Local"), getGormConfig(MysqlStoreEngine)) + db, err := gorm.Open(mysql.Open(dsn+"?charset=utf8&parseTime=True&loc=Local"), getGormConfig()) if err != nil { return nil, err } @@ -984,15 +997,10 @@ func NewMysqlStore(ctx context.Context, dsn string, metrics telemetry.AppMetrics return NewSqlStore(ctx, db, MysqlStoreEngine, metrics) } -func getGormConfig(engine Engine) *gorm.Config { - prepStmt := true - if engine == SqliteStoreEngine { - prepStmt = false - } +func getGormConfig() *gorm.Config { return &gorm.Config{ Logger: logger.Default.LogMode(logger.Silent), CreateBatchSize: 400, - PrepareStmt: prepStmt, } } @@ -2061,3 +2069,94 @@ func (s *SqlStore) DeleteNetworkResource(ctx context.Context, lockStrength Locki return nil } + +// GetPATByHashedToken returns a PersonalAccessToken by its hashed token. +func (s *SqlStore) GetPATByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.PersonalAccessToken, error) { + var pat types.PersonalAccessToken + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).First(&pat, "hashed_token = ?", hashedToken) + if result.Error != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.NewPATNotFoundError(hashedToken) + } + log.WithContext(ctx).Errorf("failed to get pat by hash from the store: %s", result.Error) + return nil, status.Errorf(status.Internal, "failed to get pat by hash from store") + } + + return &pat, nil +} + +// GetPATByID retrieves a personal access token by its ID and user ID. +func (s *SqlStore) GetPATByID(ctx context.Context, lockStrength LockingStrength, userID string, patID string) (*types.PersonalAccessToken, error) { + var pat types.PersonalAccessToken + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}). + First(&pat, "id = ? 
AND user_id = ?", patID, userID) + if err := result.Error; err != nil { + if errors.Is(result.Error, gorm.ErrRecordNotFound) { + return nil, status.NewPATNotFoundError(patID) + } + log.WithContext(ctx).Errorf("failed to get pat from the store: %s", err) + return nil, status.Errorf(status.Internal, "failed to get pat from store") + } + + return &pat, nil +} + +// GetUserPATs retrieves personal access tokens for a user. +func (s *SqlStore) GetUserPATs(ctx context.Context, lockStrength LockingStrength, userID string) ([]*types.PersonalAccessToken, error) { + var pats []*types.PersonalAccessToken + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).Find(&pats, "user_id = ?", userID) + if err := result.Error; err != nil { + log.WithContext(ctx).Errorf("failed to get user pat's from the store: %s", err) + return nil, status.Errorf(status.Internal, "failed to get user pat's from store") + } + + return pats, nil +} + +// MarkPATUsed marks a personal access token as used. +func (s *SqlStore) MarkPATUsed(ctx context.Context, lockStrength LockingStrength, patID string) error { + patCopy := types.PersonalAccessToken{ + LastUsed: util.ToPtr(time.Now().UTC()), + } + + fieldsToUpdate := []string{"last_used"} + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).Select(fieldsToUpdate). + Where(idQueryCondition, patID).Updates(&patCopy) + if result.Error != nil { + log.WithContext(ctx).Errorf("failed to mark pat as used: %s", result.Error) + return status.Errorf(status.Internal, "failed to mark pat as used") + } + + if result.RowsAffected == 0 { + return status.NewPATNotFoundError(patID) + } + + return nil +} + +// SavePAT saves a personal access token to the database. +func (s *SqlStore) SavePAT(ctx context.Context, lockStrength LockingStrength, pat *types.PersonalAccessToken) error { + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}).Save(pat) + if err := result.Error; err != nil { + log.WithContext(ctx).Errorf("failed to save pat to the store: %s", err) + return status.Errorf(status.Internal, "failed to save pat to store") + } + + return nil +} + +// DeletePAT deletes a personal access token from the database. +func (s *SqlStore) DeletePAT(ctx context.Context, lockStrength LockingStrength, userID, patID string) error { + result := s.db.Clauses(clause.Locking{Strength: string(lockStrength)}). + Delete(&types.PersonalAccessToken{}, "user_id = ? 
AND id = ?", userID, patID) + if err := result.Error; err != nil { + log.WithContext(ctx).Errorf("failed to delete pat from the store: %s", err) + return status.Errorf(status.Internal, "failed to delete pat from store") + } + + if result.RowsAffected == 0 { + return status.NewPATNotFoundError(patID) + } + + return nil +} diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go index cf22d5be5..bdb5905bd 100644 --- a/management/server/store/sql_store_test.go +++ b/management/server/store/sql_store_test.go @@ -37,40 +37,44 @@ import ( nbroute "github.com/netbirdio/netbird/route" ) -func TestSqlite_NewStore(t *testing.T) { +func runTestForAllEngines(t *testing.T, testDataFile string, f func(t *testing.T, store Store)) { + t.Helper() + for _, engine := range supportedEngines { + if os.Getenv("NETBIRD_STORE_ENGINE") != "" && os.Getenv("NETBIRD_STORE_ENGINE") != string(engine) { + continue + } + t.Setenv("NETBIRD_STORE_ENGINE", string(engine)) + store, cleanUp, err := NewTestStoreFromSQL(context.Background(), testDataFile, t.TempDir()) + t.Cleanup(cleanUp) + assert.NoError(t, err) + t.Run(string(engine), func(t *testing.T) { + f(t, store) + }) + os.Unsetenv("NETBIRD_STORE_ENGINE") + } +} + +func Test_NewStore(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("The SQLite store is not properly supported by Windows yet") } - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) - - if len(store.GetAllAccounts(context.Background())) != 0 { - t.Errorf("expected to create a new empty Accounts map when creating a new FileStore") - } + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + if store == nil { + t.Errorf("expected to create a new Store") + } + if len(store.GetAllAccounts(context.Background())) != 0 { + t.Errorf("expected to create a new empty Accounts map when creating a new FileStore") + } + }) } -func TestSqlite_SaveAccount_Large(t *testing.T) { +func Test_SaveAccount_Large(t *testing.T) { if (os.Getenv("CI") == "true" && runtime.GOOS == "darwin") || runtime.GOOS == "windows" { t.Skip("skip CI tests on darwin and windows") } - t.Run("SQLite", func(t *testing.T) { - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) - runLargeTest(t, store) - }) - - // create store outside to have a better time counter for the test - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) - t.Run("PostgreSQL", func(t *testing.T) { + runTestForAllEngines(t, "", func(t *testing.T, store Store) { runLargeTest(t, store) }) } @@ -215,77 +219,74 @@ func randomIPv4() net.IP { return net.IP(b) } -func TestSqlite_SaveAccount(t *testing.T) { +func Test_SaveAccount(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("The SQLite store is not properly supported by Windows yet") } - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) + runTestForAllEngines(t, "", func(t *testing.T, store Store) { + account := newAccountWithId(context.Background(), "account_id", "testuser", "") + setupKey, _ := types.GenerateDefaultSetupKey() + 
account.SetupKeys[setupKey.Key] = setupKey + account.Peers["testpeer"] = &nbpeer.Peer{ + Key: "peerkey", + IP: net.IP{127, 0, 0, 1}, + Meta: nbpeer.PeerSystemMeta{}, + Name: "peer name", + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now().UTC()}, + } - account := newAccountWithId(context.Background(), "account_id", "testuser", "") - setupKey, _ := types.GenerateDefaultSetupKey() - account.SetupKeys[setupKey.Key] = setupKey - account.Peers["testpeer"] = &nbpeer.Peer{ - Key: "peerkey", - IP: net.IP{127, 0, 0, 1}, - Meta: nbpeer.PeerSystemMeta{}, - Name: "peer name", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now().UTC()}, - } + err := store.SaveAccount(context.Background(), account) + require.NoError(t, err) - err = store.SaveAccount(context.Background(), account) - require.NoError(t, err) + account2 := newAccountWithId(context.Background(), "account_id2", "testuser2", "") + setupKey, _ = types.GenerateDefaultSetupKey() + account2.SetupKeys[setupKey.Key] = setupKey + account2.Peers["testpeer2"] = &nbpeer.Peer{ + Key: "peerkey2", + IP: net.IP{127, 0, 0, 2}, + Meta: nbpeer.PeerSystemMeta{}, + Name: "peer name 2", + Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now().UTC()}, + } - account2 := newAccountWithId(context.Background(), "account_id2", "testuser2", "") - setupKey, _ = types.GenerateDefaultSetupKey() - account2.SetupKeys[setupKey.Key] = setupKey - account2.Peers["testpeer2"] = &nbpeer.Peer{ - Key: "peerkey2", - IP: net.IP{127, 0, 0, 2}, - Meta: nbpeer.PeerSystemMeta{}, - Name: "peer name 2", - Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now().UTC()}, - } + err = store.SaveAccount(context.Background(), account2) + require.NoError(t, err) - err = store.SaveAccount(context.Background(), account2) - require.NoError(t, err) + if len(store.GetAllAccounts(context.Background())) != 2 { + t.Errorf("expecting 2 Accounts to be stored after SaveAccount()") + } - if len(store.GetAllAccounts(context.Background())) != 2 { - t.Errorf("expecting 2 Accounts to be stored after SaveAccount()") - } + a, err := store.GetAccount(context.Background(), account.Id) + if a == nil { + t.Errorf("expecting Account to be stored after SaveAccount(): %v", err) + } - a, err := store.GetAccount(context.Background(), account.Id) - if a == nil { - t.Errorf("expecting Account to be stored after SaveAccount(): %v", err) - } + if a != nil && len(a.Policies) != 1 { + t.Errorf("expecting Account to have one policy stored after SaveAccount(), got %d", len(a.Policies)) + } - if a != nil && len(a.Policies) != 1 { - t.Errorf("expecting Account to have one policy stored after SaveAccount(), got %d", len(a.Policies)) - } + if a != nil && len(a.Policies[0].Rules) != 1 { + t.Errorf("expecting Account to have one policy rule stored after SaveAccount(), got %d", len(a.Policies[0].Rules)) + return + } - if a != nil && len(a.Policies[0].Rules) != 1 { - t.Errorf("expecting Account to have one policy rule stored after SaveAccount(), got %d", len(a.Policies[0].Rules)) - return - } + if a, err := store.GetAccountByPeerPubKey(context.Background(), "peerkey"); a == nil { + t.Errorf("expecting PeerKeyID2AccountID index updated after SaveAccount(): %v", err) + } - if a, err := store.GetAccountByPeerPubKey(context.Background(), "peerkey"); a == nil { - t.Errorf("expecting PeerKeyID2AccountID index updated after SaveAccount(): %v", err) - } + if a, err := store.GetAccountByUser(context.Background(), "testuser"); a == nil { + t.Errorf("expecting UserID2AccountID index updated after 
SaveAccount(): %v", err) + } - if a, err := store.GetAccountByUser(context.Background(), "testuser"); a == nil { - t.Errorf("expecting UserID2AccountID index updated after SaveAccount(): %v", err) - } + if a, err := store.GetAccountByPeerID(context.Background(), "testpeer"); a == nil { + t.Errorf("expecting PeerID2AccountID index updated after SaveAccount(): %v", err) + } - if a, err := store.GetAccountByPeerID(context.Background(), "testpeer"); a == nil { - t.Errorf("expecting PeerID2AccountID index updated after SaveAccount(): %v", err) - } - - if a, err := store.GetAccountBySetupKey(context.Background(), setupKey.Key); a == nil { - t.Errorf("expecting SetupKeyID2AccountID index updated after SaveAccount(): %v", err) - } + if a, err := store.GetAccountBySetupKey(context.Background(), setupKey.Key); a == nil { + t.Errorf("expecting SetupKeyID2AccountID index updated after SaveAccount(): %v", err) + } + }) } func TestSqlite_DeleteAccount(t *testing.T) { @@ -402,27 +403,24 @@ func TestSqlite_DeleteAccount(t *testing.T) { } } -func TestSqlite_GetAccount(t *testing.T) { +func Test_GetAccount(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("The SQLite store is not properly supported by Windows yet") } - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) + runTestForAllEngines(t, "../testdata/store.sql", func(t *testing.T, store Store) { + id := "bf1c8084-ba50-4ce7-9439-34653001fc3b" - id := "bf1c8084-ba50-4ce7-9439-34653001fc3b" + account, err := store.GetAccount(context.Background(), id) + require.NoError(t, err) + require.Equal(t, id, account.Id, "account id should match") - account, err := store.GetAccount(context.Background(), id) - require.NoError(t, err) - require.Equal(t, id, account.Id, "account id should match") - - _, err = store.GetAccount(context.Background(), "non-existing-account") - assert.Error(t, err) - parsedErr, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") + _, err = store.GetAccount(context.Background(), "non-existing-account") + assert.Error(t, err) + parsedErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") + }) } func TestSqlStore_SavePeer(t *testing.T) { @@ -580,74 +578,45 @@ func TestSqlStore_SavePeerLocation(t *testing.T) { require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") } -func TestSqlite_TestGetAccountByPrivateDomain(t *testing.T) { +func Test_TestGetAccountByPrivateDomain(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("The SQLite store is not properly supported by Windows yet") } - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) + runTestForAllEngines(t, "../testdata/store.sql", func(t *testing.T, store Store) { + existingDomain := "test.com" - existingDomain := "test.com" + account, err := store.GetAccountByPrivateDomain(context.Background(), existingDomain) + require.NoError(t, err, "should found account") + require.Equal(t, existingDomain, account.Domain, "domains should match") - account, err := store.GetAccountByPrivateDomain(context.Background(), existingDomain) - require.NoError(t, err, "should found account") - 
require.Equal(t, existingDomain, account.Domain, "domains should match") - - _, err = store.GetAccountByPrivateDomain(context.Background(), "missing-domain.com") - require.Error(t, err, "should return error on domain lookup") - parsedErr, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") + _, err = store.GetAccountByPrivateDomain(context.Background(), "missing-domain.com") + require.Error(t, err, "should return error on domain lookup") + parsedErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") + }) } -func TestSqlite_GetTokenIDByHashedToken(t *testing.T) { +func Test_GetTokenIDByHashedToken(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("The SQLite store is not properly supported by Windows yet") } - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) + runTestForAllEngines(t, "../testdata/store.sql", func(t *testing.T, store Store) { + hashed := "SoMeHaShEdToKeN" + id := "9dj38s35-63fb-11ec-90d6-0242ac120003" - hashed := "SoMeHaShEdToKeN" - id := "9dj38s35-63fb-11ec-90d6-0242ac120003" + token, err := store.GetTokenIDByHashedToken(context.Background(), hashed) + require.NoError(t, err) + require.Equal(t, id, token) - token, err := store.GetTokenIDByHashedToken(context.Background(), hashed) - require.NoError(t, err) - require.Equal(t, id, token) - - _, err = store.GetTokenIDByHashedToken(context.Background(), "non-existing-hash") - require.Error(t, err) - parsedErr, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") -} - -func TestSqlite_GetUserByTokenID(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("The SQLite store is not properly supported by Windows yet") - } - - t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) - - id := "9dj38s35-63fb-11ec-90d6-0242ac120003" - - user, err := store.GetUserByTokenID(context.Background(), id) - require.NoError(t, err) - require.Equal(t, id, user.PATs[id].ID) - - _, err = store.GetUserByTokenID(context.Background(), "non-existing-id") - require.Error(t, err) - parsedErr, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") + _, err = store.GetTokenIDByHashedToken(context.Background(), "non-existing-hash") + require.Error(t, err) + parsedErr, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, status.NotFound, parsedErr.Type(), "should return not found error") + }) } func TestMigrate(t *testing.T) { @@ -962,23 +931,6 @@ func TestPostgresql_GetTokenIDByHashedToken(t *testing.T) { require.Equal(t, id, token) } -func TestPostgresql_GetUserByTokenID(t *testing.T) { - if (os.Getenv("CI") == "true" && runtime.GOOS == "darwin") || runtime.GOOS == "windows" { - t.Skip("skip CI tests on darwin and windows") - } - - t.Setenv("NETBIRD_STORE_ENGINE", string(PostgresStoreEngine)) - store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir()) - t.Cleanup(cleanUp) - assert.NoError(t, err) - - id := "9dj38s35-63fb-11ec-90d6-0242ac120003" - 
-	user, err := store.GetUserByTokenID(context.Background(), id)
-	require.NoError(t, err)
-	require.Equal(t, id, user.PATs[id].ID)
-}
-
 func TestSqlite_GetTakenIPs(t *testing.T) {
 	t.Setenv("NETBIRD_STORE_ENGINE", string(SqliteStoreEngine))
 	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
@@ -1182,7 +1134,7 @@ func TestSqlite_CreateAndGetObjectInTransaction(t *testing.T) {
 	assert.NoError(t, err)
 }
 
-func TestSqlite_GetAccoundUsers(t *testing.T) {
+func TestSqlStore_GetAccountUsers(t *testing.T) {
 	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
 	t.Cleanup(cleanup)
 	if err != nil {
@@ -1371,6 +1323,14 @@ func TestSqlStore_SaveGroups(t *testing.T) {
 	}
 	err = store.SaveGroups(context.Background(), LockingStrengthUpdate, groups)
 	require.NoError(t, err)
+
+	groups[1].Peers = []string{}
+	err = store.SaveGroups(context.Background(), LockingStrengthUpdate, groups)
+	require.NoError(t, err)
+
+	group, err := store.GetGroupByID(context.Background(), LockingStrengthShare, accountID, groups[1].ID)
+	require.NoError(t, err)
+	require.Equal(t, groups[1], group)
 }
 
 func TestSqlStore_DeleteGroup(t *testing.T) {
@@ -2935,3 +2895,392 @@ func TestSqlStore_DatabaseBlocking(t *testing.T) {
 	t.Logf("Test completed")
 }
+
+func TestSqlStore_GetAccountCreatedBy(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	tests := []struct {
+		name        string
+		accountID   string
+		expectError bool
+		createdBy   string
+	}{
+		{
+			name:        "existing account ID",
+			accountID:   "bf1c8084-ba50-4ce7-9439-34653001fc3b",
+			expectError: false,
+			createdBy:   "edafee4e-63fb-11ec-90d6-0242ac120003",
+		},
+		{
+			name:        "non-existing account ID",
+			accountID:   "nonexistent",
+			expectError: true,
+		},
+		{
+			name:        "empty account ID",
+			accountID:   "",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			createdBy, err := store.GetAccountCreatedBy(context.Background(), LockingStrengthShare, tt.accountID)
+			if tt.expectError {
+				require.Error(t, err)
+				sErr, ok := status.FromError(err)
+				require.True(t, ok)
+				require.Equal(t, sErr.Type(), status.NotFound)
+				require.Empty(t, createdBy)
+			} else {
+				require.NoError(t, err)
+				require.NotNil(t, createdBy)
+				require.Equal(t, tt.createdBy, createdBy)
+			}
+		})
+	}
+
+}
+
+func TestSqlStore_GetUserByUserID(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	tests := []struct {
+		name        string
+		userID      string
+		expectError bool
+	}{
+		{
+			name:        "retrieve existing user",
+			userID:      "edafee4e-63fb-11ec-90d6-0242ac120003",
+			expectError: false,
+		},
+		{
+			name:        "retrieve non-existing user",
+			userID:      "non-existing",
+			expectError: true,
+		},
+		{
+			name:        "retrieve with empty user ID",
+			userID:      "",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			user, err := store.GetUserByUserID(context.Background(), LockingStrengthShare, tt.userID)
+			if tt.expectError {
+				require.Error(t, err)
+				sErr, ok := status.FromError(err)
+				require.True(t, ok)
+				require.Equal(t, sErr.Type(), status.NotFound)
+				require.Nil(t, user)
+			} else {
+				require.NoError(t, err)
+				require.NotNil(t, user)
+				require.Equal(t, tt.userID, user.Id)
+			}
+		})
+	}
+}
+
+func TestSqlStore_GetUserByPATID(t *testing.T) {
+	store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir())
+	t.Cleanup(cleanUp)
+	assert.NoError(t, err)
+
+	id := "9dj38s35-63fb-11ec-90d6-0242ac120003"
+
+	user, err := store.GetUserByPATID(context.Background(), LockingStrengthShare, id)
+	require.NoError(t, err)
+	require.Equal(t, "f4f6d672-63fb-11ec-90d6-0242ac120003", user.Id)
+}
+
+func TestSqlStore_SaveUser(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
+
+	user := &types.User{
+		Id:            "user-id",
+		AccountID:     accountID,
+		Role:          types.UserRoleAdmin,
+		IsServiceUser: false,
+		AutoGroups:    []string{"groupA", "groupB"},
+		Blocked:       false,
+		LastLogin:     util.ToPtr(time.Now().UTC()),
+		CreatedAt:     time.Now().UTC().Add(-time.Hour),
+		Issued:        types.UserIssuedIntegration,
+	}
+	err = store.SaveUser(context.Background(), LockingStrengthUpdate, user)
+	require.NoError(t, err)
+
+	saveUser, err := store.GetUserByUserID(context.Background(), LockingStrengthShare, user.Id)
+	require.NoError(t, err)
+	require.Equal(t, user.Id, saveUser.Id)
+	require.Equal(t, user.AccountID, saveUser.AccountID)
+	require.Equal(t, user.Role, saveUser.Role)
+	require.Equal(t, user.AutoGroups, saveUser.AutoGroups)
+	require.WithinDurationf(t, user.GetLastLogin(), saveUser.LastLogin.UTC(), time.Millisecond, "LastLogin should be equal")
+	require.WithinDurationf(t, user.CreatedAt, saveUser.CreatedAt.UTC(), time.Millisecond, "CreatedAt should be equal")
+	require.Equal(t, user.Issued, saveUser.Issued)
+	require.Equal(t, user.Blocked, saveUser.Blocked)
+	require.Equal(t, user.IsServiceUser, saveUser.IsServiceUser)
+}
+
+func TestSqlStore_SaveUsers(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
+
+	accountUsers, err := store.GetAccountUsers(context.Background(), LockingStrengthShare, accountID)
+	require.NoError(t, err)
+	require.Len(t, accountUsers, 2)
+
+	users := []*types.User{
+		{
+			Id:         "user-1",
+			AccountID:  accountID,
+			Issued:     "api",
+			AutoGroups: []string{"groupA", "groupB"},
+		},
+		{
+			Id:         "user-2",
+			AccountID:  accountID,
+			Issued:     "integration",
+			AutoGroups: []string{"groupA"},
+		},
+	}
+	err = store.SaveUsers(context.Background(), LockingStrengthUpdate, users)
+	require.NoError(t, err)
+
+	accountUsers, err = store.GetAccountUsers(context.Background(), LockingStrengthShare, accountID)
+	require.NoError(t, err)
+	require.Len(t, accountUsers, 4)
+
+	users[1].AutoGroups = []string{"groupA", "groupC"}
+	err = store.SaveUsers(context.Background(), LockingStrengthUpdate, users)
+	require.NoError(t, err)
+
+	user, err := store.GetUserByUserID(context.Background(), LockingStrengthShare, users[1].Id)
+	require.NoError(t, err)
+	require.Equal(t, users[1].AutoGroups, user.AutoGroups)
+}
+
+func TestSqlStore_DeleteUser(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
+	userID := "f4f6d672-63fb-11ec-90d6-0242ac120003"
+
+	err = store.DeleteUser(context.Background(), LockingStrengthUpdate, accountID, userID)
+	require.NoError(t, err)
+
+	user, err := store.GetUserByUserID(context.Background(), LockingStrengthShare, userID)
+	require.Error(t, err)
+	require.Nil(t, user)
+
+	userPATs, err := store.GetUserPATs(context.Background(), LockingStrengthShare, userID)
+	require.NoError(t, err)
+	require.Len(t, userPATs, 0)
+}
+
+func TestSqlStore_GetPATByID(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	userID := "f4f6d672-63fb-11ec-90d6-0242ac120003"
+
+	tests := []struct {
+		name        string
+		patID       string
+		expectError bool
+	}{
+		{
+			name:        "retrieve existing PAT",
+			patID:       "9dj38s35-63fb-11ec-90d6-0242ac120003",
+			expectError: false,
+		},
+		{
+			name:        "retrieve non-existing PAT",
+			patID:       "non-existing",
+			expectError: true,
+		},
+		{
+			name:        "retrieve with empty PAT ID",
+			patID:       "",
+			expectError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			pat, err := store.GetPATByID(context.Background(), LockingStrengthShare, userID, tt.patID)
+			if tt.expectError {
+				require.Error(t, err)
+				sErr, ok := status.FromError(err)
+				require.True(t, ok)
+				require.Equal(t, sErr.Type(), status.NotFound)
+				require.Nil(t, pat)
+			} else {
+				require.NoError(t, err)
+				require.NotNil(t, pat)
+				require.Equal(t, tt.patID, pat.ID)
+			}
+		})
+	}
+}
+
+func TestSqlStore_GetUserPATs(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	userPATs, err := store.GetUserPATs(context.Background(), LockingStrengthShare, "f4f6d672-63fb-11ec-90d6-0242ac120003")
+	require.NoError(t, err)
+	require.Len(t, userPATs, 1)
+}
+
+func TestSqlStore_GetPATByHashedToken(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	pat, err := store.GetPATByHashedToken(context.Background(), LockingStrengthShare, "SoMeHaShEdToKeN")
+	require.NoError(t, err)
+	require.Equal(t, "9dj38s35-63fb-11ec-90d6-0242ac120003", pat.ID)
+}
+
+func TestSqlStore_MarkPATUsed(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	userID := "f4f6d672-63fb-11ec-90d6-0242ac120003"
+	patID := "9dj38s35-63fb-11ec-90d6-0242ac120003"
+
+	err = store.MarkPATUsed(context.Background(), LockingStrengthUpdate, patID)
+	require.NoError(t, err)
+
+	pat, err := store.GetPATByID(context.Background(), LockingStrengthShare, userID, patID)
+	require.NoError(t, err)
+	now := time.Now().UTC()
+	require.WithinRange(t, pat.LastUsed.UTC(), now.Add(-15*time.Second), now, "LastUsed should be within 1 second of now")
+}
+
+func TestSqlStore_SavePAT(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	userID := "edafee4e-63fb-11ec-90d6-0242ac120003"
+
+	pat := &types.PersonalAccessToken{
+		ID:             "pat-id",
+		UserID:         userID,
+		Name:           "token",
+		HashedToken:    "SoMeHaShEdToKeN",
+		ExpirationDate: util.ToPtr(time.Now().UTC().Add(12 * time.Hour)),
+		CreatedBy:      userID,
+		CreatedAt:      time.Now().UTC().Add(time.Hour),
+		LastUsed:       util.ToPtr(time.Now().UTC().Add(-15 * time.Minute)),
+	}
+	err = store.SavePAT(context.Background(), LockingStrengthUpdate, pat)
+	require.NoError(t, err)
+
+	savePAT, err := store.GetPATByID(context.Background(), LockingStrengthShare, userID, pat.ID)
+	require.NoError(t, err)
+	require.Equal(t, pat.ID, savePAT.ID)
+	require.Equal(t, pat.UserID, savePAT.UserID)
+	require.Equal(t, pat.HashedToken, savePAT.HashedToken)
+	require.Equal(t, pat.CreatedBy, savePAT.CreatedBy)
+	require.WithinDurationf(t, pat.GetExpirationDate(), savePAT.ExpirationDate.UTC(), time.Millisecond, "ExpirationDate should be equal")
+	require.WithinDurationf(t, pat.CreatedAt, savePAT.CreatedAt.UTC(), time.Millisecond, "CreatedAt should be equal")
+	require.WithinDurationf(t, pat.GetLastUsed(), savePAT.LastUsed.UTC(), time.Millisecond, "LastUsed should be equal")
+}
+
+func TestSqlStore_DeletePAT(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	userID := "f4f6d672-63fb-11ec-90d6-0242ac120003"
+	patID := "9dj38s35-63fb-11ec-90d6-0242ac120003"
+
+	err = store.DeletePAT(context.Background(), LockingStrengthUpdate, userID, patID)
+	require.NoError(t, err)
+
+	pat, err := store.GetPATByID(context.Background(), LockingStrengthShare, userID, patID)
+	require.Error(t, err)
+	require.Nil(t, pat)
+}
+
+func TestSqlStore_SaveUsers_LargeBatch(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
+
+	accountUsers, err := store.GetAccountUsers(context.Background(), LockingStrengthShare, accountID)
+	require.NoError(t, err)
+	require.Len(t, accountUsers, 2)
+
+	usersToSave := make([]*types.User, 0)
+
+	for i := 1; i <= 8000; i++ {
+		usersToSave = append(usersToSave, &types.User{
+			Id:        fmt.Sprintf("user-%d", i),
+			AccountID: accountID,
+			Role:      types.UserRoleUser,
+		})
+	}
+
+	err = store.SaveUsers(context.Background(), LockingStrengthUpdate, usersToSave)
+	require.NoError(t, err)
+
+	accountUsers, err = store.GetAccountUsers(context.Background(), LockingStrengthShare, accountID)
+	require.NoError(t, err)
+	require.Equal(t, 8002, len(accountUsers))
+}
+
+func TestSqlStore_SaveGroups_LargeBatch(t *testing.T) {
+	store, cleanup, err := NewTestStoreFromSQL(context.Background(), "../testdata/extended-store.sql", t.TempDir())
+	t.Cleanup(cleanup)
+	require.NoError(t, err)
+
+	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
+
+	accountGroups, err := store.GetAccountGroups(context.Background(), LockingStrengthShare, accountID)
+	require.NoError(t, err)
+	require.Len(t, accountGroups, 3)
+
+	groupsToSave := make([]*types.Group, 0)
+
+	for i := 1; i <= 8000; i++ {
+		groupsToSave = append(groupsToSave, &types.Group{
+			ID:        fmt.Sprintf("%d", i),
+			AccountID: accountID,
+			Name:      fmt.Sprintf("group-%d", i),
+		})
+	}
+
+	err = store.SaveGroups(context.Background(), LockingStrengthUpdate, groupsToSave)
+	require.NoError(t, err)
+
+	accountGroups, err = store.GetAccountGroups(context.Background(), LockingStrengthShare, accountID)
+	require.NoError(t, err)
+	require.Equal(t, 8003, len(accountGroups))
+}
diff --git a/management/server/store/store.go b/management/server/store/store.go
index b58fc68ec..e94ba2f35 100644
--- a/management/server/store/store.go
+++ b/management/server/store/store.go
@@ -9,11 +9,16 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"regexp"
 	"runtime"
+	"slices"
 	"strings"
 	"time"
 
+	"github.com/google/uuid"
 	log "github.com/sirupsen/logrus"
+	"gorm.io/driver/mysql"
+	"gorm.io/driver/postgres"
"gorm.io/driver/sqlite" "gorm.io/gorm" @@ -59,21 +64,30 @@ type Store interface { GetAccountIDByPrivateDomain(ctx context.Context, lockStrength LockingStrength, domain string) (string, error) GetAccountSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types.Settings, error) GetAccountDNSSettings(ctx context.Context, lockStrength LockingStrength, accountID string) (*types.DNSSettings, error) + GetAccountCreatedBy(ctx context.Context, lockStrength LockingStrength, accountID string) (string, error) SaveAccount(ctx context.Context, account *types.Account) error DeleteAccount(ctx context.Context, account *types.Account) error UpdateAccountDomainAttributes(ctx context.Context, accountID string, domain string, category string, isPrimaryDomain bool) error SaveDNSSettings(ctx context.Context, lockStrength LockingStrength, accountID string, settings *types.DNSSettings) error - GetUserByTokenID(ctx context.Context, tokenID string) (*types.User, error) + GetUserByPATID(ctx context.Context, lockStrength LockingStrength, patID string) (*types.User, error) GetUserByUserID(ctx context.Context, lockStrength LockingStrength, userID string) (*types.User, error) GetAccountUsers(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.User, error) - SaveUsers(accountID string, users map[string]*types.User) error + SaveUsers(ctx context.Context, lockStrength LockingStrength, users []*types.User) error SaveUser(ctx context.Context, lockStrength LockingStrength, user *types.User) error SaveUserLastLogin(ctx context.Context, accountID, userID string, lastLogin time.Time) error + DeleteUser(ctx context.Context, lockStrength LockingStrength, accountID, userID string) error GetTokenIDByHashedToken(ctx context.Context, secret string) (string, error) DeleteHashedPAT2TokenIDIndex(hashedToken string) error DeleteTokenID2UserIDIndex(tokenID string) error + GetPATByID(ctx context.Context, lockStrength LockingStrength, userID, patID string) (*types.PersonalAccessToken, error) + GetUserPATs(ctx context.Context, lockStrength LockingStrength, userID string) ([]*types.PersonalAccessToken, error) + GetPATByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken string) (*types.PersonalAccessToken, error) + MarkPATUsed(ctx context.Context, lockStrength LockingStrength, patID string) error + SavePAT(ctx context.Context, strength LockingStrength, pat *types.PersonalAccessToken) error + DeletePAT(ctx context.Context, strength LockingStrength, userID, patID string) error + GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.Group, error) GetResourceGroups(ctx context.Context, lockStrength LockingStrength, accountID, resourceID string) ([]*types.Group, error) GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types.Group, error) @@ -184,6 +198,8 @@ const ( mysqlDsnEnv = "NETBIRD_STORE_ENGINE_MYSQL_DSN" ) +var supportedEngines = []Engine{SqliteStoreEngine, PostgresStoreEngine, MysqlStoreEngine} + func getStoreEngineFromEnv() Engine { // NETBIRD_STORE_ENGINE supposed to be used in tests. Otherwise, rely on the config file. 
 	kind, ok := os.LookupEnv("NETBIRD_STORE_ENGINE")
@@ -192,7 +208,7 @@ func getStoreEngineFromEnv() Engine {
 	}
 
 	value := Engine(strings.ToLower(kind))
-	if value == SqliteStoreEngine || value == PostgresStoreEngine || value == MysqlStoreEngine {
+	if slices.Contains(supportedEngines, value) {
 		return value
 	}
 
@@ -319,7 +335,7 @@ func NewTestStoreFromSQL(ctx context.Context, filename string, dataDir string) (
 	}
 
 	file := filepath.Join(dataDir, storeStr)
-	db, err := gorm.Open(sqlite.Open(file), getGormConfig(kind))
+	db, err := gorm.Open(sqlite.Open(file), getGormConfig())
 	if err != nil {
 		return nil, nil, err
 	}
@@ -340,51 +356,126 @@ func NewTestStoreFromSQL(ctx context.Context, filename string, dataDir string) (
 }
 
 func getSqlStoreEngine(ctx context.Context, store *SqlStore, kind Engine) (Store, func(), error) {
-	if kind == PostgresStoreEngine {
-		cleanUp, err := testutil.CreatePostgresTestContainer()
-		if err != nil {
-			return nil, nil, err
+	var cleanup func()
+	var err error
+	switch kind {
+	case PostgresStoreEngine:
+		store, cleanup, err = newReusedPostgresStore(ctx, store, kind)
+	case MysqlStoreEngine:
+		store, cleanup, err = newReusedMysqlStore(ctx, store, kind)
+	default:
+		cleanup = func() {
+			// sqlite doesn't need to be cleaned up
 		}
-
-		dsn, ok := os.LookupEnv(postgresDsnEnv)
-		if !ok {
-			return nil, nil, fmt.Errorf("%s is not set", postgresDsnEnv)
-		}
-
-		store, err = NewPostgresqlStoreFromSqlStore(ctx, store, dsn, nil)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		return store, cleanUp, nil
 	}
-
-	if kind == MysqlStoreEngine {
-		cleanUp, err := testutil.CreateMysqlTestContainer()
-		if err != nil {
-			return nil, nil, err
-		}
-
-		dsn, ok := os.LookupEnv(mysqlDsnEnv)
-		if !ok {
-			return nil, nil, fmt.Errorf("%s is not set", mysqlDsnEnv)
-		}
-
-		store, err = NewMysqlStoreFromSqlStore(ctx, store, dsn, nil)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		return store, cleanUp, nil
+	if err != nil {
+		return nil, cleanup, fmt.Errorf("failed to create test store: %v", err)
 	}
 
 	closeConnection := func() {
+		cleanup()
 		store.Close(ctx)
 	}
 
 	return store, closeConnection, nil
 }
 
+func newReusedPostgresStore(ctx context.Context, store *SqlStore, kind Engine) (*SqlStore, func(), error) {
+	if envDsn, ok := os.LookupEnv(postgresDsnEnv); !ok || envDsn == "" {
+		var err error
+		_, err = testutil.CreatePostgresTestContainer()
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	dsn, ok := os.LookupEnv(postgresDsnEnv)
+	if !ok {
+		return nil, nil, fmt.Errorf("%s is not set", postgresDsnEnv)
+	}
+
+	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to open postgres connection: %v", err)
+	}
+
+	dsn, cleanup, err := createRandomDB(dsn, db, kind)
+	if err != nil {
+		return nil, cleanup, err
+	}
+
+	store, err = NewPostgresqlStoreFromSqlStore(ctx, store, dsn, nil)
+	if err != nil {
+		return nil, cleanup, err
+	}
+
+	return store, cleanup, nil
+}
+
+func newReusedMysqlStore(ctx context.Context, store *SqlStore, kind Engine) (*SqlStore, func(), error) {
+	if envDsn, ok := os.LookupEnv(mysqlDsnEnv); !ok || envDsn == "" {
+		var err error
+		_, err = testutil.CreateMysqlTestContainer()
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	dsn, ok := os.LookupEnv(mysqlDsnEnv)
+	if !ok {
+		return nil, nil, fmt.Errorf("%s is not set", mysqlDsnEnv)
+	}
+
+	db, err := gorm.Open(mysql.Open(dsn+"?charset=utf8&parseTime=True&loc=Local"), &gorm.Config{})
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to open mysql connection: %v", err)
+	}
+
+	dsn, cleanup, err := createRandomDB(dsn, db, kind)
+	if err != nil {
+		return nil, cleanup, err
+	}
+
+	store, err = NewMysqlStoreFromSqlStore(ctx, store, dsn, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return store, cleanup, nil
+}
+
+func createRandomDB(dsn string, db *gorm.DB, engine Engine) (string, func(), error) {
+	dbName := fmt.Sprintf("test_db_%s", strings.ReplaceAll(uuid.New().String(), "-", "_"))
+
+	if err := db.Exec(fmt.Sprintf("CREATE DATABASE %s", dbName)).Error; err != nil {
+		return "", nil, fmt.Errorf("failed to create database: %v", err)
+	}
+
+	var err error
+	cleanup := func() {
+		switch engine {
+		case PostgresStoreEngine:
+			err = db.Exec(fmt.Sprintf("DROP DATABASE %s WITH (FORCE)", dbName)).Error
+		case MysqlStoreEngine:
+			// err = killMySQLConnections(dsn, dbName)
+			err = db.Exec(fmt.Sprintf("DROP DATABASE %s", dbName)).Error
+		}
+		if err != nil {
+			log.Errorf("failed to drop database %s: %v", dbName, err)
+			panic(err)
+		}
+		sqlDB, _ := db.DB()
+		_ = sqlDB.Close()
+	}
+
+	return replaceDBName(dsn, dbName), cleanup, nil
+}
+
+func replaceDBName(dsn, newDBName string) string {
+	re := regexp.MustCompile(`(?P<pre>[:/@])(?P<dbname>[^/?]+)(?P<post>\?|$)`)
+	return re.ReplaceAllString(dsn, `${pre}`+newDBName+`${post}`)
+}
+
 func loadSQL(db *gorm.DB, filepath string) error {
 	sqlContent, err := os.ReadFile(filepath)
 	if err != nil {
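A minimal, self-contained sketch of how the DSN rewriting in createRandomDB/replaceDBName above is expected to behave; the DSN value is illustrative only, and the regex group names follow the `${pre}`/`${post}` references in the replacement string:

package main

import (
	"fmt"
	"regexp"
)

// Mirrors the replaceDBName helper above: swap the database segment that sits
// right before the query string (or the end of the DSN) for the per-test
// database name, leaving the rest of the DSN untouched.
func replaceDBName(dsn, newDBName string) string {
	re := regexp.MustCompile(`(?P<pre>[:/@])(?P<dbname>[^/?]+)(?P<post>\?|$)`)
	return re.ReplaceAllString(dsn, `${pre}`+newDBName+`${post}`)
}

func main() {
	// Illustrative DSN only.
	dsn := "postgres://user:pass@localhost:5432/netbird?sslmode=disable"
	fmt.Println(replaceDBName(dsn, "test_db_1234"))
	// Output: postgres://user:pass@localhost:5432/test_db_1234?sslmode=disable
}
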
diff --git a/management/server/testdata/store.sql b/management/server/testdata/store.sql
index 1c0767bde..41b8fa2f7 100644
--- a/management/server/testdata/store.sql
+++ b/management/server/testdata/store.sql
@@ -37,7 +37,7 @@ CREATE INDEX `idx_network_resources_id` ON `network_resources`(`id`);
 CREATE INDEX `idx_networks_id` ON `networks`(`id`);
 CREATE INDEX `idx_networks_account_id` ON `networks`(`account_id`);
 
-INSERT INTO accounts VALUES('bf1c8084-ba50-4ce7-9439-34653001fc3b','','2024-10-02 16:03:06.778746+02:00','test.com','private',1,'af1c8024-ha40-4ce2-9418-34653101fc3c','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL);
+INSERT INTO accounts VALUES('bf1c8084-ba50-4ce7-9439-34653001fc3b','edafee4e-63fb-11ec-90d6-0242ac120003','2024-10-02 16:03:06.778746+02:00','test.com','private',1,'af1c8024-ha40-4ce2-9418-34653101fc3c','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL);
 INSERT INTO "groups" VALUES('cs1tnh0hhcjnqoiuebeg','bf1c8084-ba50-4ce7-9439-34653001fc3b','All','api','[]',0,'');
 INSERT INTO setup_keys VALUES('','bf1c8084-ba50-4ce7-9439-34653001fc3b','A2C8E62B-38F5-4553-B31E-DD66C696CEBB','Default key','reusable','2021-08-19 20:46:20.005936822+02:00','2321-09-18 20:46:20.005936822+02:00','2021-08-19 20:46:20.005936822+02:00',0,0,NULL,'["cs1tnh0hhcjnqoiuebeg"]',0,0);
 INSERT INTO users VALUES('a23efe53-63fb-11ec-90d6-0242ac120003','bf1c8084-ba50-4ce7-9439-34653001fc3b','owner',0,0,'','[]',0,NULL,'2024-10-02 16:03:06.779156+02:00','api',0,'');
diff --git a/management/server/testutil/store.go b/management/server/testutil/store.go
index 16438cab8..8672efa7f 100644
--- a/management/server/testutil/store.go
+++ b/management/server/testutil/store.go
@@ -22,7 +22,7 @@ func CreateMysqlTestContainer() (func(), error) {
 	myContainer, err := mysql.RunContainer(ctx,
 		testcontainers.WithImage("mlsmaycon/warmed-mysql:8"),
 		mysql.WithDatabase("testing"),
-		mysql.WithUsername("testing"),
+		mysql.WithUsername("root"),
 		mysql.WithPassword("testing"),
 		testcontainers.WithWaitStrategy(
 			wait.ForLog("/usr/sbin/mysqld: ready for connections").
@@ -34,6 +34,7 @@ func CreateMysqlTestContainer() (func(), error) {
 	}
 
 	cleanup := func() {
+		os.Unsetenv("NETBIRD_STORE_ENGINE_MYSQL_DSN")
 		timeoutCtx, cancelFunc := context.WithTimeout(ctx, 1*time.Second)
 		defer cancelFunc()
 		if err = myContainer.Terminate(timeoutCtx); err != nil {
@@ -68,6 +69,7 @@ func CreatePostgresTestContainer() (func(), error) {
 	}
 
 	cleanup := func() {
+		os.Unsetenv("NETBIRD_STORE_ENGINE_POSTGRES_DSN")
 		timeoutCtx, cancelFunc := context.WithTimeout(ctx, 1*time.Second)
 		defer cancelFunc()
 		if err = pgContainer.Terminate(timeoutCtx); err != nil {
diff --git a/management/server/types/personal_access_token.go b/management/server/types/personal_access_token.go
index ff157fcc6..0aa6b152b 100644
--- a/management/server/types/personal_access_token.go
+++ b/management/server/types/personal_access_token.go
@@ -75,7 +75,7 @@ type PersonalAccessTokenGenerated struct {
 
 // CreateNewPAT will generate a new PersonalAccessToken that can be assigned to a User.
 // Additionally, it will return the token in plain text once, to give to the user and only save a hashed version
-func CreateNewPAT(name string, expirationInDays int, createdBy string) (*PersonalAccessTokenGenerated, error) {
+func CreateNewPAT(name string, expirationInDays int, targetID, createdBy string) (*PersonalAccessTokenGenerated, error) {
 	hashedToken, plainToken, err := generateNewToken()
 	if err != nil {
 		return nil, err
@@ -84,6 +84,7 @@ func CreateNewPAT(name string, expirationInDays int, createdBy string) (*Persona
 	return &PersonalAccessTokenGenerated{
 		PersonalAccessToken: PersonalAccessToken{
 			ID:             xid.New().String(),
+			UserID:         targetID,
 			Name:           name,
 			HashedToken:    hashedToken,
 			ExpirationDate: util.ToPtr(currentTime.AddDate(0, 0, expirationInDays)),
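A short sketch of the call-site shape implied by the new targetID parameter, mirroring the CreatePAT change later in this patch; identifiers are illustrative fragments, not standalone code:

	// Illustrative only: target user and initiator are now passed separately, so the
	// generated token is linked to the target via PersonalAccessToken.UserID.
	pat, err := types.CreateNewPAT(tokenName, expiresIn, targetUserID, initiatorUser.Id)
	if err != nil {
		return nil, status.Errorf(status.Internal, "failed to create PAT: %v", err)
	}
	if err := am.Store.SavePAT(ctx, store.LockingStrengthUpdate, &pat.PersonalAccessToken); err != nil {
		return nil, err
	}
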
diff --git a/management/server/types/user.go b/management/server/types/user.go
index 348fbfb22..5f7a4f2cb 100644
--- a/management/server/types/user.go
+++ b/management/server/types/user.go
@@ -80,7 +80,7 @@ type User struct {
 	// AutoGroups is a list of Group IDs to auto-assign to peers registered by this user
 	AutoGroups []string                        `gorm:"serializer:json"`
 	PATs       map[string]*PersonalAccessToken `gorm:"-"`
-	PATsG      []PersonalAccessToken           `json:"-" gorm:"foreignKey:UserID;references:id"`
+	PATsG      []PersonalAccessToken           `json:"-" gorm:"foreignKey:UserID;references:id;constraint:OnDelete:CASCADE;"`
 	// Blocked indicates whether the user is blocked. Blocked users can't use the system.
 	Blocked bool
 	// LastLogin is the last time the user logged in to IdP
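A brief sketch of the behavior the added OnDelete:CASCADE constraint is intended to give, in line with what TestSqlStore_DeleteUser asserts earlier in this patch; store, ctx, and the IDs are illustrative:

	// Illustrative only: once the user row is deleted, the user's personal
	// access tokens are expected to be gone as well.
	err := store.DeleteUser(ctx, LockingStrengthUpdate, accountID, userID)
	// ...
	pats, err := store.GetUserPATs(ctx, LockingStrengthShare, userID)
	// expected: err == nil && len(pats) == 0
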
diff --git a/management/server/user.go b/management/server/user.go
index 17770a423..6ba9b68d3 100644
--- a/management/server/user.go
+++ b/management/server/user.go
@@ -4,13 +4,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"slices"
 	"strings"
 	"time"
 
 	"github.com/google/uuid"
-	log "github.com/sirupsen/logrus"
-
 	"github.com/netbirdio/netbird/management/server/activity"
 	nbContext "github.com/netbirdio/netbird/management/server/context"
 	"github.com/netbirdio/netbird/management/server/idp"
@@ -20,6 +17,7 @@ import (
 	"github.com/netbirdio/netbird/management/server/store"
 	"github.com/netbirdio/netbird/management/server/types"
 	"github.com/netbirdio/netbird/management/server/util"
+	log "github.com/sirupsen/logrus"
 )
 
 // createServiceUser creates a new service user under the given account.
@@ -27,30 +25,29 @@ func (am *DefaultAccountManager) createServiceUser(ctx context.Context, accountI
 	unlock := am.Store.AcquireWriteLockByUID(ctx, accountID)
 	defer unlock()
 
-	account, err := am.Store.GetAccount(ctx, accountID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
-		return nil, status.Errorf(status.NotFound, "account %s doesn't exist", accountID)
+		return nil, err
 	}
 
-	executingUser := account.Users[initiatorUserID]
-	if executingUser == nil {
-		return nil, status.Errorf(status.NotFound, "user not found")
+	if initiatorUser.AccountID != accountID {
+		return nil, status.NewUserNotPartOfAccountError()
 	}
-	if !executingUser.HasAdminPower() {
-		return nil, status.Errorf(status.PermissionDenied, "only users with admin power can create service users")
+
+	if !initiatorUser.HasAdminPower() {
+		return nil, status.NewAdminPermissionError()
 	}
 
 	if role == types.UserRoleOwner {
-		return nil, status.Errorf(status.InvalidArgument, "can't create a service user with owner role")
+		return nil, status.NewServiceUserRoleInvalidError()
 	}
 
 	newUserID := uuid.New().String()
 	newUser := types.NewUser(newUserID, role, true, nonDeletable, serviceUserName, autoGroups, types.UserIssuedAPI)
+	newUser.AccountID = accountID
 	log.WithContext(ctx).Debugf("New User: %v", newUser)
-	account.Users[newUserID] = newUser
 
-	err = am.Store.SaveAccount(ctx, account)
-	if err != nil {
+	if err = am.Store.SaveUser(ctx, store.LockingStrengthUpdate, newUser); err != nil {
 		return nil, err
 	}
 
@@ -87,40 +84,67 @@ func (am *DefaultAccountManager) inviteNewUser(ctx context.Context, accountID, u
 		return nil, status.Errorf(status.PreconditionFailed, "IdP manager must be enabled to send user invites")
 	}
 
-	if invite == nil {
-		return nil, fmt.Errorf("provided user update is nil")
+	if err := validateUserInvite(invite); err != nil {
+		return nil, err
 	}
 
-	invitedRole := types.StrRoleToUserRole(invite.Role)
-
-	switch {
-	case invite.Name == "":
-		return nil, status.Errorf(status.InvalidArgument, "name can't be empty")
-	case invite.Email == "":
-		return nil, status.Errorf(status.InvalidArgument, "email can't be empty")
-	case invitedRole == types.UserRoleOwner:
-		return nil, status.Errorf(status.InvalidArgument, "can't invite a user with owner role")
-	default:
-	}
-
-	account, err := am.Store.GetAccount(ctx, accountID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, userID)
 	if err != nil {
-		return nil, status.Errorf(status.NotFound, "account %s doesn't exist", accountID)
+		return nil, err
 	}
 
-	initiatorUser, err := account.FindUser(userID)
-	if err != nil {
-		return nil, status.Errorf(status.NotFound, "initiator user with ID %s doesn't exist", userID)
+	if initiatorUser.AccountID != accountID {
+		return nil, status.NewUserNotPartOfAccountError()
 	}
 
 	inviterID := userID
 	if initiatorUser.IsServiceUser {
-		inviterID = account.CreatedBy
+		createdBy, err := am.Store.GetAccountCreatedBy(ctx, store.LockingStrengthShare, accountID)
+		if err != nil {
+			return nil, err
+		}
+		inviterID = createdBy
 	}
 
+	idpUser, err := am.createNewIdpUser(ctx, accountID, inviterID, invite)
+	if err != nil {
+		return nil, err
+	}
+
+	newUser := &types.User{
+		Id:                   idpUser.ID,
+		AccountID:            accountID,
+		Role:                 types.StrRoleToUserRole(invite.Role),
+		AutoGroups:           invite.AutoGroups,
+		Issued:               invite.Issued,
+		IntegrationReference: invite.IntegrationReference,
+		CreatedAt:            time.Now().UTC(),
+	}
+
+	settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthShare, accountID)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = am.Store.SaveUser(ctx, store.LockingStrengthUpdate, newUser); err != nil {
+		return nil, err
+	}
+
+	_, err = am.refreshCache(ctx, accountID)
+	if err != nil {
+		return nil, err
+	}
+
+	am.StoreEvent(ctx, userID, newUser.Id, accountID, activity.UserInvited, nil)
+
+	return newUser.ToUserInfo(idpUser, settings)
+}
+
+// createNewIdpUser validates the invite and creates a new user in the IdP
+func (am *DefaultAccountManager) createNewIdpUser(ctx context.Context, accountID string, inviterID string, invite *types.UserInfo) (*idp.UserData, error) {
 	// inviterUser is the one who is inviting the new user
-	inviterUser, err := am.lookupUserInCache(ctx, inviterID, account)
-	if err != nil || inviterUser == nil {
+	inviterUser, err := am.lookupUserInCache(ctx, inviterID, accountID)
+	if err != nil {
 		return nil, status.Errorf(status.NotFound, "inviter user with ID %s doesn't exist in IdP", inviterID)
 	}
 
@@ -143,34 +167,7 @@ func (am *DefaultAccountManager) inviteNewUser(ctx context.Context, accountID, u
 		return nil, status.Errorf(status.UserAlreadyExists, "can't invite a user with an existing NetBird account")
 	}
 
-	idpUser, err := am.idpManager.CreateUser(ctx, invite.Email, invite.Name, accountID, inviterUser.Email)
-	if err != nil {
-		return nil, err
-	}
-
-	newUser := &types.User{
-		Id:                   idpUser.ID,
-		Role:                 invitedRole,
-		AutoGroups:           invite.AutoGroups,
-		Issued:               invite.Issued,
-		IntegrationReference: invite.IntegrationReference,
-		CreatedAt:            time.Now().UTC(),
-	}
-	account.Users[idpUser.ID] = newUser
-
-	err = am.Store.SaveAccount(ctx, account)
-	if err != nil {
-		return nil, err
-	}
-
-	_, err = am.refreshCache(ctx, account.Id)
-	if err != nil {
-		return nil, err
-	}
-
-	am.StoreEvent(ctx, userID, newUser.Id, accountID, activity.UserInvited, nil)
-
-	return newUser.ToUserInfo(idpUser, account.Settings)
+	return am.idpManager.CreateUser(ctx, invite.Email, invite.Name, accountID, inviterUser.Email)
 }
 
 func (am *DefaultAccountManager) GetUserByID(ctx context.Context, id string) (*types.User, error) {
@@ -210,60 +207,51 @@ func (am *DefaultAccountManager) GetUser(ctx context.Context, claims jwtclaims.A
 // ListUsers returns lists of all users under the account.
 // It doesn't populate user information such as email or name.
 func (am *DefaultAccountManager) ListUsers(ctx context.Context, accountID string) ([]*types.User, error) {
-	unlock := am.Store.AcquireWriteLockByUID(ctx, accountID)
-	defer unlock()
-
-	account, err := am.Store.GetAccount(ctx, accountID)
-	if err != nil {
-		return nil, err
-	}
-
-	users := make([]*types.User, 0, len(account.Users))
-	for _, item := range account.Users {
-		users = append(users, item)
-	}
-
-	return users, nil
+	return am.Store.GetAccountUsers(ctx, store.LockingStrengthShare, accountID)
 }
 
-func (am *DefaultAccountManager) deleteServiceUser(ctx context.Context, account *types.Account, initiatorUserID string, targetUser *types.User) {
+func (am *DefaultAccountManager) deleteServiceUser(ctx context.Context, accountID string, initiatorUserID string, targetUser *types.User) error {
+	if err := am.Store.DeleteUser(ctx, store.LockingStrengthUpdate, accountID, targetUser.Id); err != nil {
+		return err
+	}
 	meta := map[string]any{"name": targetUser.ServiceUserName, "created_at": targetUser.CreatedAt}
-	am.StoreEvent(ctx, initiatorUserID, targetUser.Id, account.Id, activity.ServiceUserDeleted, meta)
-	delete(account.Users, targetUser.Id)
+	am.StoreEvent(ctx, initiatorUserID, targetUser.Id, accountID, activity.ServiceUserDeleted, meta)
+	return nil
 }
 
 // DeleteUser deletes a user from the given account.
-func (am *DefaultAccountManager) DeleteUser(ctx context.Context, accountID, initiatorUserID string, targetUserID string) error {
+func (am *DefaultAccountManager) DeleteUser(ctx context.Context, accountID, initiatorUserID, targetUserID string) error {
 	if initiatorUserID == targetUserID {
 		return status.Errorf(status.InvalidArgument, "self deletion is not allowed")
 	}
+
 	unlock := am.Store.AcquireWriteLockByUID(ctx, accountID)
 	defer unlock()
 
-	account, err := am.Store.GetAccount(ctx, accountID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
 		return err
 	}
 
-	executingUser := account.Users[initiatorUserID]
-	if executingUser == nil {
-		return status.Errorf(status.NotFound, "user not found")
-	}
-	if !executingUser.HasAdminPower() {
-		return status.Errorf(status.PermissionDenied, "only users with admin power can delete users")
+	if initiatorUser.AccountID != accountID {
+		return status.NewUserNotPartOfAccountError()
 	}
 
-	targetUser := account.Users[targetUserID]
-	if targetUser == nil {
-		return status.Errorf(status.NotFound, "target user not found")
+	if !initiatorUser.HasAdminPower() {
+		return status.NewAdminPermissionError()
+	}
+
+	targetUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, targetUserID)
+	if err != nil {
+		return err
 	}
 
 	if targetUser.Role == types.UserRoleOwner {
-		return status.Errorf(status.PermissionDenied, "unable to delete a user with owner role")
+		return status.NewOwnerDeletePermissionError()
 	}
 
 	// disable deleting integration user if the initiator is not admin service user
-	if targetUser.Issued == types.UserIssuedIntegration && !executingUser.IsServiceUser {
+	if targetUser.Issued == types.UserIssuedIntegration && !initiatorUser.IsServiceUser {
 		return status.Errorf(status.PermissionDenied, "only integration service user can delete this user")
 	}
 
@@ -273,64 +261,26 @@ func (am *DefaultAccountManager) DeleteUser(ctx context.Context, accountID, init
 			return status.Errorf(status.PermissionDenied, "service user is marked as non-deletable")
 		}
 
-		am.deleteServiceUser(ctx, account, initiatorUserID, targetUser)
-		return am.Store.SaveAccount(ctx, account)
+		return am.deleteServiceUser(ctx, accountID, initiatorUserID, targetUser)
 	}
 
-	return am.deleteRegularUser(ctx, account, initiatorUserID, targetUserID)
-}
-
-func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, account *types.Account, initiatorUserID, targetUserID string) error {
-	meta, updateAccountPeers, err := am.prepareUserDeletion(ctx, account, initiatorUserID, targetUserID)
+	userInfo, err := am.getUserInfo(ctx, targetUser, accountID)
 	if err != nil {
 		return err
 	}
 
-	delete(account.Users, targetUserID)
-	if updateAccountPeers {
-		account.Network.IncSerial()
-	}
-
-	err = am.Store.SaveAccount(ctx, account)
+	updateAccountPeers, err := am.deleteRegularUser(ctx, accountID, initiatorUserID, userInfo)
 	if err != nil {
 		return err
 	}
 
-	am.StoreEvent(ctx, initiatorUserID, targetUserID, account.Id, activity.UserDeleted, meta)
 	if updateAccountPeers {
-		am.UpdateAccountPeers(ctx, account.Id)
+		am.UpdateAccountPeers(ctx, accountID)
 	}
 
 	return nil
 }
 
-func (am *DefaultAccountManager) deleteUserPeers(ctx context.Context, initiatorUserID string, targetUserID string, account *types.Account) (bool, error) {
-	peers, err := account.FindUserPeers(targetUserID)
-	if err != nil {
-		return false, status.Errorf(status.Internal, "failed to find user peers")
-	}
-
-	hadPeers := len(peers) > 0
-	if !hadPeers {
-		return false, nil
-	}
-
-	eventsToStore, err := deletePeers(ctx, am, am.Store, account.Id, initiatorUserID, peers)
-	if err != nil {
-		return false, err
-	}
-
-	for _, storeEvent := range eventsToStore {
-		storeEvent()
-	}
-
-	for _, peer := range peers {
-		account.DeletePeer(peer.ID)
-	}
-
-	return hadPeers, nil
-}
-
 // InviteUser resend invitations to users who haven't activated their accounts prior to the expiration period.
 func (am *DefaultAccountManager) InviteUser(ctx context.Context, accountID string, initiatorUserID string, targetUserID string) error {
 	unlock := am.Store.AcquireWriteLockByUID(ctx, accountID)
@@ -340,13 +290,17 @@ func (am *DefaultAccountManager) InviteUser(ctx context.Context, accountID strin
 		return status.Errorf(status.PreconditionFailed, "IdP manager must be enabled to send user invites")
 	}
 
-	account, err := am.Store.GetAccount(ctx, accountID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
-		return status.Errorf(status.NotFound, "account %s doesn't exist", accountID)
+		return err
+	}
+
+	if initiatorUser.AccountID != accountID {
+		return status.NewUserNotPartOfAccountError()
 	}
 
 	// check if the user is already registered with this ID
-	user, err := am.lookupUserInCache(ctx, targetUserID, account)
+	user, err := am.lookupUserInCache(ctx, targetUserID, accountID)
 	if err != nil {
 		return err
 	}
@@ -384,35 +338,31 @@ func (am *DefaultAccountManager) CreatePAT(ctx context.Context, accountID string
 		return nil, status.Errorf(status.InvalidArgument, "expiration has to be between 1 and 365")
 	}
 
-	account, err := am.Store.GetAccount(ctx, accountID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
 		return nil, err
 	}
 
-	targetUser, ok := account.Users[targetUserID]
-	if !ok {
-		return nil, status.Errorf(status.NotFound, "user not found")
+	if initiatorUser.AccountID != accountID {
+		return nil, status.NewUserNotPartOfAccountError()
 	}
 
-	executingUser, ok := account.Users[initiatorUserID]
-	if !ok {
-		return nil, status.Errorf(status.NotFound, "user not found")
+	targetUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, targetUserID)
+	if err != nil {
+		return nil, err
 	}
 
-	if !(initiatorUserID == targetUserID || (executingUser.HasAdminPower() && targetUser.IsServiceUser)) {
-		return nil, status.Errorf(status.PermissionDenied, "no permission to create PAT for this user")
+	if initiatorUserID != targetUserID && !(initiatorUser.HasAdminPower() && targetUser.IsServiceUser) {
+		return nil, status.NewAdminPermissionError()
 	}
 
-	pat, err := types.CreateNewPAT(tokenName, expiresIn, executingUser.Id)
+	pat, err := types.CreateNewPAT(tokenName, expiresIn, targetUserID, initiatorUser.Id)
 	if err != nil {
 		return nil, status.Errorf(status.Internal, "failed to create PAT: %v", err)
 	}
 
-	targetUser.PATs[pat.ID] = &pat.PersonalAccessToken
-
-	err = am.Store.SaveAccount(ctx, account)
-	if err != nil {
-		return nil, status.Errorf(status.Internal, "failed to save account: %v", err)
+	if err = am.Store.SavePAT(ctx, store.LockingStrengthUpdate, &pat.PersonalAccessToken); err != nil {
+		return nil, err
 	}
 
 	meta := map[string]any{"name": pat.Name, "is_service_user": targetUser.IsServiceUser, "user_name": targetUser.ServiceUserName}
@@ -426,48 +376,36 @@ func (am *DefaultAccountManager) DeletePAT(ctx context.Context, accountID string
 	unlock := am.Store.AcquireWriteLockByUID(ctx, accountID)
 	defer unlock()
 
-	account, err := am.Store.GetAccount(ctx, accountID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
-		return status.Errorf(status.NotFound, "account not found: %s", err)
+		return err
 	}
 
-	targetUser, ok := account.Users[targetUserID]
-	if !ok {
-		return status.Errorf(status.NotFound, "user not found")
+	if initiatorUser.AccountID != accountID {
+		return status.NewUserNotPartOfAccountError()
 	}
 
-	executingUser, ok := account.Users[initiatorUserID]
-	if !ok {
-		return status.Errorf(status.NotFound, "user not found")
+	if initiatorUserID != targetUserID && initiatorUser.IsRegularUser() {
+		return status.NewAdminPermissionError()
 	}
 
-	if !(initiatorUserID == targetUserID || (executingUser.HasAdminPower() && targetUser.IsServiceUser)) {
-		return status.Errorf(status.PermissionDenied, "no permission to delete PAT for this user")
-	}
-
-	pat := targetUser.PATs[tokenID]
-	if pat == nil {
-		return status.Errorf(status.NotFound, "PAT not found")
-	}
-
-	err = am.Store.DeleteTokenID2UserIDIndex(pat.ID)
+	pat, err := am.Store.GetPATByID(ctx, store.LockingStrengthShare, targetUserID, tokenID)
 	if err != nil {
-		return status.Errorf(status.Internal, "Failed to delete token id index: %s", err)
+		return err
 	}
-	err = am.Store.DeleteHashedPAT2TokenIDIndex(pat.HashedToken)
+
+	targetUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, targetUserID)
 	if err != nil {
-		return status.Errorf(status.Internal, "Failed to delete hashed token index: %s", err)
+		return err
+	}
+
+	if err = am.Store.DeletePAT(ctx, store.LockingStrengthUpdate, targetUserID, tokenID); err != nil {
+		return err
 	}
 
 	meta := map[string]any{"name": pat.Name, "is_service_user": targetUser.IsServiceUser, "user_name": targetUser.ServiceUserName}
 	am.StoreEvent(ctx, initiatorUserID, targetUserID, accountID, activity.PersonalAccessTokenDeleted, meta)
 
-	delete(targetUser.PATs, tokenID)
-
-	err = am.Store.SaveAccount(ctx, account)
-	if err != nil {
-		return status.Errorf(status.Internal, "Failed to save account: %s", err)
-	}
 	return nil
 }
 
@@ -478,22 +416,15 @@ func (am *DefaultAccountManager) GetPAT(ctx context.Context, accountID string, i
 		return nil, err
 	}
 
-	targetUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, targetUserID)
-	if err != nil {
-		return nil, err
+	if initiatorUser.AccountID != accountID {
+		return nil, status.NewUserNotPartOfAccountError()
 	}
 
-	if (initiatorUserID != targetUserID && !initiatorUser.IsAdminOrServiceUser()) || initiatorUser.AccountID != accountID {
-		return nil, status.Errorf(status.PermissionDenied, "no permission to get PAT for this user")
+	if initiatorUserID != targetUserID && initiatorUser.IsRegularUser() {
+		return nil, status.NewAdminPermissionError()
 	}
 
-	for _, pat := range targetUser.PATsG {
-		if pat.ID == tokenID {
-			return pat.Copy(), nil
-		}
-	}
-
-	return nil, status.Errorf(status.NotFound, "PAT not found")
+	return am.Store.GetPATByID(ctx, store.LockingStrengthShare, targetUserID, tokenID)
 }
 
 // GetAllPATs returns all PATs for a user
@@ -503,21 +434,15 @@ func (am *DefaultAccountManager) GetAllPATs(ctx context.Context, accountID strin
 		return nil, err
 	}
 
-	targetUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, targetUserID)
-	if err != nil {
-		return nil, err
+	if initiatorUser.AccountID != accountID {
+		return nil, status.NewUserNotPartOfAccountError()
 	}
 
-	if (initiatorUserID != targetUserID && !initiatorUser.IsAdminOrServiceUser()) || initiatorUser.AccountID != accountID {
-		return nil, status.Errorf(status.PermissionDenied, "no permission to get PAT for this user")
+	if initiatorUserID != targetUserID && initiatorUser.IsRegularUser() {
+		return nil, status.NewAdminPermissionError()
 	}
 
-	pats := make([]*types.PersonalAccessToken, 0, len(targetUser.PATsG))
-	for _, pat := range targetUser.PATsG {
-		pats = append(pats, pat.Copy())
-	}
-
-	return pats, nil
+	return am.Store.GetUserPATs(ctx, store.LockingStrengthShare, targetUserID)
 }
 
 // SaveUser saves updates to the given user. If the user doesn't exist, it will throw status.NotFound error.
@@ -528,10 +453,6 @@ func (am *DefaultAccountManager) SaveUser(ctx context.Context, accountID, initia
 // SaveOrAddUser updates the given user. If addIfNotExists is set to true it will add user when no exist
 // Only User.AutoGroups, User.Role, and User.Blocked fields are allowed to be updated for now.
 func (am *DefaultAccountManager) SaveOrAddUser(ctx context.Context, accountID, initiatorUserID string, update *types.User, addIfNotExists bool) (*types.UserInfo, error) {
-	if update == nil {
-		return nil, status.Errorf(status.InvalidArgument, "provided user update is nil")
-	}
-
 	unlock := am.Store.AcquireWriteLockByUID(ctx, accountID)
 	defer unlock()
 
@@ -555,125 +476,113 @@ func (am *DefaultAccountManager) SaveOrAddUsers(ctx context.Context, accountID,
 		return nil, nil //nolint:nilnil
 	}
 
-	account, err := am.Store.GetAccount(ctx, accountID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
 		return nil, err
 	}
 
-	initiatorUser, err := account.FindUser(initiatorUserID)
-	if err != nil {
-		return nil, err
+	if initiatorUser.AccountID != accountID {
+		return nil, status.NewUserNotPartOfAccountError()
 	}
 
 	if !initiatorUser.HasAdminPower() || initiatorUser.IsBlocked() {
-		return nil, status.Errorf(status.PermissionDenied, "only users with admin power are authorized to perform user update operations")
+		return nil, status.NewAdminPermissionError()
 	}
 
-	updatedUsers := make([]*types.UserInfo, 0, len(updates))
-	var (
-		expiredPeers  []*nbpeer.Peer
-		userIDs       []string
-		eventsToStore []func()
-	)
+	settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthShare, accountID)
+	if err != nil {
+		return nil, err
+	}
 
-	for _, update := range updates {
-		if update == nil {
-			return nil, status.Errorf(status.InvalidArgument, "provided user update is nil")
-		}
+	var updateAccountPeers bool
+	var peersToExpire []*nbpeer.Peer
+	var addUserEvents []func()
+	var usersToSave = make([]*types.User, 0, len(updates))
 
-		userIDs = append(userIDs, update.Id)
+	groups, err := am.Store.GetAccountGroups(ctx, store.LockingStrengthShare, accountID)
+	if err != nil {
+		return nil, fmt.Errorf("error getting account groups: %w", err)
+	}
 
-		oldUser := account.Users[update.Id]
-		if oldUser == nil {
-			if !addIfNotExists {
-				return nil, status.Errorf(status.NotFound, "user to update doesn't exist: %s", update.Id)
+	groupsMap := make(map[string]*types.Group, len(groups))
+	for _, group := range groups {
+		groupsMap[group.ID] = group
+	}
+
+	err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
+		for _, update := range updates {
+			if update == nil {
+				return status.Errorf(status.InvalidArgument, "provided user update is nil")
 			}
-			// when addIfNotExists is set to true, the newUser will use all fields from the update input
-			oldUser = update
-		}
 
-		if err := validateUserUpdate(account, initiatorUser, oldUser, update); err != nil {
-			return nil, err
-		}
-
-		// only auto groups, revoked status, and integration reference can be updated for now
-		newUser := oldUser.Copy()
-		newUser.Role = update.Role
-		newUser.Blocked = update.Blocked
-		newUser.AutoGroups = update.AutoGroups
-		// these two fields can't be set via API, only via direct call to the method
-		newUser.Issued = update.Issued
-		newUser.IntegrationReference = update.IntegrationReference
-
-		transferredOwnerRole := handleOwnerRoleTransfer(account, initiatorUser, update)
-		account.Users[newUser.Id] = newUser
-
-		if !oldUser.IsBlocked() && update.IsBlocked() {
-			// expire peers that belong to the user who's getting blocked
-			blockedPeers, err := account.FindUserPeers(update.Id)
+			userHadPeers, updatedUser, userPeersToExpire, userEvents, err := am.processUserUpdate(
+				ctx, transaction, groupsMap, initiatorUser, update, addIfNotExists, settings,
+			)
 			if err != nil {
-				return nil, err
+				return fmt.Errorf("failed to process user update: %w", err)
+			}
+			usersToSave = append(usersToSave, updatedUser)
+			addUserEvents = append(addUserEvents, userEvents...)
+			peersToExpire = append(peersToExpire, userPeersToExpire...)
+
+			if userHadPeers {
+				updateAccountPeers = true
 			}
-			expiredPeers = append(expiredPeers, blockedPeers...)
 		}
-
-		peerGroupsAdded := make(map[string][]string)
-		peerGroupsRemoved := make(map[string][]string)
-		if update.AutoGroups != nil && account.Settings.GroupsPropagationEnabled {
-			removedGroups := util.Difference(oldUser.AutoGroups, update.AutoGroups)
-			// need force update all auto groups in any case they will not be duplicated
-			peerGroupsAdded = account.UserGroupsAddToPeers(oldUser.Id, update.AutoGroups...)
-			peerGroupsRemoved = account.UserGroupsRemoveFromPeers(oldUser.Id, removedGroups...)
-		}
-
-		userUpdateEvents := am.prepareUserUpdateEvents(ctx, initiatorUser.Id, oldUser, newUser, account, transferredOwnerRole)
-		eventsToStore = append(eventsToStore, userUpdateEvents...)
-
-		userGroupsEvents := am.prepareUserGroupsEvents(ctx, initiatorUser.Id, oldUser, newUser, account, peerGroupsAdded, peerGroupsRemoved)
-		eventsToStore = append(eventsToStore, userGroupsEvents...)
-
-		updatedUserInfo, err := getUserInfo(ctx, am, newUser, account)
-		if err != nil {
-			return nil, err
-		}
-		updatedUsers = append(updatedUsers, updatedUserInfo)
+		return transaction.SaveUsers(ctx, store.LockingStrengthUpdate, usersToSave)
+	})
+	if err != nil {
+		return nil, err
 	}
 
-	if len(expiredPeers) > 0 {
-		if err := am.expireAndUpdatePeers(ctx, account.Id, expiredPeers); err != nil {
+	var updatedUsersInfo = make([]*types.UserInfo, 0, len(updates))
+
+	userInfos, err := am.GetUsersFromAccount(ctx, accountID, initiatorUserID)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, updatedUser := range usersToSave {
+		updatedUserInfo, ok := userInfos[updatedUser.Id]
+		if !ok || updatedUserInfo == nil {
+			return nil, fmt.Errorf("failed to get user: %s updated user info", updatedUser.Id)
+		}
+		updatedUsersInfo = append(updatedUsersInfo, updatedUserInfo)
+	}
+
+	for _, addUserEvent := range addUserEvents {
+		addUserEvent()
+	}
+
+	if len(peersToExpire) > 0 {
+		if err := am.expireAndUpdatePeers(ctx, accountID, peersToExpire); err != nil {
 			log.WithContext(ctx).Errorf("failed update expired peers: %s", err)
 			return nil, err
 		}
 	}
 
-	account.Network.IncSerial()
-	if err = am.Store.SaveAccount(ctx, account); err != nil {
-		return nil, err
+	if settings.GroupsPropagationEnabled && updateAccountPeers {
+		if err = am.Store.IncrementNetworkSerial(ctx, store.LockingStrengthUpdate, accountID); err != nil {
+			return nil, fmt.Errorf("failed to increment network serial: %w", err)
+		}
+		am.UpdateAccountPeers(ctx, accountID)
 	}
 
-	if account.Settings.GroupsPropagationEnabled && areUsersLinkedToPeers(account, userIDs) {
-		am.UpdateAccountPeers(ctx, account.Id)
-	}
-
-	for _, storeEvent := range eventsToStore {
-		storeEvent()
-	}
-
-	return updatedUsers, nil
+	return updatedUsersInfo, nil
 }
 
 // prepareUserUpdateEvents prepares a list user update events based on the changes between the old and new user data.
-func (am *DefaultAccountManager) prepareUserUpdateEvents(ctx context.Context, initiatorUserID string, oldUser, newUser *types.User, account *types.Account, transferredOwnerRole bool) []func() {
+func (am *DefaultAccountManager) prepareUserUpdateEvents(ctx context.Context, accountID string, initiatorUserID string, oldUser, newUser *types.User, transferredOwnerRole bool) []func() {
 	var eventsToStore []func()
 
 	if oldUser.IsBlocked() != newUser.IsBlocked() {
 		if newUser.IsBlocked() {
 			eventsToStore = append(eventsToStore, func() {
-				am.StoreEvent(ctx, initiatorUserID, oldUser.Id, account.Id, activity.UserBlocked, nil)
+				am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.UserBlocked, nil)
 			})
 		} else {
 			eventsToStore = append(eventsToStore, func() {
-				am.StoreEvent(ctx, initiatorUserID, oldUser.Id, account.Id, activity.UserUnblocked, nil)
+				am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.UserUnblocked, nil)
 			})
 		}
 	}
@@ -681,115 +590,126 @@ func (am *DefaultAccountManager) prepareUserUpdateEvents(ctx context.Context, in
 	switch {
 	case transferredOwnerRole:
 		eventsToStore = append(eventsToStore, func() {
-			am.StoreEvent(ctx, initiatorUserID, oldUser.Id, account.Id, activity.TransferredOwnerRole, nil)
+			am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.TransferredOwnerRole, nil)
 		})
 	case oldUser.Role != newUser.Role:
 		eventsToStore = append(eventsToStore, func() {
-			am.StoreEvent(ctx, initiatorUserID, oldUser.Id, account.Id, activity.UserRoleUpdated, map[string]any{"role": newUser.Role})
+			am.StoreEvent(ctx, initiatorUserID, oldUser.Id, accountID, activity.UserRoleUpdated, map[string]any{"role": newUser.Role})
 		})
 	}
 
 	return eventsToStore
 }
 
-func (am *DefaultAccountManager) prepareUserGroupsEvents(ctx context.Context, initiatorUserID string, oldUser, newUser *types.User, account *types.Account, peerGroupsAdded, peerGroupsRemoved map[string][]string) []func() {
-	var eventsToStore []func()
-	if newUser.AutoGroups != nil {
-		removedGroups := util.Difference(oldUser.AutoGroups, newUser.AutoGroups)
-		addedGroups := util.Difference(newUser.AutoGroups, oldUser.AutoGroups)
+func (am *DefaultAccountManager) processUserUpdate(ctx context.Context, transaction store.Store, groupsMap map[string]*types.Group,
+	initiatorUser, update *types.User, addIfNotExists bool, settings *types.Settings) (bool, *types.User, []*nbpeer.Peer, []func(), error) {
 
-		removedEvents := am.handleGroupRemovedFromUser(ctx, initiatorUserID, oldUser, newUser, account, removedGroups, peerGroupsRemoved)
-		eventsToStore = append(eventsToStore, removedEvents...)
-
-		addedEvents := am.handleGroupAddedToUser(ctx, initiatorUserID, oldUser, newUser, account, addedGroups, peerGroupsAdded)
-		eventsToStore = append(eventsToStore, addedEvents...)
+	if update == nil {
+		return false, nil, nil, nil, status.Errorf(status.InvalidArgument, "provided user update is nil")
 	}
-	return eventsToStore
+
+	oldUser, err := getUserOrCreateIfNotExists(ctx, transaction, update, addIfNotExists)
+	if err != nil {
+		return false, nil, nil, nil, err
+	}
+
+	if err := validateUserUpdate(groupsMap, initiatorUser, oldUser, update); err != nil {
+		return false, nil, nil, nil, err
+	}
+
+	// only auto groups, revoked status, and integration reference can be updated for now
+	updatedUser := oldUser.Copy()
+	updatedUser.AccountID = initiatorUser.AccountID
+	updatedUser.Role = update.Role
+	updatedUser.Blocked = update.Blocked
+	updatedUser.AutoGroups = update.AutoGroups
+	// these two fields can't be set via API, only via direct call to the method
+	updatedUser.Issued = update.Issued
+	updatedUser.IntegrationReference = update.IntegrationReference
+
+	transferredOwnerRole, err := handleOwnerRoleTransfer(ctx, transaction, initiatorUser, update)
+	if err != nil {
+		return false, nil, nil, nil, err
+	}
+
+	userPeers, err := transaction.GetUserPeers(ctx, store.LockingStrengthUpdate, updatedUser.AccountID, update.Id)
+	if err != nil {
+		return false, nil, nil, nil, err
+	}
+
+	var peersToExpire []*nbpeer.Peer
+
+	if !oldUser.IsBlocked() && update.IsBlocked() {
+		peersToExpire = userPeers
+	}
+
+	if update.AutoGroups != nil && settings.GroupsPropagationEnabled {
+		removedGroups := util.Difference(oldUser.AutoGroups, update.AutoGroups)
+		updatedGroups, err := updateUserPeersInGroups(groupsMap, userPeers, update.AutoGroups, removedGroups)
+		if err != nil {
+			return false, nil, nil, nil, fmt.Errorf("error modifying user peers in groups: %w", err)
+		}
+
+		if err = transaction.SaveGroups(ctx, store.LockingStrengthUpdate, updatedGroups); err != nil {
+			return false, nil, nil, nil, fmt.Errorf("error saving groups: %w", err)
+		}
+	}
+
+	updateAccountPeers := len(userPeers) > 0
+	userEventsToAdd := am.prepareUserUpdateEvents(ctx, updatedUser.AccountID, initiatorUser.Id, oldUser, updatedUser, transferredOwnerRole)
+
+	return updateAccountPeers, updatedUser, peersToExpire, userEventsToAdd, nil
 }
 
-func (am *DefaultAccountManager) handleGroupAddedToUser(ctx context.Context, initiatorUserID string, oldUser, newUser *types.User, account *types.Account, addedGroups []string, peerGroupsAdded map[string][]string) []func() {
-	var eventsToStore []func()
-	for _, g := range addedGroups {
-		group := account.GetGroup(g)
-		if group != nil {
-			eventsToStore = append(eventsToStore, func() {
-				am.StoreEvent(ctx, initiatorUserID, oldUser.Id, account.Id, activity.GroupAddedToUser,
-					map[string]any{"group": group.Name, "group_id": group.ID, "is_service_user": newUser.IsServiceUser, "user_name": newUser.ServiceUserName})
-			})
+// getUserOrCreateIfNotExists retrieves the existing user or creates a new one if it doesn't exist.
+func getUserOrCreateIfNotExists(ctx context.Context, transaction store.Store, update *types.User, addIfNotExists bool) (*types.User, error) {
+	existingUser, err := transaction.GetUserByUserID(ctx, store.LockingStrengthShare, update.Id)
+	if err != nil {
+		if sErr, ok := status.FromError(err); ok && sErr.Type() == status.NotFound {
+			if !addIfNotExists {
+				return nil, status.Errorf(status.NotFound, "user to update doesn't exist: %s", update.Id)
+			}
+			return update, nil // use all fields from update if addIfNotExists is true
 		}
+		return nil, err
 	}
-	for groupID, peerIDs := range peerGroupsAdded {
-		group := account.GetGroup(groupID)
-		for _, peerID := range peerIDs {
-			peer := account.GetPeer(peerID)
-			eventsToStore = append(eventsToStore, func() {
-				meta := map[string]any{
-					"group": group.Name, "group_id": group.ID,
-					"peer_ip": peer.IP.String(), "peer_fqdn": peer.FQDN(am.GetDNSDomain()),
-				}
-				am.StoreEvent(ctx, activity.SystemInitiator, peer.ID, account.Id, activity.GroupAddedToPeer, meta)
-			})
-		}
-	}
-	return eventsToStore
+	return existingUser, nil
 }
 
-func (am *DefaultAccountManager) handleGroupRemovedFromUser(ctx context.Context, initiatorUserID string, oldUser, newUser *types.User, account *types.Account, removedGroups []string, peerGroupsRemoved map[string][]string) []func() {
-	var eventsToStore []func()
-	for _, g := range removedGroups {
-		group := account.GetGroup(g)
-		if group != nil {
-			eventsToStore = append(eventsToStore, func() {
-				am.StoreEvent(ctx, initiatorUserID, oldUser.Id, account.Id, activity.GroupRemovedFromUser,
-					map[string]any{"group": group.Name, "group_id": group.ID, "is_service_user": newUser.IsServiceUser, "user_name": newUser.ServiceUserName})
-			})
-
-		} else {
-			log.WithContext(ctx).Errorf("group %s not found while saving user activity event of account %s", g, account.Id)
-		}
-	}
-	for groupID, peerIDs := range peerGroupsRemoved {
-		group := account.GetGroup(groupID)
-		for _, peerID := range peerIDs {
-			peer := account.GetPeer(peerID)
-			eventsToStore = append(eventsToStore, func() {
-				meta := map[string]any{
-					"group": group.Name, "group_id": group.ID,
-					"peer_ip": peer.IP.String(), "peer_fqdn": peer.FQDN(am.GetDNSDomain()),
-				}
-				am.StoreEvent(ctx, activity.SystemInitiator, peer.ID, account.Id, activity.GroupRemovedFromPeer, meta)
-			})
-		}
-	}
-	return eventsToStore
-}
-
-func handleOwnerRoleTransfer(account *types.Account, initiatorUser, update *types.User) bool {
+func handleOwnerRoleTransfer(ctx context.Context, transaction store.Store, initiatorUser, update *types.User) (bool, error) {
 	if initiatorUser.Role == types.UserRoleOwner && initiatorUser.Id != update.Id && update.Role == types.UserRoleOwner {
 		newInitiatorUser := initiatorUser.Copy()
 		newInitiatorUser.Role = types.UserRoleAdmin
-		account.Users[initiatorUser.Id] = newInitiatorUser
-		return true
+
+		if err := transaction.SaveUser(ctx, store.LockingStrengthUpdate, newInitiatorUser); err != nil {
+			return false, err
+		}
+		return true, nil
 	}
-	return false
+	return false, nil
 }
 
 // getUserInfo retrieves the UserInfo for a given User and Account.
 // If the AccountManager has a non-nil idpManager and the User is not a service user,
 // it will attempt to look up the UserData from the cache.
-func getUserInfo(ctx context.Context, am *DefaultAccountManager, user *types.User, account *types.Account) (*types.UserInfo, error) {
+func (am *DefaultAccountManager) getUserInfo(ctx context.Context, user *types.User, accountID string) (*types.UserInfo, error) {
+	settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthShare, accountID)
+	if err != nil {
+		return nil, err
+	}
+
 	if !isNil(am.idpManager) && !user.IsServiceUser {
-		userData, err := am.lookupUserInCache(ctx, user.Id, account)
+		userData, err := am.lookupUserInCache(ctx, user.Id, accountID)
 		if err != nil {
 			return nil, err
 		}
-		return user.ToUserInfo(userData, account.Settings)
+		return user.ToUserInfo(userData, settings)
 	}
-	return user.ToUserInfo(nil, account.Settings)
+	return user.ToUserInfo(nil, settings)
 }
 
 // validateUserUpdate validates the update operation for a user.
-func validateUserUpdate(account *types.Account, initiatorUser, oldUser, update *types.User) error {
+func validateUserUpdate(groupsMap map[string]*types.Group, initiatorUser, oldUser, update *types.User) error {
 	if initiatorUser.HasAdminPower() && initiatorUser.Id == update.Id && oldUser.Blocked != update.Blocked {
 		return status.Errorf(status.PermissionDenied, "admins can't block or unblock themselves")
 	}
@@ -810,12 +730,12 @@ func validateUserUpdate(account *types.Account, initiatorUser, oldUser, update *
 	}
 
 	for _, newGroupID := range update.AutoGroups {
-		group, ok := account.Groups[newGroupID]
+		group, ok := groupsMap[newGroupID]
 		if !ok {
 			return status.Errorf(status.InvalidArgument, "provided group ID %s in the user %s update doesn't exist",
 				newGroupID, update.Id)
 		}
-		if group.Name == "All" {
+		if group.IsGroupAll() {
 			return status.Errorf(status.InvalidArgument, "can't add All group to the user")
 		}
 	}
@@ -864,22 +784,38 @@ func (am *DefaultAccountManager) GetOrCreateAccountByUser(ctx context.Context, u
 
 // GetUsersFromAccount performs a batched request for users from IDP by account ID apply filter on what data to return
 // based on provided user role.
-func (am *DefaultAccountManager) GetUsersFromAccount(ctx context.Context, accountID, userID string) ([]*types.UserInfo, error) {
-	account, err := am.Store.GetAccount(ctx, accountID)
+func (am *DefaultAccountManager) GetUsersFromAccount(ctx context.Context, accountID, initiatorUserID string) (map[string]*types.UserInfo, error) {
+	accountUsers, err := am.Store.GetAccountUsers(ctx, store.LockingStrengthShare, accountID)
 	if err != nil {
 		return nil, err
 	}
 
-	user, err := account.FindUser(userID)
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
+	if err != nil {
+		return nil, err
+	}
+
+	if initiatorUser.AccountID != accountID {
+		return nil, status.NewUserNotPartOfAccountError()
+	}
+
+	return am.BuildUserInfosForAccount(ctx, accountID, initiatorUserID, accountUsers)
+}
+
+// BuildUserInfosForAccount builds user info for the given account.
+func (am *DefaultAccountManager) BuildUserInfosForAccount(ctx context.Context, accountID, initiatorUserID string, accountUsers []*types.User) (map[string]*types.UserInfo, error) {
+	var queriedUsers []*idp.UserData
+	var err error
+
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
 		return nil, err
 	}
 
-	queriedUsers := make([]*idp.UserData, 0)
 	if !isNil(am.idpManager) {
-		users := make(map[string]userLoggedInOnce, len(account.Users))
+		users := make(map[string]userLoggedInOnce, len(accountUsers))
 		usersFromIntegration := make([]*idp.UserData, 0)
-		for _, user := range account.Users {
+		for _, user := range accountUsers {
 			if user.Issued == types.UserIssuedIntegration {
 				key := user.IntegrationReference.CacheKey(accountID, user.Id)
 				info, err := am.externalCacheManager.Get(am.ctx, key)
@@ -904,33 +840,40 @@ func (am *DefaultAccountManager) GetUsersFromAccount(ctx context.Context, accoun
 		queriedUsers = append(queriedUsers, usersFromIntegration...)
 	}
 
-	userInfos := make([]*types.UserInfo, 0)
+	settings, err := am.Store.GetAccountSettings(ctx, store.LockingStrengthShare, accountID)
+	if err != nil {
+		return nil, err
+	}
+
+	userInfosMap := make(map[string]*types.UserInfo)
 
 	// in case of self-hosted, or if the IDP doesn't return anything, we return the locally stored userInfo
 	if len(queriedUsers) == 0 {
-		for _, accountUser := range account.Users {
-			if !(user.HasAdminPower() || user.IsServiceUser || user.Id == accountUser.Id) {
+		for _, accountUser := range accountUsers {
+			if initiatorUser.IsRegularUser() && initiatorUser.Id != accountUser.Id {
 				// if user is not an admin then show only current user and do not show other users
 				continue
 			}
-			info, err := accountUser.ToUserInfo(nil, account.Settings)
+
+			info, err := accountUser.ToUserInfo(nil, settings)
 			if err != nil {
 				return nil, err
 			}
-			userInfos = append(userInfos, info)
+			userInfosMap[accountUser.Id] = info
 		}
-		return userInfos, nil
+
+		return userInfosMap, nil
 	}
 
-	for _, localUser := range account.Users {
-		if !(user.HasAdminPower() || user.IsServiceUser) && user.Id != localUser.Id {
+	for _, localUser := range accountUsers {
+		if initiatorUser.IsRegularUser() && initiatorUser.Id != localUser.Id {
 			// if user is not an admin then show only current user and do not show other users
 			continue
 		}
 
 		var info *types.UserInfo
 		if queriedUser, contains := findUserInIDPUserdata(localUser.Id, queriedUsers); contains {
-			info, err = localUser.ToUserInfo(queriedUser, account.Settings)
+			info, err = localUser.ToUserInfo(queriedUser, settings)
 			if err != nil {
 				return nil, err
 			}
@@ -943,7 +886,7 @@ func (am *DefaultAccountManager) GetUsersFromAccount(ctx context.Context, accoun
 			dashboardViewPermissions := "full"
 			if !localUser.HasAdminPower() {
 				dashboardViewPermissions = "limited"
-				if account.Settings.RegularUsersViewBlocked {
+				if settings.RegularUsersViewBlocked {
 					dashboardViewPermissions = "blocked"
 				}
 			}
@@ -960,10 +903,10 @@ func (am *DefaultAccountManager) GetUsersFromAccount(ctx context.Context, accoun
 				Permissions:   types.UserPermissions{DashboardView: dashboardViewPermissions},
 			}
 		}
-		userInfos = append(userInfos, info)
+		userInfosMap[info.ID] = info
 	}
 
-	return userInfos, nil
+	return userInfosMap, nil
 }
 
 // expireAndUpdatePeers expires all peers of the given user and updates them in the account
@@ -1017,55 +960,34 @@ func (am *DefaultAccountManager) deleteUserFromIDP(ctx context.Context, targetUs
 	return nil
 }
 
-func (am *DefaultAccountManager) getEmailAndNameOfTargetUser(ctx context.Context, accountId, initiatorId, targetId string) (string, string, error) {
-	userInfos, err := am.GetUsersFromAccount(ctx, accountId, initiatorId)
-	if err != nil {
-		return "", "", err
-	}
-	for _, ui := range userInfos {
-		if ui.ID == targetId {
-			return ui.Email, ui.Name, nil
-		}
-	}
-
-	return "", "", fmt.Errorf("user info not found for user: %s", targetId)
-}
-
 // DeleteRegularUsers deletes regular users from an account.
 // Note: This function does not acquire the global lock.
 // It is the caller's responsibility to ensure proper locking is in place before invoking this method.
 //
 // If an error occurs while deleting the user, the function skips it and continues deleting other users.
 // Errors are collected and returned at the end.
-func (am *DefaultAccountManager) DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string) error {
-	account, err := am.Store.GetAccount(ctx, accountID)
+func (am *DefaultAccountManager) DeleteRegularUsers(ctx context.Context, accountID, initiatorUserID string, targetUserIDs []string, userInfos map[string]*types.UserInfo) error {
+	initiatorUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, initiatorUserID)
 	if err != nil {
 		return err
 	}
 
-	executingUser := account.Users[initiatorUserID]
-	if executingUser == nil {
-		return status.Errorf(status.NotFound, "user not found")
-	}
-	if !executingUser.HasAdminPower() {
-		return status.Errorf(status.PermissionDenied, "only users with admin power can delete users")
+	if !initiatorUser.HasAdminPower() {
+		return status.NewAdminPermissionError()
 	}
 
-	var (
-		allErrors          error
-		updateAccountPeers bool
-	)
+	var allErrors error
+	var updateAccountPeers bool
 
-	deletedUsersMeta := make(map[string]map[string]any)
 	for _, targetUserID := range targetUserIDs {
 		if initiatorUserID == targetUserID {
 			allErrors = errors.Join(allErrors, errors.New("self deletion is not allowed"))
 			continue
 		}
 
-		targetUser := account.Users[targetUserID]
-		if targetUser == nil {
-			allErrors = errors.Join(allErrors, fmt.Errorf("target user: %s not found", targetUserID))
+		targetUser, err := am.Store.GetUserByUserID(ctx, store.LockingStrengthShare, targetUserID)
+		if err != nil {
+			allErrors = errors.Join(allErrors, err)
 			continue
 		}
 
@@ -1075,88 +997,97 @@ func (am *DefaultAccountManager) DeleteRegularUsers(ctx context.Context, account
 		}
 
 		// disable deleting integration user if the initiator is not admin service user
-		if targetUser.Issued == types.UserIssuedIntegration && !executingUser.IsServiceUser {
+		if targetUser.Issued == types.UserIssuedIntegration && !initiatorUser.IsServiceUser {
 			allErrors = errors.Join(allErrors, errors.New("only integration service user can delete this user"))
 			continue
 		}
 
-		meta, hadPeers, err := am.prepareUserDeletion(ctx, account, initiatorUserID, targetUserID)
-		if err != nil {
-			allErrors = errors.Join(allErrors, fmt.Errorf("failed to delete user %s: %s", targetUserID, err))
+		userInfo, ok := userInfos[targetUserID]
+		if !ok || userInfo == nil {
+			allErrors = errors.Join(allErrors, fmt.Errorf("user info not found for user: %s", targetUserID))
 			continue
 		}
 
-		if hadPeers {
-			updateAccountPeers = true
+		userHadPeers, err := am.deleteRegularUser(ctx, accountID, initiatorUserID, userInfo)
+		if err != nil {
+			allErrors = errors.Join(allErrors, err)
+			continue
 		}
 
-		delete(account.Users, targetUserID)
-		deletedUsersMeta[targetUserID] = meta
-	}
-
-	if updateAccountPeers {
-		account.Network.IncSerial()
-	}
-	err = am.Store.SaveAccount(ctx, account)
-	if err != nil {
-		return fmt.Errorf("failed to delete users: %w", err)
+		if userHadPeers {
+			updateAccountPeers = true
+		}
 	}
 
 	if updateAccountPeers {
 		am.UpdateAccountPeers(ctx, accountID)
 	}
 
-	for targetUserID, meta := range deletedUsersMeta {
-		am.StoreEvent(ctx, initiatorUserID, targetUserID, account.Id, activity.UserDeleted, meta)
-	}
-
 	return allErrors
 }
 
-func (am *DefaultAccountManager) prepareUserDeletion(ctx context.Context, account *types.Account, initiatorUserID, targetUserID string) (map[string]any, bool, error) {
-	tuEmail, tuName, err := am.getEmailAndNameOfTargetUser(ctx, account.Id, initiatorUserID, targetUserID)
-	if err != nil {
-		log.WithContext(ctx).Errorf("failed to resolve email address: %s", err)
-		return nil, false, err
-	}
-
+// deleteRegularUser deletes a specified user and their related peers from the account.
+func (am *DefaultAccountManager) deleteRegularUser(ctx context.Context, accountID, initiatorUserID string, targetUserInfo *types.UserInfo) (bool, error) {
 	if !isNil(am.idpManager) {
 		// Delete the user from the IdP if they already exist there. Necessary in cases
 		// where a user account was provisioned but the user never signed in.
-		_, err = am.idpManager.GetUserDataByID(ctx, targetUserID, idp.AppMetadata{WTAccountID: account.Id})
+		_, err := am.idpManager.GetUserDataByID(ctx, targetUserInfo.ID, idp.AppMetadata{WTAccountID: accountID})
 		if err == nil {
-			err = am.deleteUserFromIDP(ctx, targetUserID, account.Id)
+			err = am.deleteUserFromIDP(ctx, targetUserInfo.ID, accountID)
 			if err != nil {
-				log.WithContext(ctx).Debugf("failed to delete user from IDP: %s", targetUserID)
-				return nil, false, err
+				log.WithContext(ctx).Debugf("failed to delete user from IDP: %s", targetUserInfo.ID)
+				return false, err
 			}
 		} else {
-			log.WithContext(ctx).Debugf("skipped deleting user %s from IDP, error: %v", targetUserID, err)
+			log.WithContext(ctx).Debugf("skipped deleting user %s from IDP, error: %v", targetUserInfo.ID, err)
 		}
 	}
 
-	hadPeers, err := am.deleteUserPeers(ctx, initiatorUserID, targetUserID, account)
+	var addPeerRemovedEvents []func()
+	var updateAccountPeers bool
+	var targetUser *types.User
+	var err error
+
+	err = am.Store.ExecuteInTransaction(ctx, func(transaction store.Store) error {
+		targetUser, err = transaction.GetUserByUserID(ctx, store.LockingStrengthShare, targetUserInfo.ID)
+		if err != nil {
+			return fmt.Errorf("failed to get user to delete: %w", err)
+		}
+
+		userPeers, err := transaction.GetUserPeers(ctx, store.LockingStrengthShare, accountID, targetUserInfo.ID)
+		if err != nil {
+			return fmt.Errorf("failed to get user peers: %w", err)
+		}
+
+		if len(userPeers) > 0 {
+			updateAccountPeers = true
+			addPeerRemovedEvents, err = deletePeers(ctx, am, transaction, accountID, targetUserInfo.ID, userPeers)
+			if err != nil {
+				return fmt.Errorf("failed to delete user peers: %w", err)
+			}
+		}
+
+		if err = transaction.DeleteUser(ctx, store.LockingStrengthUpdate, accountID, targetUserInfo.ID); err != nil {
+			return fmt.Errorf("failed to delete user: %s %w", targetUserInfo.ID, err)
+		}
+
+		return nil
+	})
 	if err != nil {
-		return nil, false, err
+		return false, err
 	}
 
-	u, err := account.FindUser(targetUserID)
-	if err != nil {
-		log.WithContext(ctx).Errorf("failed to find user %s for deletion, this should never happen: %s", targetUserID, err)
+	for _, addPeerRemovedEvent := range addPeerRemovedEvents {
+		addPeerRemovedEvent()
 	}
+	meta := map[string]any{"name": targetUserInfo.Name, "email": targetUserInfo.Email, "created_at": targetUser.CreatedAt}
+	am.StoreEvent(ctx, initiatorUserID, targetUser.Id, accountID, activity.UserDeleted, meta)
 
-	var tuCreatedAt time.Time
-	if u != nil {
-		tuCreatedAt = u.CreatedAt
-	}
-
-	return map[string]any{"name": tuName, "email": tuEmail, "created_at": tuCreatedAt}, hadPeers, nil
+	return updateAccountPeers, nil
 }
 
 // updateUserPeersInGroups updates the user's peers in the specified groups by adding or removing them.
-func (am *DefaultAccountManager) updateUserPeersInGroups(accountGroups map[string]*types.Group, peers []*nbpeer.Peer, groupsToAdd,
-	groupsToRemove []string) (groupsToUpdate []*types.Group, err error) {
-
+func updateUserPeersInGroups(accountGroups map[string]*types.Group, peers []*nbpeer.Peer, groupsToAdd, groupsToRemove []string) (groupsToUpdate []*types.Group, err error) {
 	if len(groupsToAdd) == 0 && len(groupsToRemove) == 0 {
 		return
 	}
@@ -1230,12 +1161,22 @@ func findUserInIDPUserdata(userID string, userData []*idp.UserData) (*idp.UserDa
 	return nil, false
 }
 
-// areUsersLinkedToPeers checks if any of the given userIDs are linked to any of the peers in the account.
-func areUsersLinkedToPeers(account *types.Account, userIDs []string) bool {
-	for _, peer := range account.Peers {
-		if slices.Contains(userIDs, peer.UserID) {
-			return true
-		}
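+// validateUserInvite checks that the invite is not nil, has a name and an email,
+// and does not attempt to assign the owner role.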
+func validateUserInvite(invite *types.UserInfo) error {
+	if invite == nil {
+		return fmt.Errorf("provided user update is nil")
 	}
-	return false
+
+	invitedRole := types.StrRoleToUserRole(invite.Role)
+
+	switch {
+	case invite.Name == "":
+		return status.Errorf(status.InvalidArgument, "name can't be empty")
+	case invite.Email == "":
+		return status.Errorf(status.InvalidArgument, "email can't be empty")
+	case invitedRole == types.UserRoleOwner:
+		return status.Errorf(status.InvalidArgument, "can't invite a user with owner role")
+	}
+
+	return nil
 }
diff --git a/management/server/user_test.go b/management/server/user_test.go
index a028d164b..4a532c8a6 100644
--- a/management/server/user_test.go
+++ b/management/server/user_test.go
@@ -11,6 +11,7 @@ import (
 	cacheStore "github.com/eko/gocache/v3/store"
 	"github.com/google/go-cmp/cmp"
 	"github.com/netbirdio/netbird/management/server/util"
+	"golang.org/x/exp/maps"
 
 	nbpeer "github.com/netbirdio/netbird/management/server/peer"
 	"github.com/netbirdio/netbird/management/server/store"
@@ -45,7 +46,7 @@ const (
 )
 
 func TestUser_CreatePAT_ForSameUser(t *testing.T) {
-	store, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir())
+	s, cleanup, err := store.NewTestStoreFromSQL(context.Background(), "", t.TempDir())
 	if err != nil {
 		t.Fatalf("Error when creating store: %s", err)
 	}
@@ -53,13 +54,13 @@ func TestUser_CreatePAT_ForSameUser(t *testing.T) {
 
 	account := newAccountWithId(context.Background(), mockAccountID, mockUserID, "")
 
-	err = store.SaveAccount(context.Background(), account)
+	err = s.SaveAccount(context.Background(), account)
 	if err != nil {
 		t.Fatalf("Error when saving account: %s", err)
 	}
 
 	am := DefaultAccountManager{
-		Store:      store,
+		Store:      s,
 		eventStore: &activity.InMemoryEventStore{},
 	}
 
@@ -81,7 +82,7 @@ func TestUser_CreatePAT_ForSameUser(t *testing.T) {
 
 	assert.Equal(t, pat.ID, tokenID)
 
-	user, err := am.Store.GetUserByTokenID(context.Background(), tokenID)
+	user, err := am.Store.GetUserByPATID(context.Background(), store.LockingStrengthShare, tokenID)
 	if err != nil {
 		t.Fatalf("Error when getting user by token ID: %s", err)
 	}
@@ -855,7 +856,7 @@ func TestUser_DeleteUser_RegularUsers(t *testing.T) {
 		{
 			name:               "Delete non-existent user",
 			userIDs:            []string{"non-existent-user"},
-			expectedReasons:    []string{"target user: non-existent-user not found"},
+			expectedReasons:    []string{"user: non-existent-user not found"},
 			expectedNotDeleted: []string{},
 		},
 		{
@@ -867,7 +868,10 @@ func TestUser_DeleteUser_RegularUsers(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			err = am.DeleteRegularUsers(context.Background(), mockAccountID, mockUserID, tc.userIDs)
+			userInfos, err := am.BuildUserInfosForAccount(context.Background(), mockAccountID, mockUserID, maps.Values(account.Users))
+			assert.NoError(t, err)
+
+			err = am.DeleteRegularUsers(context.Background(), mockAccountID, mockUserID, tc.userIDs, userInfos)
 			if len(tc.expectedReasons) > 0 {
 				assert.Error(t, err)
 				var foundExpectedErrors int
diff --git a/relay/client/client.go b/relay/client/client.go
index 3c23b70d2..9e7e54393 100644
--- a/relay/client/client.go
+++ b/relay/client/client.go
@@ -141,7 +141,6 @@ type Client struct {
 	muInstanceURL    sync.Mutex
 
 	onDisconnectListener func(string)
-	onConnectedListener  func()
 	listenerMutex        sync.Mutex
 }
 
@@ -190,7 +189,6 @@ func (c *Client) Connect() error {
 
 	c.wgReadLoop.Add(1)
 	go c.readLoop(c.relayConn)
-	go c.notifyConnected()
 
 	return nil
 }
@@ -238,12 +236,6 @@ func (c *Client) SetOnDisconnectListener(fn func(string)) {
 	c.onDisconnectListener = fn
 }
 
-func (c *Client) SetOnConnectedListener(fn func()) {
-	c.listenerMutex.Lock()
-	defer c.listenerMutex.Unlock()
-	c.onConnectedListener = fn
-}
-
 // HasConns returns true if there are connections.
 func (c *Client) HasConns() bool {
 	c.mu.Lock()
@@ -559,16 +551,6 @@ func (c *Client) notifyDisconnected() {
 	go c.onDisconnectListener(c.connectionURL)
 }
 
-func (c *Client) notifyConnected() {
-	c.listenerMutex.Lock()
-	defer c.listenerMutex.Unlock()
-
-	if c.onConnectedListener == nil {
-		return
-	}
-	go c.onConnectedListener()
-}
-
 func (c *Client) writeCloseMsg() {
 	msg := messages.MarshalCloseMsg()
 	_, err := c.relayConn.Write(msg)
diff --git a/relay/client/dialer/ws/ws.go b/relay/client/dialer/ws/ws.go
index b007e24bb..cb525865b 100644
--- a/relay/client/dialer/ws/ws.go
+++ b/relay/client/dialer/ws/ws.go
@@ -11,8 +11,8 @@ import (
 	"net/url"
 	"strings"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/coder/websocket"
+	log "github.com/sirupsen/logrus"
 
 	"github.com/netbirdio/netbird/relay/server/listener/ws"
 	"github.com/netbirdio/netbird/util/embeddedroots"
diff --git a/relay/client/guard.go b/relay/client/guard.go
index b971363a8..554330ea3 100644
--- a/relay/client/guard.go
+++ b/relay/client/guard.go
@@ -14,8 +14,9 @@ var (
 
 // Guard manages the reconnection attempts to the Relay server in case of a disconnection event.
 type Guard struct {
-	// OnNewRelayClient is a channel that is used to notify the relay client about a new relay client instance.
+	// OnNewRelayClient is a channel that is used to notify the relay manager about a new relay client instance.
 	OnNewRelayClient chan *Client
+	OnReconnected    chan struct{}
 	serverPicker     *ServerPicker
 }
 
@@ -23,6 +24,7 @@ type Guard struct {
 func NewGuard(sp *ServerPicker) *Guard {
 	g := &Guard{
 		OnNewRelayClient: make(chan *Client, 1),
+		OnReconnected:    make(chan struct{}, 1),
 		serverPicker:     sp,
 	}
 	return g
@@ -39,14 +41,13 @@ func NewGuard(sp *ServerPicker) *Guard {
 // - relayClient: The relay client instance that was disconnected.
 // todo prevent multiple reconnection instances. In the current usage it should not happen, but it is better to prevent it
 func (g *Guard) StartReconnectTrys(ctx context.Context, relayClient *Client) {
-	if relayClient == nil {
-		goto RETRY
-	}
-	if g.isServerURLStillValid(relayClient) && g.quickReconnect(ctx, relayClient) {
+	// try to reconnect to the same server
+	if ok := g.tryToQuickReconnect(ctx, relayClient); ok {
+		g.notifyReconnected()
 		return
 	}
 
-RETRY:
+	// start a ticker to pick a new server
 	ticker := exponentTicker(ctx)
 	defer ticker.Stop()
 
@@ -64,6 +65,28 @@ RETRY:
 	}
 }
 
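+// tryToQuickReconnect attempts to reconnect to the previously used Relay server
+// after a short delay, as long as its URL is still considered valid.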
+func (g *Guard) tryToQuickReconnect(parentCtx context.Context, rc *Client) bool {
+	if rc == nil {
+		return false
+	}
+
+	if !g.isServerURLStillValid(rc) {
+		return false
+	}
+
+	if ok := waitBeforeRetry(parentCtx); !ok {
+		return false
+	}
+
+	log.Infof("try to reconnect to Relay server: %s", rc.connectionURL)
+
+	if err := rc.Connect(); err != nil {
+		log.Errorf("failed to reconnect to relay server: %s", err)
+		return false
+	}
+	return true
+}
+
 func (g *Guard) retry(ctx context.Context) error {
 	log.Infof("try to pick up a new Relay server")
 	relayClient, err := g.serverPicker.PickServer(ctx)
@@ -78,23 +101,6 @@ func (g *Guard) retry(ctx context.Context) error {
 	return nil
 }
 
-func (g *Guard) quickReconnect(parentCtx context.Context, rc *Client) bool {
-	ctx, cancel := context.WithTimeout(parentCtx, 1500*time.Millisecond)
-	defer cancel()
-	<-ctx.Done()
-
-	if parentCtx.Err() != nil {
-		return false
-	}
-	log.Infof("try to reconnect to Relay server: %s", rc.connectionURL)
-
-	if err := rc.Connect(); err != nil {
-		log.Errorf("failed to reconnect to relay server: %s", err)
-		return false
-	}
-	return true
-}
-
 func (g *Guard) drainRelayClientChan() {
 	select {
 	case <-g.OnNewRelayClient:
@@ -111,6 +117,13 @@ func (g *Guard) isServerURLStillValid(rc *Client) bool {
 	return false
 }
 
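+// notifyReconnected signals a successful quick reconnect to the manager without
+// blocking if the previous notification has not been consumed yet.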
+func (g *Guard) notifyReconnected() {
+	select {
+	case g.OnReconnected <- struct{}{}:
+	default:
+	}
+}
+
 func exponentTicker(ctx context.Context) *backoff.Ticker {
 	bo := backoff.WithContext(&backoff.ExponentialBackOff{
 		InitialInterval: 2 * time.Second,
@@ -121,3 +134,15 @@ func exponentTicker(ctx context.Context) *backoff.Ticker {
 
 	return backoff.NewTicker(bo)
 }
+
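+// waitBeforeRetry waits a short grace period before a reconnect attempt.
+// It returns true once the delay has elapsed and false if the context was cancelled first.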
+func waitBeforeRetry(ctx context.Context) bool {
+	timer := time.NewTimer(1500 * time.Millisecond)
+	defer timer.Stop()
+
+	select {
+	case <-timer.C:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
diff --git a/relay/client/manager.go b/relay/client/manager.go
index d847bb879..26b113050 100644
--- a/relay/client/manager.go
+++ b/relay/client/manager.go
@@ -165,6 +165,9 @@ func (m *Manager) Ready() bool {
 }
 
 func (m *Manager) SetOnReconnectedListener(f func()) {
+	m.listenerLock.Lock()
+	defer m.listenerLock.Unlock()
+
 	m.onReconnectedListenerFn = f
 }
 
@@ -284,6 +287,9 @@ func (m *Manager) openConnVia(serverAddress, peerKey string) (net.Conn, error) {
 }
 
 func (m *Manager) onServerConnected() {
+	m.listenerLock.Lock()
+	defer m.listenerLock.Unlock()
+
 	if m.onReconnectedListenerFn == nil {
 		return
 	}
@@ -304,8 +310,11 @@ func (m *Manager) onServerDisconnected(serverAddress string) {
 func (m *Manager) listenGuardEvent(ctx context.Context) {
 	for {
 		select {
+		case <-m.reconnectGuard.OnReconnected:
+			m.onServerConnected()
 		case rc := <-m.reconnectGuard.OnNewRelayClient:
 			m.storeClient(rc)
+			m.onServerConnected()
 		case <-ctx.Done():
 			return
 		}
@@ -317,7 +326,6 @@ func (m *Manager) storeClient(client *Client) {
 	defer m.relayClientMu.Unlock()
 
 	m.relayClient = client
-	m.relayClient.SetOnConnectedListener(m.onServerConnected)
 	m.relayClient.SetOnDisconnectListener(m.onServerDisconnected)
 }
 
diff --git a/relay/server/listener/ws/conn.go b/relay/server/listener/ws/conn.go
index 3466b2abd..3ec08945b 100644
--- a/relay/server/listener/ws/conn.go
+++ b/relay/server/listener/ws/conn.go
@@ -8,8 +8,8 @@ import (
 	"sync"
 	"time"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/coder/websocket"
+	log "github.com/sirupsen/logrus"
 )
 
 const (
diff --git a/relay/server/listener/ws/listener.go b/relay/server/listener/ws/listener.go
index 4597669dc..3a95951ee 100644
--- a/relay/server/listener/ws/listener.go
+++ b/relay/server/listener/ws/listener.go
@@ -8,8 +8,8 @@ import (
 	"net"
 	"net/http"
 
-	log "github.com/sirupsen/logrus"
 	"github.com/coder/websocket"
+	log "github.com/sirupsen/logrus"
 )
 
 // URLPath is the path for the websocket connection.
diff --git a/signal/cmd/run.go b/signal/cmd/run.go
index 1bb2f1d0c..3a671a848 100644
--- a/signal/cmd/run.go
+++ b/signal/cmd/run.go
@@ -8,6 +8,8 @@ import (
 	"fmt"
 	"net"
 	"net/http"
+	// nolint:gosec
+	_ "net/http/pprof"
 	"strings"
 	"time"
 
@@ -82,6 +84,8 @@ var (
 		RunE: func(cmd *cobra.Command, args []string) error {
 			flag.Parse()
 
+			startPprof()
+
 			opts, certManager, err := getTLSConfigurations()
 			if err != nil {
 				return err
@@ -170,6 +174,15 @@ var (
 	}
 )
 
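+// startPprof serves the net/http/pprof handlers registered by the blank import
+// above on a local-only debug listener.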
+func startPprof() {
+	go func() {
+		log.Debugf("Starting pprof server on 127.0.0.1:6060")
+		if err := http.ListenAndServe("127.0.0.1:6060", nil); err != nil {
+			log.Fatalf("pprof server failed: %v", err)
+		}
+	}()
+}
+
 func getTLSConfigurations() ([]grpc.ServerOption, *autocert.Manager, error) {
 	var (
 		err         error
diff --git a/signal/metrics/app.go b/signal/metrics/app.go
index b3457cf96..e3b1c67cd 100644
--- a/signal/metrics/app.go
+++ b/signal/metrics/app.go
@@ -20,6 +20,8 @@ type AppMetrics struct {
 	MessagesForwarded      metric.Int64Counter
 	MessageForwardFailures metric.Int64Counter
 	MessageForwardLatency  metric.Float64Histogram
+
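+	// MessageSize records the size of each forwarded message in bytes.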
+	MessageSize metric.Int64Histogram
 }
 
 func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) {
@@ -97,6 +99,16 @@ func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) {
 		return nil, err
 	}
 
+	messageSize, err := meter.Int64Histogram(
+		"message.size.bytes",
+		metric.WithUnit("bytes"),
+		metric.WithExplicitBucketBoundaries(getMessageSizeBucketBoundaries()...),
+		metric.WithDescription("Records the size of each message sent"),
+	)
+	if err != nil {
+		return nil, err
+	}
+
 	return &AppMetrics{
 		Meter: meter,
 
@@ -112,9 +124,26 @@ func NewAppMetrics(meter metric.Meter) (*AppMetrics, error) {
 		MessagesForwarded:      messagesForwarded,
 		MessageForwardFailures: messageForwardFailures,
 		MessageForwardLatency:  messageForwardLatency,
+
+		MessageSize: messageSize,
 	}, nil
 }
 
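+// getMessageSizeBucketBoundaries returns the histogram bucket boundaries for the
+// message size metric, ranging from 100 bytes to 1 MB.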
+func getMessageSizeBucketBoundaries() []float64 {
+	return []float64{
+		100,
+		250,
+		500,
+		1000,
+		5000,
+		10000,
+		50000,
+		100000,
+		500000,
+		1000000,
+	}
+}
+
 func getStandardBucketBoundaries() []float64 {
 	return []float64{
 		0.1,
diff --git a/signal/server/signal.go b/signal/server/signal.go
index 305fd052b..3cae7e860 100644
--- a/signal/server/signal.go
+++ b/signal/server/signal.go
@@ -13,6 +13,7 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
+	gproto "google.golang.org/protobuf/proto"
 
 	"github.com/netbirdio/netbird/signal/metrics"
 	"github.com/netbirdio/netbird/signal/peer"
@@ -52,13 +53,13 @@ func NewServer(ctx context.Context, meter metric.Meter) (*Server, error) {
 		return nil, fmt.Errorf("creating app metrics: %v", err)
 	}
 
-	dispatcher, err := dispatcher.NewDispatcher(ctx, meter)
+	d, err := dispatcher.NewDispatcher(ctx, meter)
 	if err != nil {
 		return nil, fmt.Errorf("creating dispatcher: %v", err)
 	}
 
 	s := &Server{
-		dispatcher: dispatcher,
+		dispatcher: d,
 		registry:   peer.NewRegistry(appMetrics),
 		metrics:    appMetrics,
 	}
@@ -75,7 +76,7 @@ func (s *Server) Send(ctx context.Context, msg *proto.EncryptedMessage) (*proto.
 		return &proto.EncryptedMessage{}, nil
 	}
 
-	return s.dispatcher.SendMessage(context.Background(), msg)
+	return s.dispatcher.SendMessage(ctx, msg)
 }
 
 // ConnectStream connects to the exchange stream
@@ -98,76 +99,83 @@ func (s *Server) ConnectStream(stream proto.SignalExchange_ConnectStreamServer)
 	log.Debugf("peer connected [%s] [streamID %d] ", p.Id, p.StreamID)
 
 	for {
-		// read incoming messages
-		msg, err := stream.Recv()
-		if err == io.EOF {
-			break
-		} else if err != nil {
-			return err
-		}
+		select {
+		case <-stream.Context().Done():
+			log.Debugf("stream closed for peer [%s] [streamID %d] due to context cancellation", p.Id, p.StreamID)
+			return stream.Context().Err()
+		default:
+			// read incoming messages
+			msg, err := stream.Recv()
+			if err == io.EOF {
+				// a bare break would only exit the select, not the read loop; return to close the stream
+				return nil
+			} else if err != nil {
+				return err
+			}
 
-		log.Debugf("Received a response from peer [%s] to peer [%s]", msg.Key, msg.RemoteKey)
+			log.Debugf("Received a response from peer [%s] to peer [%s]", msg.Key, msg.RemoteKey)
 
-		_, err = s.dispatcher.SendMessage(stream.Context(), msg)
-		if err != nil {
-			log.Debugf("error while sending message from peer [%s] to peer [%s] %v", msg.Key, msg.RemoteKey, err)
+			_, err = s.dispatcher.SendMessage(stream.Context(), msg)
+			if err != nil {
+				log.Debugf("error while sending message from peer [%s] to peer [%s] %v", msg.Key, msg.RemoteKey, err)
+			}
 		}
 	}
-
-	<-stream.Context().Done()
-	return stream.Context().Err()
 }
 
 func (s *Server) RegisterPeer(stream proto.SignalExchange_ConnectStreamServer) (*peer.Peer, error) {
 	log.Debugf("registering new peer")
-	if meta, hasMeta := metadata.FromIncomingContext(stream.Context()); hasMeta {
-		if id, found := meta[proto.HeaderId]; found {
-			p := peer.NewPeer(id[0], stream)
-
-			s.registry.Register(p)
-			s.dispatcher.ListenForMessages(stream.Context(), p.Id, s.forwardMessageToPeer)
-
-			return p, nil
-		} else {
-			s.metrics.RegistrationFailures.Add(stream.Context(), 1, metric.WithAttributes(attribute.String(labelError, labelErrorMissingId)))
-			return nil, status.Errorf(codes.FailedPrecondition, "missing connection header: "+proto.HeaderId)
-		}
-	} else {
+	meta, hasMeta := metadata.FromIncomingContext(stream.Context())
+	if !hasMeta {
 		s.metrics.RegistrationFailures.Add(stream.Context(), 1, metric.WithAttributes(attribute.String(labelError, labelErrorMissingMeta)))
 		return nil, status.Errorf(codes.FailedPrecondition, "missing connection stream meta")
 	}
+
+	id, found := meta[proto.HeaderId]
+	if !found {
+		s.metrics.RegistrationFailures.Add(stream.Context(), 1, metric.WithAttributes(attribute.String(labelError, labelErrorMissingId)))
+		return nil, status.Errorf(codes.FailedPrecondition, "missing connection header: %s", proto.HeaderId)
+	}
+
+	p := peer.NewPeer(id[0], stream)
+	s.registry.Register(p)
+	s.dispatcher.ListenForMessages(stream.Context(), p.Id, s.forwardMessageToPeer)
+	return p, nil
 }
 
 func (s *Server) DeregisterPeer(p *peer.Peer) {
 	log.Debugf("peer disconnected [%s] [streamID %d] ", p.Id, p.StreamID)
 	s.registry.Deregister(p)
-
 	s.metrics.PeerConnectionDuration.Record(p.Stream.Context(), int64(time.Since(p.RegisteredAt).Seconds()))
 }
 
 func (s *Server) forwardMessageToPeer(ctx context.Context, msg *proto.EncryptedMessage) {
 	log.Debugf("forwarding a new message from peer [%s] to peer [%s]", msg.Key, msg.RemoteKey)
-
 	getRegistrationStart := time.Now()
 
 	// lookup the target peer where the message is going to
-	if dstPeer, found := s.registry.Get(msg.RemoteKey); found {
-		s.metrics.GetRegistrationDelay.Record(ctx, float64(time.Since(getRegistrationStart).Nanoseconds())/1e6, metric.WithAttributes(attribute.String(labelType, labelTypeStream), attribute.String(labelRegistrationStatus, labelRegistrationFound)))
-		start := time.Now()
-		// forward the message to the target peer
-		if err := dstPeer.Stream.Send(msg); err != nil {
-			log.Warnf("error while forwarding message from peer [%s] to peer [%s] %v", msg.Key, msg.RemoteKey, err)
-			// todo respond to the sender?
-			s.metrics.MessageForwardFailures.Add(ctx, 1, metric.WithAttributes(attribute.String(labelType, labelTypeError)))
-		} else {
-			// in milliseconds
-			s.metrics.MessageForwardLatency.Record(ctx, float64(time.Since(start).Nanoseconds())/1e6, metric.WithAttributes(attribute.String(labelType, labelTypeStream)))
-			s.metrics.MessagesForwarded.Add(ctx, 1)
-		}
-	} else {
+	dstPeer, found := s.registry.Get(msg.RemoteKey)
+
+	if !found {
 		s.metrics.GetRegistrationDelay.Record(ctx, float64(time.Since(getRegistrationStart).Nanoseconds())/1e6, metric.WithAttributes(attribute.String(labelType, labelTypeStream), attribute.String(labelRegistrationStatus, labelRegistrationNotFound)))
 		s.metrics.MessageForwardFailures.Add(ctx, 1, metric.WithAttributes(attribute.String(labelType, labelTypeNotConnected)))
 		log.Debugf("message from peer [%s] can't be forwarded to peer [%s] because destination peer is not connected", msg.Key, msg.RemoteKey)
 		// todo respond to the sender?
+		return
 	}
+
+	s.metrics.GetRegistrationDelay.Record(ctx, float64(time.Since(getRegistrationStart).Nanoseconds())/1e6, metric.WithAttributes(attribute.String(labelType, labelTypeStream), attribute.String(labelRegistrationStatus, labelRegistrationFound)))
+	start := time.Now()
+
+	// forward the message to the target peer
+	if err := dstPeer.Stream.Send(msg); err != nil {
+		log.Warnf("error while forwarding message from peer [%s] to peer [%s] %v", msg.Key, msg.RemoteKey, err)
+		// todo respond to the sender?
+		s.metrics.MessageForwardFailures.Add(ctx, 1, metric.WithAttributes(attribute.String(labelType, labelTypeError)))
+		return
+	}
+
+	// in milliseconds
+	s.metrics.MessageForwardLatency.Record(ctx, float64(time.Since(start).Nanoseconds())/1e6, metric.WithAttributes(attribute.String(labelType, labelTypeStream)))
+	s.metrics.MessagesForwarded.Add(ctx, 1)
+	s.metrics.MessageSize.Record(ctx, int64(gproto.Size(msg)), metric.WithAttributes(attribute.String(labelType, labelTypeMessage)))
 }
diff --git a/util/grpc/dialer.go b/util/grpc/dialer.go
index 83a11c65d..f6d6d2f04 100644
--- a/util/grpc/dialer.go
+++ b/util/grpc/dialer.go
@@ -40,7 +40,6 @@ func WithCustomDialer() grpc.DialOption {
 			}
 		}
 
-		log.Debug("Using nbnet.NewDialer()")
 		conn, err := nbnet.NewDialer().DialContext(ctx, "tcp", addr)
 		if err != nil {
 			log.Errorf("Failed to dial: %s", err)
diff --git a/util/net/env.go b/util/net/env.go
index 099da39b7..32425665d 100644
--- a/util/net/env.go
+++ b/util/net/env.go
@@ -2,6 +2,7 @@ package net
 
 import (
 	"os"
+	"strconv"
 
 	log "github.com/sirupsen/logrus"
 
@@ -10,20 +11,24 @@ import (
 
 const (
 	envDisableCustomRouting = "NB_DISABLE_CUSTOM_ROUTING"
-	envSkipSocketMark       = "NB_SKIP_SOCKET_MARK"
 )
 
+// CustomRoutingDisabled returns true if custom routing is disabled.
+// When disabled, the client falls back to the operation mode used before the exit node functionality was introduced:
+// exclusion routes won't be set up, and all dialers and listeners will use net.Dial and net.Listen, respectively.
 func CustomRoutingDisabled() bool {
 	if netstack.IsEnabled() {
 		return true
 	}
-	return os.Getenv(envDisableCustomRouting) == "true"
-}
 
-func SkipSocketMark() bool {
-	if skipSocketMark := os.Getenv(envSkipSocketMark); skipSocketMark == "true" {
-		log.Infof("%s is set to true, skipping SO_MARK", envSkipSocketMark)
-		return true
+	var customRoutingDisabled bool
+	if val := os.Getenv(envDisableCustomRouting); val != "" {
+		var err error
+		customRoutingDisabled, err = strconv.ParseBool(val)
+		if err != nil {
+			log.Warnf("failed to parse %s: %v", envDisableCustomRouting, err)
+		}
 	}
-	return false
+
+	return customRoutingDisabled
 }
diff --git a/util/net/env_generic.go b/util/net/env_generic.go
new file mode 100644
index 000000000..6d142a838
--- /dev/null
+++ b/util/net/env_generic.go
@@ -0,0 +1,12 @@
+//go:build !linux || android
+
+package net
+
+func Init() {
+	// nothing to do on non-linux
+}
+
+func AdvancedRouting() bool {
+	// non-linux currently doesn't support advanced routing
+	return false
+}
diff --git a/util/net/env_linux.go b/util/net/env_linux.go
new file mode 100644
index 000000000..124bf64de
--- /dev/null
+++ b/util/net/env_linux.go
@@ -0,0 +1,119 @@
+//go:build linux && !android
+
+package net
+
+import (
+	"errors"
+	"os"
+	"strconv"
+	"syscall"
+	"time"
+
+	log "github.com/sirupsen/logrus"
+	"github.com/vishvananda/netlink"
+
+	"github.com/netbirdio/netbird/client/iface/netstack"
+)
+
+const (
+	// Both variables have the same effect; NB_SKIP_SOCKET_MARK is kept for backward compatibility.
+	envSkipSocketMark   = "NB_SKIP_SOCKET_MARK"
+	envUseLegacyRouting = "NB_USE_LEGACY_ROUTING"
+)
+
+var advancedRoutingSupported bool
+
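+// Init probes the environment and the kernel once at startup and caches whether
+// advanced routing (fwmark-based dialers and policy routing rules) can be used.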
+func Init() {
+	advancedRoutingSupported = checkAdvancedRoutingSupport()
+}
+
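+// AdvancedRouting reports whether advanced routing was detected as usable by Init.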
+func AdvancedRouting() bool {
+	return advancedRoutingSupported
+}
+
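+// checkAdvancedRoutingSupport combines the environment overrides with runtime
+// feature probes to decide whether advanced routing can be enabled.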
+func checkAdvancedRoutingSupport() bool {
+	var err error
+
+	var legacyRouting bool
+	if val := os.Getenv(envUseLegacyRouting); val != "" {
+		legacyRouting, err = strconv.ParseBool(val)
+		if err != nil {
+			log.Warnf("failed to parse %s: %v", envUseLegacyRouting, err)
+		}
+	}
+
+	var skipSocketMark bool
+	if val := os.Getenv(envSkipSocketMark); val != "" {
+		skipSocketMark, err = strconv.ParseBool(val)
+		if err != nil {
+			log.Warnf("failed to parse %s: %v", envSkipSocketMark, err)
+		}
+	}
+
+	// requested to disable advanced routing
+	if legacyRouting || skipSocketMark ||
+		// envDisableCustomRouting disables the custom dialers.
+		// There is no point in using advanced routing without those, as they set up fwmarks on the sockets.
+		CustomRoutingDisabled() ||
+		// netstack mode doesn't need routing at all
+		netstack.IsEnabled() {
+
+		log.Info("advanced routing has been requested to be disabled")
+		return false
+	}
+
+	if !CheckFwmarkSupport() || !CheckRuleOperationsSupport() {
+		log.Warn("system doesn't support required routing features, falling back to legacy routing")
+		return false
+	}
+
+	log.Info("system supports advanced routing")
+
+	return true
+}
+
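+// CheckFwmarkSupport verifies that sockets can be created with the NetBird fwmark
+// by dialing a loopback address with the fwmark-enabled dialer.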
+func CheckFwmarkSupport() bool {
+	// temporarily enable advanced routing to check whether fwmarks are supported
+	old := advancedRoutingSupported
+	advancedRoutingSupported = true
+	defer func() {
+		advancedRoutingSupported = old
+	}()
+
+	dialer := NewDialer()
+	dialer.Timeout = 100 * time.Millisecond
+
+	conn, err := dialer.Dial("udp", "127.0.0.1:9")
+	if err != nil {
+		log.Warnf("failed to dial with fwmark: %v", err)
+		return false
+	}
+	if err := conn.Close(); err != nil {
+		log.Warnf("failed to close connection: %v", err)
+	}
+
+	return true
+}
+
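+// CheckRuleOperationsSupport verifies that policy routing rules can be managed by
+// adding and removing a temporary low-priority rule in the main routing table.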
+func CheckRuleOperationsSupport() bool {
+	rule := netlink.NewRule()
+	// low precedence, semi-random
+	rule.Priority = 32321
+	rule.Table = syscall.RT_TABLE_MAIN
+	rule.Family = netlink.FAMILY_V4
+
+	if err := netlink.RuleAdd(rule); err != nil {
+		if errors.Is(err, syscall.EOPNOTSUPP) {
+			log.Warn("IP rule operations are not supported")
+			return false
+		}
+		log.Warnf("failed to test rule support: %v", err)
+		return false
+	}
+
+	if err := netlink.RuleDel(rule); err != nil {
+		log.Warnf("failed to delete test rule: %v", err)
+	}
+	return true
+}
diff --git a/util/net/net_linux.go b/util/net/net_linux.go
index fc486ebd4..eae483a26 100644
--- a/util/net/net_linux.go
+++ b/util/net/net_linux.go
@@ -5,13 +5,11 @@ package net
 import (
 	"fmt"
 	"syscall"
-
-	log "github.com/sirupsen/logrus"
 )
 
 // SetSocketMark sets the SO_MARK option on the given socket connection
 func SetSocketMark(conn syscall.Conn) error {
-	if isSocketMarkDisabled() {
+	if !AdvancedRouting() {
 		return nil
 	}
 
@@ -25,7 +23,7 @@ func SetSocketMark(conn syscall.Conn) error {
 
 // SetSocketOpt sets the SO_MARK option on the given file descriptor
 func SetSocketOpt(fd int) error {
-	if isSocketMarkDisabled() {
+	if !AdvancedRouting() {
 		return nil
 	}
 
@@ -36,7 +34,7 @@ func setRawSocketMark(conn syscall.RawConn) error {
 	var setErr error
 
 	err := conn.Control(func(fd uintptr) {
-		if isSocketMarkDisabled() {
+		if !AdvancedRouting() {
 			return
 		}
 		setErr = setSocketOptInt(int(fd))
@@ -55,15 +53,3 @@ func setRawSocketMark(conn syscall.RawConn) error {
 func setSocketOptInt(fd int) error {
 	return syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_MARK, NetbirdFwmark)
 }
-
-func isSocketMarkDisabled() bool {
-	if CustomRoutingDisabled() {
-		log.Infof("Custom routing is disabled, skipping SO_MARK")
-		return true
-	}
-
-	if SkipSocketMark() {
-		return true
-	}
-	return false
-}