Implement API response cache (#1645)
Apply peer validator cache mechanism

---------

Co-authored-by: Maycon Santos <mlsmaycon@gmail.com>
Co-authored-by: Yury Gargay <yury.gargay@gmail.com>
Co-authored-by: Viktor Liu <viktor@netbird.io>
Co-authored-by: Bethuel Mmbaga <bethuelmbaga12@gmail.com>
Co-authored-by: pascal-fischer <32096965+pascal-fischer@users.noreply.github.com>
Co-authored-by: Misha Bragin <bangvalo@gmail.com>
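The dependency list in go.mod further down already includes github.com/patrickmn/go-cache. As a rough illustration of the response-cache idea named in the title (this is not code from the change itself; the fetchAccount helper, the accountResponse type, and the TTLs are invented for the sketch), a TTL-based cache wrapped around an expensive lookup could look like this:

```go
// Hypothetical illustration only: a TTL-based response cache built on
// github.com/patrickmn/go-cache (already present in go.mod below).
// Function and type names are invented for this sketch.
package main

import (
	"fmt"
	"time"

	cache "github.com/patrickmn/go-cache"
)

type accountResponse struct {
	ID   string
	Name string
}

// fetchAccount stands in for an expensive backend or IdP call.
func fetchAccount(id string) accountResponse {
	time.Sleep(100 * time.Millisecond) // simulate latency
	return accountResponse{ID: id, Name: "example"}
}

func main() {
	// Entries expire after 5 minutes; expired entries are purged every 10 minutes.
	c := cache.New(5*time.Minute, 10*time.Minute)

	getAccount := func(id string) accountResponse {
		if v, ok := c.Get(id); ok {
			return v.(accountResponse) // cache hit
		}
		resp := fetchAccount(id)
		c.Set(id, resp, cache.DefaultExpiration)
		return resp
	}

	fmt.Println(getAccount("acc-1")) // miss: hits the backend
	fmt.Println(getAccount("acc-1")) // hit: served from cache
}
```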
.github/ISSUE_TEMPLATE/bug-issue-report.md (2 changes)

@@ -2,7 +2,7 @@
 name: Bug/Issue report
 about: Create a report to help us improve
 title: ''
-labels: ['triage']
+labels: ['triage-needed']
 assignees: ''
 
 ---
@@ -162,6 +162,13 @@ jobs:
           test $count -eq 4
         working-directory: infrastructure_files/artifacts
 
+      - name: test geolocation databases
+        working-directory: infrastructure_files/artifacts
+        run: |
+          sleep 30
+          docker compose exec management ls -l /var/lib/netbird/ | grep -i GeoLite2-City.mmdb
+          docker compose exec management ls -l /var/lib/netbird/ | grep -i geonames.db
+
   test-getting-started-script:
     runs-on: ubuntu-latest
     steps:

@@ -199,6 +206,6 @@ jobs:
       - name: test script
         run: bash -x infrastructure_files/download-geolite2.sh
       - name: test mmdb file exists
-        run: ls -l GeoLite2-City_*/GeoLite2-City.mmdb
+        run: test -f GeoLite2-City.mmdb
       - name: test geonames file exists
        run: test -f geonames.db
@@ -63,6 +63,14 @@ linters-settings:
     enable:
       - nilness
 
+  revive:
+    rules:
+      - name: exported
+        severity: warning
+        disabled: false
+        arguments:
+          - "checkPrivateReceivers"
+          - "sayRepetitiveInsteadOfStutters"
   tenv:
     # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures.
     # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked.

@@ -93,6 +101,7 @@ linters:
     - nilerr # finds the code that returns nil even if it checks that the error is not nil
     - nilnil # checks that there is no simultaneous return of nil error and an invalid value
     - predeclared # predeclared finds code that shadows one of Go's predeclared identifiers
+    - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint.
    - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed
    - thelper # thelper detects Go test helpers without t.Helper() call and checks the consistency of test helpers.
    - wastedassign # wastedassign finds wasted assignment statements
@@ -54,7 +54,7 @@ nfpms:
     contents:
       - src: client/ui/netbird.desktop
         dst: /usr/share/applications/netbird.desktop
-      - src: client/ui/netbird-systemtray-default.png
+      - src: client/ui/netbird-systemtray-connected.png
         dst: /usr/share/pixmaps/netbird.png
     dependencies:
       - netbird

@@ -71,7 +71,7 @@ nfpms:
     contents:
      - src: client/ui/netbird.desktop
        dst: /usr/share/applications/netbird.desktop
-      - src: client/ui/netbird-systemtray-default.png
+      - src: client/ui/netbird-systemtray-connected.png
        dst: /usr/share/pixmaps/netbird.png
    dependencies:
      - netbird
README.md (31 changes)

@@ -1,6 +1,6 @@
 <p align="center">
-  <strong>:hatching_chick: New Release! Self-hosting in under 5 min.</strong>
-  <a href="https://github.com/netbirdio/netbird#quickstart-with-self-hosted-netbird">
+  <strong>:hatching_chick: New Release! Device Posture Checks.</strong>
+  <a href="https://docs.netbird.io/how-to/manage-posture-checks">
     Learn more
   </a>
 </p>

@@ -42,25 +42,22 @@
 
 **Secure.** NetBird enables secure remote access by applying granular access policies, while allowing you to manage them intuitively from a single place. Works universally on any infrastructure.
 
-### Secure peer-to-peer VPN with SSO and MFA in minutes
+### Open-Source Network Security in a Single Platform
 
-https://user-images.githubusercontent.com/700848/197345890-2e2cded5-7b7a-436f-a444-94e80dd24f46.mov
+
 
 ### Key features
 
-| Connectivity | Management | Automation | Platforms |
-|--------------|------------|------------|-----------|
-| <ul><li> - \[x] Kernel WireGuard </ul></li> | <ul><li> - \[x] [Admin Web UI](https://github.com/netbirdio/dashboard) </ul></li> | <ul><li> - \[x] [Public API](https://docs.netbird.io/api) </ul></li> | <ul><li> - \[x] Linux </ul></li> |
-| <ul><li> - \[x] Peer-to-peer connections </ul></li> | <ul><li> - \[x] Auto peer discovery and configuration </ul></li> | <ul><li> - \[x] [Setup keys for bulk network provisioning](https://docs.netbird.io/how-to/register-machines-using-setup-keys) </ul></li> | <ul><li> - \[x] Mac </ul></li> |
-| <ul><li> - \[x] Peer-to-peer encryption </ul></li> | <ul><li> - \[x] [IdP integrations](https://docs.netbird.io/selfhosted/identity-providers) </ul></li> | <ul><li> - \[x] [Self-hosting quickstart script](https://docs.netbird.io/selfhosted/selfhosted-quickstart) </ul></li> | <ul><li> - \[x] Windows </ul></li> |
-| <ul><li> - \[x] Connection relay fallback </ul></li> | <ul><li> - \[x] [SSO & MFA support](https://docs.netbird.io/how-to/installation#running-net-bird-with-sso-login) </ul></li> | <ul><li> - \[x] IdP groups sync with JWT </ul></li> | <ul><li> - \[x] Android </ul></li> |
-| <ul><li> - \[x] [Routes to external networks](https://docs.netbird.io/how-to/routing-traffic-to-private-networks) </ul></li> | <ul><li> - \[x] [Access control - groups & rules](https://docs.netbird.io/how-to/manage-network-access) </ul></li> | | <ul><li> - \[x] iOS </ul></li> |
-| <ul><li> - \[x] NAT traversal with BPF </ul></li> | <ul><li> - \[x] [Private DNS](https://docs.netbird.io/how-to/manage-dns-in-your-network) </ul></li> | | <ul><li> - \[x] Docker </ul></li> |
-| <ul><li> - \[x] Post-quantum-secure connection through [Rosenpass](https://rosenpass.eu) </ul></li> | <ul><li> - \[x] [Multiuser support](https://docs.netbird.io/how-to/add-users-to-your-network) </ul></li> | | <ul><li> - \[x] OpenWRT </ul></li> |
-| | <ul><li> - \[x] [Activity logging](https://docs.netbird.io/how-to/monitor-system-and-network-activity) </ul></li> | | |
-| | <ul><li> - \[x] SSH access management </ul></li> | | |
+| Connectivity | Management | Security | Automation | Platforms |
+|--------------|------------|----------|------------|-----------|
+| <ul><li> - \[x] Kernel WireGuard </ul></li> | <ul><li> - \[x] [Admin Web UI](https://github.com/netbirdio/dashboard) </ul></li> | <ul><li> - \[x] [SSO & MFA support](https://docs.netbird.io/how-to/installation#running-net-bird-with-sso-login) </ul></li> | <ul><li> - \[x] [Public API](https://docs.netbird.io/api) </ul></li> | <ul><li> - \[x] Linux </ul></li> |
+| <ul><li> - \[x] Peer-to-peer connections </ul></li> | <ul><li> - \[x] Auto peer discovery and configuration </ul></li> | <ul><li> - \[x] [Access control - groups & rules](https://docs.netbird.io/how-to/manage-network-access) </ul></li> | <ul><li> - \[x] [Setup keys for bulk network provisioning](https://docs.netbird.io/how-to/register-machines-using-setup-keys) </ul></li> | <ul><li> - \[x] Mac </ul></li> |
+| <ul><li> - \[x] Connection relay fallback </ul></li> | <ul><li> - \[x] [IdP integrations](https://docs.netbird.io/selfhosted/identity-providers) </ul></li> | <ul><li> - \[x] [Activity logging](https://docs.netbird.io/how-to/monitor-system-and-network-activity) </ul></li> | <ul><li> - \[x] [Self-hosting quickstart script](https://docs.netbird.io/selfhosted/selfhosted-quickstart) </ul></li> | <ul><li> - \[x] Windows </ul></li> |
+| <ul><li> - \[x] [Routes to external networks](https://docs.netbird.io/how-to/routing-traffic-to-private-networks) </ul></li> | <ul><li> - \[x] [Private DNS](https://docs.netbird.io/how-to/manage-dns-in-your-network) </ul></li> | <ul><li> - \[x] [Device posture checks](https://docs.netbird.io/how-to/manage-posture-checks) </ul></li> | <ul><li> - \[x] IdP groups sync with JWT </ul></li> | <ul><li> - \[x] Android </ul></li> |
+| <ul><li> - \[x] NAT traversal with BPF </ul></li> | <ul><li> - \[x] [Multiuser support](https://docs.netbird.io/how-to/add-users-to-your-network) </ul></li> | <ul><li> - \[x] Peer-to-peer encryption </ul></li> | | <ul><li> - \[x] iOS </ul></li> |
+| | | <ul><li> - \[x] [Quantum-resistance with Rosenpass](https://netbird.io/knowledge-hub/the-first-quantum-resistant-mesh-vpn) </ul></li> | | <ul><li> - \[x] OpenWRT </ul></li> |
+| | | <ui><li> - \[x] [Periodic re-authentication](https://docs.netbird.io/how-to/enforce-periodic-user-authentication)</ul></li> | | <ul><li> - \[x] [Serverless](https://docs.netbird.io/how-to/netbird-on-faas) </ul></li> |
+| | | | | <ul><li> - \[x] Docker </ul></li> |
 
 
 ### Quickstart with NetBird Cloud
 
 - Download and install NetBird at [https://app.netbird.io/install](https://app.netbird.io/install)
@@ -76,10 +76,7 @@ func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Listener
 
     peersUpdateManager := mgmt.NewPeersUpdateManager(nil)
     eventStore := &activity.InMemoryEventStore{}
-    if err != nil {
-        return nil, nil
-    }
-    iv := integrations.NewIntegratedApproval()
+    iv, _ := integrations.NewIntegratedApproval(eventStore)
     accountManager, err := mgmt.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false, iv)
     if err != nil {
         t.Fatal(err)
@@ -26,7 +26,7 @@ type HTTPClient interface {
 }
 
 // AuthFlowInfo holds information for the OAuth 2.0 authorization flow
-type AuthFlowInfo struct {
+type AuthFlowInfo struct { //nolint:revive
     DeviceCode      string `json:"device_code"`
     UserCode        string `json:"user_code"`
     VerificationURI string `json:"verification_uri"`
@@ -8,6 +8,7 @@ import (
     "net/netip"
     "os"
     "strings"
+    "time"
 
     log "github.com/sirupsen/logrus"
 )

@@ -23,10 +24,16 @@ const (
     fileMaxNumberOfSearchDomains = 6
 )
 
+const (
+    dnsFailoverTimeout  = 4 * time.Second
+    dnsFailoverAttempts = 1
+)
+
 type fileConfigurator struct {
     repair *repair
 
     originalPerms os.FileMode
+    nbNameserverIP string
 }
 
 func newFileConfigurator() (hostManager, error) {

@@ -64,7 +71,7 @@ func (f *fileConfigurator) applyDNSConfig(config HostDNSConfig) error {
     }
 
     nbSearchDomains := searchDomains(config)
-    nbNameserverIP := config.ServerIP
+    f.nbNameserverIP = config.ServerIP
 
     resolvConf, err := parseBackupResolvConf()
     if err != nil {

@@ -73,11 +80,11 @@ func (f *fileConfigurator) applyDNSConfig(config HostDNSConfig) error {
 
     f.repair.stopWatchFileChanges()
 
-    err = f.updateConfig(nbSearchDomains, nbNameserverIP, resolvConf)
+    err = f.updateConfig(nbSearchDomains, f.nbNameserverIP, resolvConf)
     if err != nil {
         return err
     }
-    f.repair.watchFileChanges(nbSearchDomains, nbNameserverIP)
+    f.repair.watchFileChanges(nbSearchDomains, f.nbNameserverIP)
     return nil
 }
 

@@ -85,10 +92,11 @@ func (f *fileConfigurator) updateConfig(nbSearchDomains []string, nbNameserverIP
     searchDomainList := mergeSearchDomains(nbSearchDomains, cfg.searchDomains)
     nameServers := generateNsList(nbNameserverIP, cfg)
 
+    options := prepareOptionsWithTimeout(cfg.others, int(dnsFailoverTimeout.Seconds()), dnsFailoverAttempts)
     buf := prepareResolvConfContent(
         searchDomainList,
         nameServers,
-        cfg.others)
+        options)
 
     log.Debugf("creating managed file %s", defaultResolvConfPath)
     err := os.WriteFile(defaultResolvConfPath, buf.Bytes(), f.originalPerms)

@@ -131,7 +139,12 @@ func (f *fileConfigurator) backup() error {
 }
 
 func (f *fileConfigurator) restore() error {
-    err := copyFile(fileDefaultResolvConfBackupLocation, defaultResolvConfPath)
+    err := removeFirstNbNameserver(fileDefaultResolvConfBackupLocation, f.nbNameserverIP)
+    if err != nil {
+        log.Errorf("Failed to remove netbird nameserver from %s on backup restore: %s", fileDefaultResolvConfBackupLocation, err)
+    }
+
+    err = copyFile(fileDefaultResolvConfBackupLocation, defaultResolvConfPath)
     if err != nil {
         return fmt.Errorf("restoring %s from %s: %w", defaultResolvConfPath, fileDefaultResolvConfBackupLocation, err)
     }

@@ -157,7 +170,7 @@ func (f *fileConfigurator) restoreUncleanShutdownDNS(storedDNSAddress *netip.Addr
     currentDNSAddress, err := netip.ParseAddr(resolvConf.nameServers[0])
     // not a valid first nameserver -> restore
     if err != nil {
-        log.Errorf("restoring unclean shutdown: parse dns address %s failed: %s", resolvConf.nameServers[1], err)
+        log.Errorf("restoring unclean shutdown: parse dns address %s failed: %s", resolvConf.nameServers[0], err)
         return restoreResolvConfFile()
     }
 
@@ -5,6 +5,7 @@ package dns
 import (
     "fmt"
     "os"
+    "regexp"
     "strings"
 
     log "github.com/sirupsen/logrus"

@@ -14,6 +15,9 @@ const (
     defaultResolvConfPath = "/etc/resolv.conf"
 )
 
+var timeoutRegex = regexp.MustCompile(`timeout:\d+`)
+var attemptsRegex = regexp.MustCompile(`attempts:\d+`)
+
 type resolvConf struct {
     nameServers   []string
     searchDomains []string

@@ -103,3 +107,62 @@ func parseResolvConfFile(resolvConfFile string) (*resolvConf, error) {
     }
     return rconf, nil
 }
+
+// prepareOptionsWithTimeout appends timeout to existing options if it doesn't exist,
+// otherwise it adds a new option with timeout and attempts.
+func prepareOptionsWithTimeout(input []string, timeout int, attempts int) []string {
+    configs := make([]string, len(input))
+    copy(configs, input)
+
+    for i, config := range configs {
+        if strings.HasPrefix(config, "options") {
+            config = strings.ReplaceAll(config, "rotate", "")
+            config = strings.Join(strings.Fields(config), " ")
+
+            if strings.Contains(config, "timeout:") {
+                config = timeoutRegex.ReplaceAllString(config, fmt.Sprintf("timeout:%d", timeout))
+            } else {
+                config = strings.Replace(config, "options ", fmt.Sprintf("options timeout:%d ", timeout), 1)
+            }
+
+            if strings.Contains(config, "attempts:") {
+                config = attemptsRegex.ReplaceAllString(config, fmt.Sprintf("attempts:%d", attempts))
+            } else {
+                config = strings.Replace(config, "options ", fmt.Sprintf("options attempts:%d ", attempts), 1)
+            }
+
+            configs[i] = config
+            return configs
+        }
+    }
+
+    return append(configs, fmt.Sprintf("options timeout:%d attempts:%d", timeout, attempts))
+}
+
+// removeFirstNbNameserver removes the given nameserver from the given file if it is in the first position
+// and writes the file back to the original location
+func removeFirstNbNameserver(filename, nameserverIP string) error {
+    resolvConf, err := parseResolvConfFile(filename)
+    if err != nil {
+        return fmt.Errorf("parse backup resolv.conf: %w", err)
+    }
+    content, err := os.ReadFile(filename)
+    if err != nil {
+        return fmt.Errorf("read %s: %w", filename, err)
+    }
+
+    if len(resolvConf.nameServers) > 1 && resolvConf.nameServers[0] == nameserverIP {
+        newContent := strings.Replace(string(content), fmt.Sprintf("nameserver %s\n", nameserverIP), "", 1)
+
+        stat, err := os.Stat(filename)
+        if err != nil {
+            return fmt.Errorf("stat %s: %w", filename, err)
+        }
+        if err := os.WriteFile(filename, []byte(newContent), stat.Mode()); err != nil {
+            return fmt.Errorf("write %s: %w", filename, err)
+        }
+
+    }
+
+    return nil
+}
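For reference, the rewrite that the new prepareOptionsWithTimeout helper applies to an existing `options` line can be reproduced in isolation. The sketch below mirrors the logic of the hunk above on a made-up input line; it is illustrative only and not part of the change:

```go
// Standalone illustration of the options rewrite shown in the hunk above.
// The input line in main is a made-up example.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	timeoutRe  = regexp.MustCompile(`timeout:\d+`)
	attemptsRe = regexp.MustCompile(`attempts:\d+`)
)

func rewriteOptions(line string, timeout, attempts int) string {
	// Drop "rotate" so queries always try the first (NetBird) nameserver,
	// then normalize whitespace.
	line = strings.ReplaceAll(line, "rotate", "")
	line = strings.Join(strings.Fields(line), " ")

	if strings.Contains(line, "timeout:") {
		line = timeoutRe.ReplaceAllString(line, fmt.Sprintf("timeout:%d", timeout))
	} else {
		line = strings.Replace(line, "options ", fmt.Sprintf("options timeout:%d ", timeout), 1)
	}
	if strings.Contains(line, "attempts:") {
		line = attemptsRe.ReplaceAllString(line, fmt.Sprintf("attempts:%d", attempts))
	} else {
		line = strings.Replace(line, "options ", fmt.Sprintf("options attempts:%d ", attempts), 1)
	}
	return line
}

func main() {
	fmt.Println(rewriteOptions("options rotate someother", 4, 1))
	// Output: options attempts:1 timeout:4 someother
}
```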
@@ -6,6 +6,8 @@ import (
     "os"
     "path/filepath"
     "testing"
+
+    "github.com/stretchr/testify/assert"
 )
 
 func Test_parseResolvConf(t *testing.T) {

@@ -172,3 +174,131 @@ nameserver 192.168.0.1
         t.Errorf("unexpected resolv.conf content: %v", cfg)
     }
 }
+
+func TestPrepareOptionsWithTimeout(t *testing.T) {
+    tests := []struct {
+        name     string
+        others   []string
+        timeout  int
+        attempts int
+        expected []string
+    }{
+        {
+            name:     "Append new options with timeout and attempts",
+            others:   []string{"some config"},
+            timeout:  2,
+            attempts: 2,
+            expected: []string{"some config", "options timeout:2 attempts:2"},
+        },
+        {
+            name:     "Modify existing options to exclude rotate and include timeout and attempts",
+            others:   []string{"some config", "options rotate someother"},
+            timeout:  3,
+            attempts: 2,
+            expected: []string{"some config", "options attempts:2 timeout:3 someother"},
+        },
+        {
+            name:     "Existing options with timeout and attempts are updated",
+            others:   []string{"some config", "options timeout:4 attempts:3"},
+            timeout:  5,
+            attempts: 4,
+            expected: []string{"some config", "options timeout:5 attempts:4"},
+        },
+        {
+            name:     "Modify existing options, add missing attempts before timeout",
+            others:   []string{"some config", "options timeout:4"},
+            timeout:  4,
+            attempts: 3,
+            expected: []string{"some config", "options attempts:3 timeout:4"},
+        },
+    }
+
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
+            result := prepareOptionsWithTimeout(tc.others, tc.timeout, tc.attempts)
+            assert.Equal(t, tc.expected, result)
+        })
+    }
+}
+
+func TestRemoveFirstNbNameserver(t *testing.T) {
+    testCases := []struct {
+        name       string
+        content    string
+        ipToRemove string
+        expected   string
+    }{
+        {
+            name: "Unrelated nameservers with comments and options",
+            content: `# This is a comment
+options rotate
+nameserver 1.1.1.1
+# Another comment
+nameserver 8.8.4.4
+search example.com`,
+            ipToRemove: "9.9.9.9",
+            expected: `# This is a comment
+options rotate
+nameserver 1.1.1.1
+# Another comment
+nameserver 8.8.4.4
+search example.com`,
+        },
+        {
+            name: "First nameserver matches",
+            content: `search example.com
+nameserver 9.9.9.9
+# oof, a comment
+nameserver 8.8.4.4
+options attempts:5`,
+            ipToRemove: "9.9.9.9",
+            expected: `search example.com
+# oof, a comment
+nameserver 8.8.4.4
+options attempts:5`,
+        },
+        {
+            name: "Target IP not the first nameserver",
+            // nolint:dupword
+            content: `# Comment about the first nameserver
+nameserver 8.8.4.4
+# Comment before our target
+nameserver 9.9.9.9
+options timeout:2`,
+            ipToRemove: "9.9.9.9",
+            // nolint:dupword
+            expected: `# Comment about the first nameserver
+nameserver 8.8.4.4
+# Comment before our target
+nameserver 9.9.9.9
+options timeout:2`,
+        },
+        {
+            name: "Only nameserver matches",
+            content: `options debug
+nameserver 9.9.9.9
+search localdomain`,
+            ipToRemove: "9.9.9.9",
+            expected: `options debug
+nameserver 9.9.9.9
+search localdomain`,
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            tempDir := t.TempDir()
+            tempFile := filepath.Join(tempDir, "resolv.conf")
+            err := os.WriteFile(tempFile, []byte(tc.content), 0644)
+            assert.NoError(t, err)
+
+            err = removeFirstNbNameserver(tempFile, tc.ipToRemove)
+            assert.NoError(t, err)
+
+            content, err := os.ReadFile(tempFile)
+            assert.NoError(t, err)
+
+            assert.Equal(t, tc.expected, string(content), "The resulting content should match the expected output.")
+        })
+    }
+}
@@ -65,7 +65,7 @@ func newHostManager(wgInterface string) (hostManager, error) {
         return nil, err
     }
 
-    log.Debugf("discovered mode is: %s", osManager)
+    log.Infof("System DNS manager discovered: %s", osManager)
     return newHostManagerFromType(wgInterface, osManager)
 }
 

@@ -53,10 +53,12 @@ func (r *resolvconf) applyDNSConfig(config HostDNSConfig) error {
     searchDomainList := searchDomains(config)
     searchDomainList = mergeSearchDomains(searchDomainList, r.originalSearchDomains)
 
+    options := prepareOptionsWithTimeout(r.othersConfigs, int(dnsFailoverTimeout.Seconds()), dnsFailoverAttempts)
+
     buf := prepareResolvConfContent(
         searchDomainList,
         append([]string{config.ServerIP}, r.originalNameServers...),
-        r.othersConfigs)
+        options)
 
     // create a backup for unclean shutdown detection before the resolv.conf is changed
     if err := createUncleanShutdownIndicator(defaultResolvConfPath, resolvConfManager, config.ServerIP); err != nil {
@@ -1048,10 +1048,8 @@ func startManagement(dataDir string) (*grpc.Server, string, error) {
 
     peersUpdateManager := server.NewPeersUpdateManager(nil)
     eventStore := &activity.InMemoryEventStore{}
-    if err != nil {
-        return nil, "", err
-    }
-    accountManager, err := server.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false, integrations.NewIntegratedApproval())
+    ia, _ := integrations.NewIntegratedApproval(eventStore)
+    accountManager, err := server.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false, ia)
     if err != nil {
         return nil, "", err
     }
@@ -61,7 +61,7 @@ func main() {
 
     flag.Parse()
 
-    a := app.New()
+    a := app.NewWithID("NetBird")
     a.SetIcon(fyne.NewStaticResource("netbird", iconDisconnectedPNG))
 
     client := newServiceClient(daemonAddr, a, showSettings)
@@ -82,17 +82,23 @@ var iconConnectedICO []byte
 //go:embed netbird-systemtray-connected.png
 var iconConnectedPNG []byte
 
-//go:embed netbird-systemtray-default.ico
+//go:embed netbird-systemtray-disconnected.ico
 var iconDisconnectedICO []byte
 
-//go:embed netbird-systemtray-default.png
+//go:embed netbird-systemtray-disconnected.png
 var iconDisconnectedPNG []byte
 
-//go:embed netbird-systemtray-update.ico
-var iconUpdateICO []byte
+//go:embed netbird-systemtray-update-disconnected.ico
+var iconUpdateDisconnectedICO []byte
 
-//go:embed netbird-systemtray-update.png
-var iconUpdatePNG []byte
+//go:embed netbird-systemtray-update-disconnected.png
+var iconUpdateDisconnectedPNG []byte
+
+//go:embed netbird-systemtray-update-connected.ico
+var iconUpdateConnectedICO []byte
+
+//go:embed netbird-systemtray-update-connected.png
+var iconUpdateConnectedPNG []byte
 
 //go:embed netbird-systemtray-update-cloud.ico
 var iconUpdateCloudICO []byte
@@ -107,7 +113,8 @@ type serviceClient struct {
 
     icConnected          []byte
     icDisconnected       []byte
-    icUpdate             []byte
+    icUpdateConnected    []byte
+    icUpdateDisconnected []byte
     icUpdateCloud        []byte
 
     // systray menu items

@@ -126,6 +133,7 @@ type serviceClient struct {
     app          fyne.App
     wSettings    fyne.Window
     showSettings bool
+    sendNotification bool
 
     // input elements for settings form
     iMngURL *widget.Entry

@@ -139,6 +147,7 @@ type serviceClient struct {
     preSharedKey string
     adminURL     string
 
+    connected            bool
     update               *version.Update
     daemonVersion        string
     updateIndicationLock sync.Mutex

@@ -153,6 +162,7 @@ func newServiceClient(addr string, a fyne.App, showSettings bool) *serviceClient
        ctx:  context.Background(),
        addr: addr,
        app:  a,
+        sendNotification: false,
 
        showSettings: showSettings,
        update:       version.NewUpdate(),

@@ -161,13 +171,15 @@ func newServiceClient(addr string, a fyne.App, showSettings bool) *serviceClient
     if runtime.GOOS == "windows" {
         s.icConnected = iconConnectedICO
         s.icDisconnected = iconDisconnectedICO
-        s.icUpdate = iconUpdateICO
+        s.icUpdateConnected = iconUpdateConnectedICO
+        s.icUpdateDisconnected = iconUpdateDisconnectedICO
         s.icUpdateCloud = iconUpdateCloudICO
 
     } else {
         s.icConnected = iconConnectedPNG
         s.icDisconnected = iconDisconnectedPNG
-        s.icUpdate = iconUpdatePNG
+        s.icUpdateConnected = iconUpdateConnectedPNG
+        s.icUpdateDisconnected = iconUpdateDisconnectedPNG
         s.icUpdateCloud = iconUpdateCloudPNG
     }
 
@@ -367,9 +379,18 @@ func (s *serviceClient) updateStatus() error {
         s.updateIndicationLock.Lock()
         defer s.updateIndicationLock.Unlock()
 
+        // notify the user when the session has expired
+        if status.Status == string(internal.StatusNeedsLogin) {
+            s.onSessionExpire()
+        }
+
         var systrayIconState bool
         if status.Status == string(internal.StatusConnected) && !s.mUp.Disabled() {
-            if !s.isUpdateIconActive {
+            s.connected = true
+            s.sendNotification = true
+            if s.isUpdateIconActive {
+                systray.SetIcon(s.icUpdateConnected)
+            } else {
                 systray.SetIcon(s.icConnected)
             }
             systray.SetTooltip("NetBird (Connected)")

@@ -378,7 +399,10 @@ func (s *serviceClient) updateStatus() error {
             s.mDown.Enable()
             systrayIconState = true
         } else if status.Status != string(internal.StatusConnected) && s.mUp.Disabled() {
-            if !s.isUpdateIconActive {
+            s.connected = false
+            if s.isUpdateIconActive {
+                systray.SetIcon(s.icUpdateDisconnected)
+            } else {
                 systray.SetIcon(s.icDisconnected)
             }
             systray.SetTooltip("NetBird (Disconnected)")
@@ -605,10 +629,30 @@ func (s *serviceClient) onUpdateAvailable() {
     defer s.updateIndicationLock.Unlock()
 
     s.mUpdate.Show()
-    s.mAbout.SetIcon(s.icUpdateCloud)
 
     s.isUpdateIconActive = true
-    systray.SetIcon(s.icUpdate)
+    if s.connected {
+        systray.SetIcon(s.icUpdateConnected)
+    } else {
+        systray.SetIcon(s.icUpdateDisconnected)
+    }
+}
+
+// onSessionExpire sends a notification to the user when the session expires.
+func (s *serviceClient) onSessionExpire() {
+    if s.sendNotification {
+        title := "Connection session expired"
+        if runtime.GOOS == "darwin" {
+            title = "NetBird connection session expired"
+        }
+        s.app.SendNotification(
+            fyne.NewNotification(
+                title,
+                "Please re-authenticate to connect to the network",
+            ),
+        )
+        s.sendNotification = false
+    }
 }
 
 func openURL(url string) error {
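As background for the notification introduced in onSessionExpire above, a minimal standalone Fyne v2 program that sends a desktop notification looks roughly like this (a sketch only: the app ID and text are placeholders, and the real client calls SendNotification from inside its running event loop):

```go
// Minimal sketch: sending a desktop notification with Fyne v2,
// mirroring the onSessionExpire pattern above. App ID and text are placeholders.
package main

import (
	"fyne.io/fyne/v2"
	"fyne.io/fyne/v2/app"
)

func main() {
	a := app.NewWithID("example.notification.sketch")
	a.SendNotification(fyne.NewNotification(
		"Connection session expired",
		"Please re-authenticate to connect to the network",
	))
}
```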
Binary image assets (client/ui):
- client/ui/netbird-systemtray-disconnected.ico: new file (5.0 KiB)
- client/ui/netbird-systemtray-disconnected.png: new file (9.0 KiB)
- client/ui/netbird-systemtray-update-connected.ico: new file (7.5 KiB)
- client/ui/netbird-systemtray-update-connected.png: new file (11 KiB)
- client/ui/netbird-systemtray-update-disconnected.ico: new file (7.8 KiB)
- client/ui/netbird-systemtray-update-disconnected.png: new file (12 KiB)
- two existing icons updated (4.3 KiB -> 5.0 KiB and 7.1 KiB -> 8.9 KiB) and four icons removed (2.8 KiB, 4.8 KiB, 4.6 KiB, 7.3 KiB); file names for these are not shown in this view
go.mod (2 changes)

@@ -59,7 +59,7 @@ require (
     github.com/mitchellh/hashstructure/v2 v2.0.2
     github.com/nadoo/ipset v0.5.0
     github.com/netbirdio/management-integrations/additions v0.0.0-20240226151841-2e4fe2407450
-    github.com/netbirdio/management-integrations/integrations v0.0.0-20240226151841-2e4fe2407450
+    github.com/netbirdio/management-integrations/integrations v0.0.0-20240305130559-469a80446ac7
     github.com/okta/okta-sdk-golang/v2 v2.18.0
     github.com/oschwald/maxminddb-golang v1.12.0
     github.com/patrickmn/go-cache v2.1.0+incompatible
go.sum (4 changes)

@@ -379,8 +379,8 @@ github.com/nadoo/ipset v0.5.0 h1:5GJUAuZ7ITQQQGne5J96AmFjRtI8Avlbk6CabzYWVUc=
 github.com/nadoo/ipset v0.5.0/go.mod h1:rYF5DQLRGGoQ8ZSWeK+6eX5amAuPqwFkWjhQlEITGJQ=
 github.com/netbirdio/management-integrations/additions v0.0.0-20240226151841-2e4fe2407450 h1:qA4S5YFt6/s0kQ8wKLjq8faLxuBSte1WzjWfmQmyJTU=
 github.com/netbirdio/management-integrations/additions v0.0.0-20240226151841-2e4fe2407450/go.mod h1:31FhBNvQ+riHEIu6LSTmqr8IeuSIsGfQffqV4LFmbwA=
-github.com/netbirdio/management-integrations/integrations v0.0.0-20240226151841-2e4fe2407450 h1:jEepZRVo60IN+us4E8BvNUbasoViFwqJ7exKix4aQyc=
-github.com/netbirdio/management-integrations/integrations v0.0.0-20240226151841-2e4fe2407450/go.mod h1:B0nMS3es77gOvPYhc0K91fAzTkQLi/jRq5TffUN3klM=
+github.com/netbirdio/management-integrations/integrations v0.0.0-20240305130559-469a80446ac7 h1:YYIQJbRhANmNFClkCmjBa0w33RpTzsF2DpbGAWhul6Y=
+github.com/netbirdio/management-integrations/integrations v0.0.0-20240305130559-469a80446ac7/go.mod h1:B0nMS3es77gOvPYhc0K91fAzTkQLi/jRq5TffUN3klM=
 github.com/netbirdio/service v0.0.0-20230215170314-b923b89432b0 h1:hirFRfx3grVA/9eEyjME5/z3nxdJlN9kfQpvWWPk32g=
 github.com/netbirdio/service v0.0.0-20230215170314-b923b89432b0/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
 github.com/netbirdio/systray v0.0.0-20231030152038-ef1ed2a27949 h1:xbWM9BU6mwZZLHxEjxIX/V8Hv3HurQt4mReIE4mY4DM=
@@ -8,7 +8,7 @@ import (
     "golang.zx2c4.com/wireguard/tun/netstack"
 )
 
-type NetStackTun struct {
+type NetStackTun struct { //nolint:revive
     address       string
     mtu           int
     listenAddress string
@@ -43,21 +43,18 @@ download_geolite_mmdb() {
   mkdir -p "$EXTRACTION_DIR"
   tar -xzvf "$DATABASE_FILE" > /dev/null 2>&1
 
-  # Create a SHA256 signature file
   MMDB_FILE="GeoLite2-City.mmdb"
-  cd "$EXTRACTION_DIR"
-  sha256sum "$MMDB_FILE" > "$MMDB_FILE.sha256"
-  echo "SHA256 signature created for $MMDB_FILE."
-  cd - > /dev/null 2>&1
+  cp "$EXTRACTION_DIR"/"$MMDB_FILE" $MMDB_FILE
 
   # Remove downloaded files
+  rm -r "$EXTRACTION_DIR"
   rm "$DATABASE_FILE" "$SIGNATURE_FILE"
 
   # Done. Print next steps
   echo ""
   echo "Process completed successfully."
-  echo "Now you can place $EXTRACTION_DIR/$MMDB_FILE to 'datadir' of management service."
-  echo -e "Example:\n\tdocker compose cp $EXTRACTION_DIR/$MMDB_FILE management:/var/lib/netbird/"
+  echo "Now you can place $MMDB_FILE to 'datadir' of management service."
+  echo -e "Example:\n\tdocker compose cp $MMDB_FILE management:/var/lib/netbird/"
 }
 
 
@@ -137,6 +137,13 @@ create_new_application() {
   BASE_REDIRECT_URL2=$5
   LOGOUT_URL=$6
   ZITADEL_DEV_MODE=$7
+  DEVICE_CODE=$8
+
+  if [[ $DEVICE_CODE == "true" ]]; then
+    GRANT_TYPES='["OIDC_GRANT_TYPE_AUTHORIZATION_CODE","OIDC_GRANT_TYPE_DEVICE_CODE","OIDC_GRANT_TYPE_REFRESH_TOKEN"]'
+  else
+    GRANT_TYPES='["OIDC_GRANT_TYPE_AUTHORIZATION_CODE","OIDC_GRANT_TYPE_REFRESH_TOKEN"]'
+  fi
 
   RESPONSE=$(
     curl -sS -X POST "$INSTANCE_URL/management/v1/projects/$PROJECT_ID/apps/oidc" \

@@ -154,10 +161,7 @@ create_new_application() {
         "RESPONSETypes": [
           "OIDC_RESPONSE_TYPE_CODE"
         ],
-        "grantTypes": [
-          "OIDC_GRANT_TYPE_AUTHORIZATION_CODE",
-          "OIDC_GRANT_TYPE_REFRESH_TOKEN"
-        ],
+        "grantTypes": '"$GRANT_TYPES"',
         "appType": "OIDC_APP_TYPE_USER_AGENT",
         "authMethodType": "OIDC_AUTH_METHOD_TYPE_NONE",
         "version": "OIDC_VERSION_1_0",
@@ -340,10 +344,10 @@ init_zitadel() {
 
   # create zitadel spa applications
   echo "Creating new Zitadel SPA Dashboard application"
-  DASHBOARD_APPLICATION_CLIENT_ID=$(create_new_application "$INSTANCE_URL" "$PAT" "Dashboard" "$BASE_REDIRECT_URL/nb-auth" "$BASE_REDIRECT_URL/nb-silent-auth" "$BASE_REDIRECT_URL/" "$ZITADEL_DEV_MODE")
+  DASHBOARD_APPLICATION_CLIENT_ID=$(create_new_application "$INSTANCE_URL" "$PAT" "Dashboard" "$BASE_REDIRECT_URL/nb-auth" "$BASE_REDIRECT_URL/nb-silent-auth" "$BASE_REDIRECT_URL/" "$ZITADEL_DEV_MODE" "false")
 
   echo "Creating new Zitadel SPA Cli application"
-  CLI_APPLICATION_CLIENT_ID=$(create_new_application "$INSTANCE_URL" "$PAT" "Cli" "http://localhost:53000/" "http://localhost:54000/" "http://localhost:53000/" "true")
+  CLI_APPLICATION_CLIENT_ID=$(create_new_application "$INSTANCE_URL" "$PAT" "Cli" "http://localhost:53000/" "http://localhost:54000/" "http://localhost:53000/" "true" "true")
 
   MACHINE_USER_ID=$(create_service_user "$INSTANCE_URL" "$PAT")
 
@@ -561,6 +565,8 @@ renderCaddyfile() {
     reverse_proxy /.well-known/openid-configuration h2c://zitadel:8080
     reverse_proxy /openapi/* h2c://zitadel:8080
     reverse_proxy /debug/* h2c://zitadel:8080
+    reverse_proxy /device/* h2c://zitadel:8080
+    reverse_proxy /device h2c://zitadel:8080
     # Dashboard
     reverse_proxy /* dashboard:80
 }
@@ -629,6 +635,14 @@ renderManagementJson() {
             "ManagementEndpoint": "$NETBIRD_HTTP_PROTOCOL://$NETBIRD_DOMAIN/management/v1"
         }
     },
+    "DeviceAuthorizationFlow": {
+        "Provider": "hosted",
+        "ProviderConfig": {
+            "Audience": "$NETBIRD_AUTH_CLIENT_ID_CLI",
+            "ClientID": "$NETBIRD_AUTH_CLIENT_ID_CLI",
+            "Scope": "openid"
+        }
+    },
     "PKCEAuthorizationFlow": {
         "ProviderConfig": {
             "Audience": "$NETBIRD_AUTH_CLIENT_ID_CLI",
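The DeviceAuthorizationFlow block above, together with the new OIDC_GRANT_TYPE_DEVICE_CODE grant and the /device proxy routes, enables the RFC 8628 device authorization flow for the CLI. A rough sketch of the initial request a client would make is shown below; the endpoint URL and client ID are placeholders, while the response field names follow RFC 8628 and the AuthFlowInfo struct earlier in this diff:

```go
// Illustrative sketch of the Device Authorization Grant (RFC 8628) request.
// The endpoint URL and client ID are placeholders, not values from this change.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

type deviceAuthResponse struct {
	DeviceCode      string `json:"device_code"`
	UserCode        string `json:"user_code"`
	VerificationURI string `json:"verification_uri"`
	ExpiresIn       int    `json:"expires_in"`
	Interval        int    `json:"interval"`
}

func main() {
	form := url.Values{
		"client_id": {"<NETBIRD_AUTH_CLIENT_ID_CLI>"}, // placeholder
		"scope":     {"openid"},
	}
	// Placeholder endpoint; the Caddyfile change above exposes the IdP's
	// device pages through the reverse proxy.
	resp, err := http.Post(
		"https://netbird.example.com/oauth/v2/device_authorization",
		"application/x-www-form-urlencoded",
		strings.NewReader(form.Encode()),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var da deviceAuthResponse
	if err := json.NewDecoder(resp.Body).Decode(&da); err != nil {
		panic(err)
	}
	fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)
	// The client then polls the token endpoint with device_code until the user approves.
}
```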
@@ -26,6 +26,13 @@
         "Username": "",
         "Password": null
     },
+    "ReverseProxy": {
+        "TrustedHTTPProxies": [],
+        "TrustedHTTPProxiesCount": 0,
+        "TrustedPeers": [
+            "0.0.0.0/0"
+        ]
+    },
     "Datadir": "",
     "DataStoreEncryptionKey": "$NETBIRD_DATASTORE_ENC_KEY",
     "StoreConfig": {
@@ -46,6 +46,7 @@ server {
         proxy_set_header X-Scheme $scheme;
         proxy_set_header X-Forwarded-Proto https;
         proxy_set_header X-Forwarded-Host $host;
+        grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 
         # Proxy dashboard
         location / {
@@ -2,7 +2,6 @@ package client
 
 import (
     "context"
-    "github.com/netbirdio/management-integrations/integrations"
     "net"
     "path/filepath"
     "sync"

@@ -16,6 +15,7 @@ import (
     log "github.com/sirupsen/logrus"
     "github.com/stretchr/testify/assert"
 
+    "github.com/netbirdio/management-integrations/integrations"
     "github.com/netbirdio/netbird/encryption"
     mgmtProto "github.com/netbirdio/netbird/management/proto"
     mgmt "github.com/netbirdio/netbird/management/server"

@@ -61,7 +61,8 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) {
 
     peersUpdateManager := mgmt.NewPeersUpdateManager(nil)
     eventStore := &activity.InMemoryEventStore{}
-    accountManager, err := mgmt.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false, integrations.NewIntegratedApproval())
+    ia, _ := integrations.NewIntegratedApproval(eventStore)
+    accountManager, err := mgmt.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false, ia)
     if err != nil {
         t.Fatal(err)
     }

@@ -368,6 +369,7 @@ func Test_SystemMetaDataFromClient(t *testing.T) {
         SysSerialNumber: info.SystemSerialNumber,
         SysProductName:  info.SystemProductName,
         SysManufacturer: info.SystemManufacturer,
+        Environment:     &mgmtProto.Environment{Cloud: info.Environment.Cloud, Platform: info.Environment.Platform},
     }
 
     assert.Equal(t, ValidKey, actualValidKey)

@@ -408,7 +410,9 @@ func isEqual(a, b *mgmtProto.PeerSystemMeta) bool {
         a.GetUiVersion() == b.GetUiVersion() &&
         a.GetSysSerialNumber() == b.GetSysSerialNumber() &&
         a.GetSysProductName() == b.GetSysProductName() &&
-        a.GetSysManufacturer() == b.GetSysManufacturer()
+        a.GetSysManufacturer() == b.GetSysManufacturer() &&
+        a.GetEnvironment().Cloud == b.GetEnvironment().Cloud &&
+        a.GetEnvironment().Platform == b.GetEnvironment().Platform
 }
 
 func Test_GetDeviceAuthorizationFlow(t *testing.T) {
@@ -26,6 +26,8 @@ import (
     "github.com/netbirdio/netbird/management/proto"
 )
 
+const ConnectTimeout = 10 * time.Second
+
 // ConnStateNotifier is a wrapper interface of the status recorders
 type ConnStateNotifier interface {
     MarkManagementDisconnected(error)

@@ -49,7 +51,7 @@ func NewClient(ctx context.Context, addr string, ourPrivateKey wgtypes.Key, tlsE
         transportOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
     }
 
-    mgmCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+    mgmCtx, cancel := context.WithTimeout(ctx, ConnectTimeout)
     defer cancel()
     conn, err := grpc.DialContext(
         mgmCtx,

@@ -318,7 +320,7 @@ func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*pro
         log.Errorf("failed to encrypt message: %s", err)
         return nil, err
     }
-    mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
+    mgmCtx, cancel := context.WithTimeout(c.ctx, ConnectTimeout)
     defer cancel()
     resp, err := c.realClient.Login(mgmCtx, &proto.EncryptedMessage{
         WgPubKey: c.key.PublicKey().String(),

@@ -474,5 +476,9 @@ func infoToMetaData(info *system.Info) *proto.PeerSystemMeta {
         SysSerialNumber: info.SystemSerialNumber,
         SysManufacturer: info.SystemManufacturer,
         SysProductName:  info.SystemProductName,
+        Environment: &proto.Environment{
+            Cloud:    info.Environment.Cloud,
+            Platform: info.Environment.Platform,
+        },
     }
 }
@@ -43,6 +43,7 @@ import (
     "github.com/netbirdio/netbird/management/server/metrics"
     "github.com/netbirdio/netbird/management/server/telemetry"
     "github.com/netbirdio/netbird/util"
+    "github.com/netbirdio/netbird/version"
 )
 
 // ManagementLegacyPort is the port that was used before by the Management gRPC server.

@@ -166,13 +167,15 @@ var (
 
         geo, err := geolocation.NewGeolocation(config.Datadir)
         if err != nil {
-            log.Warnf("could not initialize geo location service, we proceed without geo support")
+            log.Warnf("could not initialize geo location service: %v, we proceed without geo support", err)
         } else {
             log.Infof("geo location service has been initialized from %s", config.Datadir)
         }
 
-        integratedPeerApproval := integrations.NewIntegratedApproval()
+        integratedPeerApproval, err := integrations.NewIntegratedApproval(eventStore)
+        if err != nil {
+            return fmt.Errorf("failed to initialize integrated peer approval: %v", err)
+        }
         accountManager, err := server.BuildManager(store, peersUpdateManager, idpManager, mgmtSingleAccModeDomain,
             dnsDomain, eventStore, geo, userDeleteFromIDPEnabled, integratedPeerApproval)
         if err != nil {

@@ -317,12 +320,14 @@ var (
             }
         }
 
+        log.Infof("management server version %s", version.NetbirdVersion())
         log.Infof("running HTTP server and gRPC server on the same port: %s", listener.Addr().String())
         serveGRPCWithHTTP(listener, rootHandler, tlsEnabled)
 
         SetupCloseHandler()
 
         <-stopCh
+        integratedPeerApproval.Stop()
         if geo != nil {
             _ = geo.Stop()
         }
@@ -92,6 +92,14 @@ message PeerKeys {
   bytes wgPubKey = 2;
 }
 
+// Environment is part of the PeerSystemMeta and describes the environment the agent is running in.
+message Environment {
+  // cloud is the cloud provider the agent is running in if applicable.
+  string cloud = 1;
+  // platform is the platform the agent is running on if applicable.
+  string platform = 2;
+}
+
 // PeerSystemMeta is machine meta data like OS and version.
 message PeerSystemMeta {
   string hostname = 1;

@@ -108,6 +116,7 @@ message PeerSystemMeta {
   string sysSerialNumber = 12;
   string sysProductName = 13;
   string sysManufacturer = 14;
+  Environment environment = 15;
 }
 
 message LoginResponse {
@ -126,6 +126,7 @@ type AccountManager interface {
|
|||||||
SavePostureChecks(accountID, userID string, postureChecks *posture.Checks) error
|
SavePostureChecks(accountID, userID string, postureChecks *posture.Checks) error
|
||||||
DeletePostureChecks(accountID, postureChecksID, userID string) error
|
DeletePostureChecks(accountID, postureChecksID, userID string) error
|
||||||
ListPostureChecks(accountID, userID string) ([]*posture.Checks, error)
|
ListPostureChecks(accountID, userID string) ([]*posture.Checks, error)
|
||||||
|
GetIdpManager() idp.Manager
|
||||||
UpdateIntegratedApprovalGroups(accountID string, userID string, groups []string) error
|
UpdateIntegratedApprovalGroups(accountID string, userID string, groups []string) error
|
||||||
GroupValidation(accountId string, groups []string) (bool, error)
|
GroupValidation(accountId string, groups []string) (bool, error)
|
||||||
}
|
}
|
@@ -209,6 +210,7 @@ type Account struct {

 	// User.Id it was created by
 	CreatedBy string
+	CreatedAt time.Time
 	Domain string `gorm:"index"`
 	DomainCategory string
 	IsDomainPrimaryAccount bool
@@ -703,6 +705,7 @@ func (a *Account) Copy() *Account {
 	return &Account{
 		Id: a.Id,
 		CreatedBy: a.CreatedBy,
+		CreatedAt: a.CreatedAt,
 		Domain: a.Domain,
 		DomainCategory: a.DomainCategory,
 		IsDomainPrimaryAccount: a.IsDomainPrimaryAccount,
@@ -922,6 +925,10 @@ func (am *DefaultAccountManager) GetExternalCacheManager() ExternalCacheManager
 	return am.externalCacheManager
 }

+func (am *DefaultAccountManager) GetIdpManager() idp.Manager {
+	return am.idpManager
+}
+
 // UpdateAccountSettings updates Account settings.
 // Only users with role UserRoleAdmin can update the account.
 // User that performs the update has to belong to the account.
@@ -939,12 +946,7 @@ func (am *DefaultAccountManager) UpdateAccountSettings(accountID, userID string,
 	unlock := am.Store.AcquireAccountLock(accountID)
 	defer unlock()

-	account, err := am.Store.GetAccountByUser(userID)
-	if err != nil {
-		return nil, err
-	}
-
-	err = additions.ValidateExtraSettings(newSettings.Extra, account.Settings.Extra, account.Peers, userID, accountID, am.eventStore)
+	account, err := am.Store.GetAccount(accountID)
 	if err != nil {
 		return nil, err
 	}
@@ -958,6 +960,11 @@ func (am *DefaultAccountManager) UpdateAccountSettings(accountID, userID string,
 		return nil, status.Errorf(status.PermissionDenied, "user is not allowed to update account")
 	}

+	err = additions.ValidateExtraSettings(newSettings.Extra, account.Settings.Extra, account.Peers, userID, accountID, am.eventStore)
+	if err != nil {
+		return nil, err
+	}
+
 	oldSettings := account.Settings
 	if oldSettings.PeerLoginExpirationEnabled != newSettings.PeerLoginExpirationEnabled {
 		event := activity.AccountPeerLoginExpirationEnabled
@@ -1892,6 +1899,7 @@ func newAccountWithId(accountID, userID, domain string) *Account {

 	acc := &Account{
 		Id: accountID,
+		CreatedAt: time.Now().UTC(),
 		SetupKeys: setupKeys,
 		Network: network,
 		Peers: peers,
@@ -32,8 +32,12 @@ func (MocIntegratedApproval) PreparePeer(accountID string, peer *nbpeer.Peer, pe
 	return peer
 }

-func (MocIntegratedApproval) SyncPeer(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) (*nbpeer.Peer, bool) {
-	return peer.Copy(), false
+func (MocIntegratedApproval) IsRequiresApproval(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) bool {
+	return false
+}
+
+func (MocIntegratedApproval) Stop() {
+
 }

 func verifyCanAddPeerToAccount(t *testing.T, manager AccountManager, account *Account, userID string) {
@@ -104,6 +108,10 @@ func verifyNewAccountHasDefaultFields(t *testing.T, account *Account, createdBy
 		t.Errorf("expecting newly created account to be created by user %s, got %s", createdBy, account.CreatedBy)
 	}

+	if account.CreatedAt.IsZero() {
+		t.Errorf("expecting newly created account to have a non-zero creation time")
+	}
+
 	if account.Domain != domain {
 		t.Errorf("expecting newly created account to have domain %s, got %s", domain, account.Domain)
 	}
@ -1483,6 +1491,7 @@ func TestAccount_Copy(t *testing.T) {
|
|||||||
account := &Account{
|
account := &Account{
|
||||||
Id: "account1",
|
Id: "account1",
|
||||||
CreatedBy: "tester",
|
CreatedBy: "tester",
|
||||||
|
CreatedAt: time.Now().UTC(),
|
||||||
Domain: "test.com",
|
Domain: "test.com",
|
||||||
DomainCategory: "public",
|
DomainCategory: "public",
|
||||||
IsDomainPrimaryAccount: true,
|
IsDomainPrimaryAccount: true,
|
||||||
@@ -9,7 +9,7 @@ const (
 )

 // ActivityDescriber is an interface that describes an activity
-type ActivityDescriber interface {
+type ActivityDescriber interface { //nolint:revive
 	StringCode() string
 	Message() string
 }
management/server/geolocation/database.go (new file, 210 lines)
@@ -0,0 +1,210 @@
package geolocation

import (
	"encoding/csv"
	"fmt"
	"io"
	"net/url"
	"os"
	"path"
	"strconv"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

const (
	geoLiteCityTarGZURL     = "https://pkgs.netbird.io/geolocation-dbs/GeoLite2-City/download?suffix=tar.gz"
	geoLiteCityZipURL       = "https://pkgs.netbird.io/geolocation-dbs/GeoLite2-City-CSV/download?suffix=zip"
	geoLiteCitySha256TarURL = "https://pkgs.netbird.io/geolocation-dbs/GeoLite2-City/download?suffix=tar.gz.sha256"
	geoLiteCitySha256ZipURL = "https://pkgs.netbird.io/geolocation-dbs/GeoLite2-City-CSV/download?suffix=zip.sha256"
)

// loadGeolocationDatabases loads the MaxMind databases.
func loadGeolocationDatabases(dataDir string) error {
	files := []string{MMDBFileName, GeoSqliteDBFile}
	for _, file := range files {
		exists, _ := fileExists(path.Join(dataDir, file))
		if exists {
			continue
		}

		switch file {
		case MMDBFileName:
			extractFunc := func(src string, dst string) error {
				if err := decompressTarGzFile(src, dst); err != nil {
					return err
				}
				return copyFile(path.Join(dst, MMDBFileName), path.Join(dataDir, MMDBFileName))
			}
			if err := loadDatabase(
				geoLiteCitySha256TarURL,
				geoLiteCityTarGZURL,
				extractFunc,
			); err != nil {
				return err
			}

		case GeoSqliteDBFile:
			extractFunc := func(src string, dst string) error {
				if err := decompressZipFile(src, dst); err != nil {
					return err
				}
				extractedCsvFile := path.Join(dst, "GeoLite2-City-Locations-en.csv")
				return importCsvToSqlite(dataDir, extractedCsvFile)
			}

			if err := loadDatabase(
				geoLiteCitySha256ZipURL,
				geoLiteCityZipURL,
				extractFunc,
			); err != nil {
				return err
			}
		}
	}
	return nil
}

// loadDatabase downloads a file from the specified URL and verifies its checksum.
// It then calls the extract function to perform additional processing on the extracted files.
func loadDatabase(checksumURL string, fileURL string, extractFunc func(src string, dst string) error) error {
	temp, err := os.MkdirTemp(os.TempDir(), "geolite")
	if err != nil {
		return err
	}
	defer os.RemoveAll(temp)

	checksumFile := path.Join(temp, getDatabaseFileName(checksumURL))
	err = downloadFile(checksumURL, checksumFile)
	if err != nil {
		return err
	}

	sha256sum, err := loadChecksumFromFile(checksumFile)
	if err != nil {
		return err
	}

	dbFile := path.Join(temp, getDatabaseFileName(fileURL))
	err = downloadFile(fileURL, dbFile)
	if err != nil {
		return err
	}

	if err := verifyChecksum(dbFile, sha256sum); err != nil {
		return err
	}

	return extractFunc(dbFile, temp)
}

// importCsvToSqlite imports a CSV file into a SQLite database.
func importCsvToSqlite(dataDir string, csvFile string) error {
	geonames, err := loadGeonamesCsv(csvFile)
	if err != nil {
		return err
	}

	db, err := gorm.Open(sqlite.Open(path.Join(dataDir, GeoSqliteDBFile)), &gorm.Config{
		Logger:          logger.Default.LogMode(logger.Silent),
		CreateBatchSize: 1000,
		PrepareStmt:     true,
	})
	if err != nil {
		return err
	}
	defer func() {
		sql, err := db.DB()
		if err != nil {
			return
		}
		sql.Close()
	}()

	if err := db.AutoMigrate(&GeoNames{}); err != nil {
		return err
	}

	return db.Create(geonames).Error
}

func loadGeonamesCsv(filepath string) ([]GeoNames, error) {
	f, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	reader := csv.NewReader(f)
	records, err := reader.ReadAll()
	if err != nil {
		return nil, err
	}

	var geoNames []GeoNames
	for index, record := range records {
		if index == 0 {
			continue
		}
		geoNameID, err := strconv.Atoi(record[0])
		if err != nil {
			return nil, err
		}

		geoName := GeoNames{
			GeoNameID:           geoNameID,
			LocaleCode:          record[1],
			ContinentCode:       record[2],
			ContinentName:       record[3],
			CountryIsoCode:      record[4],
			CountryName:         record[5],
			Subdivision1IsoCode: record[6],
			Subdivision1Name:    record[7],
			Subdivision2IsoCode: record[8],
			Subdivision2Name:    record[9],
			CityName:            record[10],
			MetroCode:           record[11],
			TimeZone:            record[12],
			IsInEuropeanUnion:   record[13],
		}
		geoNames = append(geoNames, geoName)
	}

	return geoNames, nil
}

// getDatabaseFileName extracts the file name from a given URL string.
func getDatabaseFileName(urlStr string) string {
	u, err := url.Parse(urlStr)
	if err != nil {
		panic(err)
	}

	ext := u.Query().Get("suffix")
	fileName := fmt.Sprintf("%s.%s", path.Base(u.Path), ext)
	return fileName
}

// copyFile performs a file copy operation from the source file to the destination.
func copyFile(src string, dst string) error {
	srcFile, err := os.Open(src)
	if err != nil {
		return err
	}
	defer srcFile.Close()

	dstFile, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer dstFile.Close()

	_, err = io.Copy(dstFile, srcFile)
	if err != nil {
		return err
	}

	return nil
}
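For reference, the download-verify-extract pipeline above is reusable: loadDatabase fetches the checksum file, then the archive, verifies the SHA256 sum and hands the archive to the caller's extract callback. The sketch below shows how another database could be wired in inside the same package; the ASN database name and both URLs are placeholders invented for illustration, and only loadDatabase, decompressTarGzFile and copyFile come from the code above.

package geolocation

import "path"

// Hypothetical example only: wiring one more MaxMind archive through the same
// download -> checksum -> extract pipeline. The constant and both URLs are
// placeholders; loadDatabase, decompressTarGzFile and copyFile are the helpers
// defined in database.go above.
const asnDBName = "GeoLite2-ASN.mmdb"

func loadASNDatabase(dataDir string) error {
	extractFunc := func(src string, dst string) error {
		if err := decompressTarGzFile(src, dst); err != nil {
			return err
		}
		return copyFile(path.Join(dst, asnDBName), path.Join(dataDir, asnDBName))
	}

	return loadDatabase(
		"https://example.com/GeoLite2-ASN/download?suffix=tar.gz.sha256", // placeholder checksum URL
		"https://example.com/GeoLite2-ASN/download?suffix=tar.gz",        // placeholder archive URL
		extractFunc,
	)
}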
@@ -2,9 +2,7 @@ package geolocation

 import (
 	"bytes"
-	"crypto/sha256"
 	"fmt"
-	"io"
 	"net"
 	"os"
 	"path"
@@ -54,20 +52,23 @@ type Country struct {
 	CountryName string
 }

-func NewGeolocation(datadir string) (*Geolocation, error) {
-	mmdbPath := path.Join(datadir, MMDBFileName)
+func NewGeolocation(dataDir string) (*Geolocation, error) {
+	if err := loadGeolocationDatabases(dataDir); err != nil {
+		return nil, fmt.Errorf("failed to load MaxMind databases: %v", err)
+	}
+
+	mmdbPath := path.Join(dataDir, MMDBFileName)
 	db, err := openDB(mmdbPath)
 	if err != nil {
 		return nil, err
 	}

-	sha256sum, err := getSha256sum(mmdbPath)
+	sha256sum, err := calculateFileSHA256(mmdbPath)
 	if err != nil {
 		return nil, err
 	}

-	locationDB, err := NewSqliteStore(datadir)
+	locationDB, err := NewSqliteStore(dataDir)
 	if err != nil {
 		return nil, err
 	}
@@ -104,21 +105,6 @@ func openDB(mmdbPath string) (*maxminddb.Reader, error) {
 	return db, nil
 }

-func getSha256sum(mmdbPath string) ([]byte, error) {
-	f, err := os.Open(mmdbPath)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	h := sha256.New()
-	if _, err := io.Copy(h, f); err != nil {
-		return nil, err
-	}
-
-	return h.Sum(nil), nil
-}
-
 func (gl *Geolocation) Lookup(ip net.IP) (*Record, error) {
 	gl.mux.RLock()
 	defer gl.mux.RUnlock()
@@ -189,7 +175,7 @@ func (gl *Geolocation) reloader() {
 			log.Errorf("geonames db reload failed: %s", err)
 		}

-		newSha256sum1, err := getSha256sum(gl.mmdbPath)
+		newSha256sum1, err := calculateFileSHA256(gl.mmdbPath)
 		if err != nil {
 			log.Errorf("failed to calculate sha256 sum for '%s': %s", gl.mmdbPath, err)
 			continue
@@ -198,7 +184,7 @@ func (gl *Geolocation) reloader() {
 		// we check sum twice just to avoid possible case when we reload during update of the file
 		// considering the frequency of file update (few times a week) checking sum twice should be enough
 		time.Sleep(50 * time.Millisecond)
-		newSha256sum2, err := getSha256sum(gl.mmdbPath)
+		newSha256sum2, err := calculateFileSHA256(gl.mmdbPath)
 		if err != nil {
 			log.Errorf("failed to calculate sha256 sum for '%s': %s", gl.mmdbPath, err)
 			continue
@@ -20,6 +20,27 @@ const (
 	GeoSqliteDBFile = "geonames.db"
 )

+type GeoNames struct {
+	GeoNameID           int    `gorm:"column:geoname_id"`
+	LocaleCode          string `gorm:"column:locale_code"`
+	ContinentCode       string `gorm:"column:continent_code"`
+	ContinentName       string `gorm:"column:continent_name"`
+	CountryIsoCode      string `gorm:"column:country_iso_code"`
+	CountryName         string `gorm:"column:country_name"`
+	Subdivision1IsoCode string `gorm:"column:subdivision_1_iso_code"`
+	Subdivision1Name    string `gorm:"column:subdivision_1_name"`
+	Subdivision2IsoCode string `gorm:"column:subdivision_2_iso_code"`
+	Subdivision2Name    string `gorm:"column:subdivision_2_name"`
+	CityName            string `gorm:"column:city_name"`
+	MetroCode           string `gorm:"column:metro_code"`
+	TimeZone            string `gorm:"column:time_zone"`
+	IsInEuropeanUnion   string `gorm:"column:is_in_european_union"`
+}
+
+func (*GeoNames) TableName() string {
+	return "geonames"
+}
+
 // SqliteStore represents a location storage backed by a Sqlite DB.
 type SqliteStore struct {
 	db *gorm.DB
@@ -37,7 +58,7 @@ func NewSqliteStore(dataDir string) (*SqliteStore, error) {
 		return nil, err
 	}

-	sha256sum, err := getSha256sum(file)
+	sha256sum, err := calculateFileSHA256(file)
 	if err != nil {
 		return nil, err
 	}
@@ -60,7 +81,7 @@ func (s *SqliteStore) GetAllCountries() ([]Country, error) {
 	}

 	var countries []Country
-	result := s.db.Table("geonames").
+	result := s.db.Model(&GeoNames{}).
 		Select("country_iso_code", "country_name").
 		Group("country_name").
 		Scan(&countries)
@@ -81,7 +102,7 @@ func (s *SqliteStore) GetCitiesByCountry(countryISOCode string) ([]City, error)
 	}

 	var cities []City
-	result := s.db.Table("geonames").
+	result := s.db.Model(&GeoNames{}).
 		Select("geoname_id", "city_name").
 		Where("country_iso_code = ?", countryISOCode).
 		Group("city_name").
@@ -98,7 +119,7 @@ func (s *SqliteStore) reload() error {
 	s.mux.Lock()
 	defer s.mux.Unlock()

-	newSha256sum1, err := getSha256sum(s.filePath)
+	newSha256sum1, err := calculateFileSHA256(s.filePath)
 	if err != nil {
 		log.Errorf("failed to calculate sha256 sum for '%s': %s", s.filePath, err)
 	}
@@ -107,7 +128,7 @@ func (s *SqliteStore) reload() error {
 	// we check sum twice just to avoid possible case when we reload during update of the file
 	// considering the frequency of file update (few times a week) checking sum twice should be enough
 	time.Sleep(50 * time.Millisecond)
-	newSha256sum2, err := getSha256sum(s.filePath)
+	newSha256sum2, err := calculateFileSHA256(s.filePath)
 	if err != nil {
 		return fmt.Errorf("failed to calculate sha256 sum for '%s': %s", s.filePath, err)
 	}
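Both reloaders above rely on the same guard: hash the file, wait briefly, hash it again, and only reload when the two sums agree, so a half-written file is never picked up. A compact sketch of that check, assuming the calculateFileSHA256 helper introduced in this PR; the function name and parameters are illustrative.

package geolocation

import (
	"bytes"
	"time"
)

// Sketch of the "hash twice" guard used by the reloaders above; not part of
// this PR. calculateFileSHA256 is the helper defined in utils.go below.
func fileSettledAndChanged(filePath string, lastSum []byte) (bool, []byte, error) {
	sum1, err := calculateFileSHA256(filePath)
	if err != nil {
		return false, nil, err
	}

	// give a concurrent writer a moment to finish, then hash again
	time.Sleep(50 * time.Millisecond)

	sum2, err := calculateFileSHA256(filePath)
	if err != nil {
		return false, nil, err
	}

	// reload only when the content is stable (both reads agree) and differs
	// from the sum recorded at the previous reload
	changed := bytes.Equal(sum1, sum2) && !bytes.Equal(sum2, lastSum)
	return changed, sum2, nil
}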
management/server/geolocation/utils.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package geolocation

import (
	"archive/tar"
	"archive/zip"
	"bufio"
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
	"path"
	"strings"
)

// decompressTarGzFile decompresses a .tar.gz file.
func decompressTarGzFile(filepath, destDir string) error {
	file, err := os.Open(filepath)
	if err != nil {
		return err
	}
	defer file.Close()

	gzipReader, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer gzipReader.Close()

	tarReader := tar.NewReader(gzipReader)

	for {
		header, err := tarReader.Next()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		if header.Typeflag == tar.TypeReg {
			outFile, err := os.Create(path.Join(destDir, path.Base(header.Name)))
			if err != nil {
				return err
			}

			_, err = io.Copy(outFile, tarReader) // #nosec G110
			outFile.Close()
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// decompressZipFile decompresses a .zip file.
func decompressZipFile(filepath, destDir string) error {
	r, err := zip.OpenReader(filepath)
	if err != nil {
		return err
	}
	defer r.Close()

	for _, f := range r.File {
		if f.FileInfo().IsDir() {
			continue
		}

		outFile, err := os.Create(path.Join(destDir, path.Base(f.Name)))
		if err != nil {
			return err
		}

		rc, err := f.Open()
		if err != nil {
			outFile.Close()
			return err
		}

		_, err = io.Copy(outFile, rc) // #nosec G110
		outFile.Close()
		rc.Close()
		if err != nil {
			return err
		}
	}

	return nil
}

// calculateFileSHA256 calculates the SHA256 checksum of a file.
func calculateFileSHA256(filepath string) ([]byte, error) {
	file, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	h := sha256.New()
	if _, err := io.Copy(h, file); err != nil {
		return nil, err
	}

	return h.Sum(nil), nil
}

// loadChecksumFromFile loads the first checksum from a file.
func loadChecksumFromFile(filepath string) (string, error) {
	file, err := os.Open(filepath)
	if err != nil {
		return "", err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	if scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) > 0 {
			return parts[0], nil
		}
	}
	if err := scanner.Err(); err != nil {
		return "", err
	}

	return "", nil
}

// verifyChecksum compares the calculated SHA256 checksum of a file against the expected checksum.
func verifyChecksum(filepath, expectedChecksum string) error {
	calculatedChecksum, err := calculateFileSHA256(filepath)
	fileCheckSum := fmt.Sprintf("%x", calculatedChecksum)
	if err != nil {
		return err
	}

	if fileCheckSum != expectedChecksum {
		return fmt.Errorf("checksum mismatch: expected %s, got %s", expectedChecksum, fileCheckSum)
	}

	return nil
}

// downloadFile downloads a file from a URL and saves it to a local file path.
func downloadFile(url, filepath string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	bodyBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected error occurred while downloading the file: %s", string(bodyBytes))
	}

	out, err := os.Create(filepath)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, bytes.NewBuffer(bodyBytes))
	return err
}
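loadChecksumFromFile above takes the first whitespace-separated token of the first line, i.e. the usual sha256sum output format of "<hex digest>  <filename>". A small sketch, using only the helpers defined above, of verifying a downloaded archive against such a file; the verifyDownload wrapper itself is illustrative and not part of this PR.

package geolocation

import "fmt"

// Illustrative helper composing the utilities above: read the expected digest
// from a sha256sum-style checksum file and compare it with the archive on disk.
func verifyDownload(archivePath, checksumPath string) error {
	expected, err := loadChecksumFromFile(checksumPath) // first token of the first line
	if err != nil {
		return err
	}
	if expected == "" {
		return fmt.Errorf("no checksum found in %s", checksumPath)
	}
	return verifyChecksum(archivePath, expected)
}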
@@ -288,6 +288,10 @@ func extractPeerMeta(loginReq *proto.LoginRequest) nbpeer.PeerSystemMeta {
 		SystemSerialNumber: loginReq.GetMeta().GetSysSerialNumber(),
 		SystemProductName: loginReq.GetMeta().GetSysProductName(),
 		SystemManufacturer: loginReq.GetMeta().GetSysManufacturer(),
+		Environment: nbpeer.Environment{
+			Cloud: loginReq.GetMeta().GetEnvironment().GetCloud(),
+			Platform: loginReq.GetMeta().GetEnvironment().GetPlatform(),
+		},
 	}
 }
@@ -121,7 +121,7 @@ components:
         description: Last time this user performed a login to the dashboard
         type: string
         format: date-time
-        example: 2023-05-05T09:00:35.477782Z
+        example: "2023-05-05T09:00:35.477782Z"
       auto_groups:
         description: Group IDs to auto-assign to peers registered by this user
         type: array
@@ -259,7 +259,7 @@ components:
         description: Last time peer connected to Netbird's management service
         type: string
         format: date-time
-        example: 2023-05-05T10:05:26.420578Z
+        example: "2023-05-05T10:05:26.420578Z"
       os:
         description: Peer's operating system and version
         type: string
@@ -313,7 +313,7 @@ components:
         description: Last time this peer performed log in (authentication). E.g., user authenticated.
         type: string
         format: date-time
-        example: 2023-05-05T09:00:35.477782Z
+        example: "2023-05-05T09:00:35.477782Z"
       approval_required:
         description: (Cloud only) Indicates whether peer needs approval
         type: boolean
@@ -405,7 +405,7 @@ components:
         description: Setup Key expiration date
         type: string
         format: date-time
-        example: 2023-06-01T14:47:22.291057Z
+        example: "2023-06-01T14:47:22.291057Z"
       type:
         description: Setup key type, one-off for single time usage and reusable
         type: string
@@ -426,7 +426,7 @@ components:
         description: Setup key last usage date
         type: string
         format: date-time
-        example: 2023-05-05T09:00:35.477782Z
+        example: "2023-05-05T09:00:35.477782Z"
       state:
         description: Setup key status, "valid", "overused","expired" or "revoked"
         type: string
@@ -441,7 +441,7 @@ components:
         description: Setup key last update date
         type: string
         format: date-time
-        example: 2023-05-05T09:00:35.477782Z
+        example: "2023-05-05T09:00:35.477782Z"
       usage_limit:
         description: A number of times this key can be used. The value of 0 indicates the unlimited usage.
         type: integer
@@ -522,7 +522,7 @@ components:
         description: Date the token expires
         type: string
         format: date-time
-        example: 2023-05-05T14:38:28.977616Z
+        example: "2023-05-05T14:38:28.977616Z"
       created_by:
         description: User ID of the user who created the token
         type: string
@@ -531,12 +531,12 @@ components:
         description: Date the token was created
         type: string
         format: date-time
-        example: 2023-05-02T14:48:20.465209Z
+        example: "2023-05-02T14:48:20.465209Z"
       last_used:
         description: Date the token was last used
         type: string
         format: date-time
-        example: 2023-05-04T12:45:25.9723616Z
+        example: "2023-05-04T12:45:25.9723616Z"
       required:
         - id
         - name
@@ -862,8 +862,8 @@ components:
         $ref: '#/components/schemas/OSVersionCheck'
       geo_location_check:
         $ref: '#/components/schemas/GeoLocationCheck'
-      private_network_check:
-        $ref: '#/components/schemas/PrivateNetworkCheck'
+      peer_network_range_check:
+        $ref: '#/components/schemas/PeerNetworkRangeCheck'
     NBVersionCheck:
       description: Posture check for the version of NetBird
       type: object
@@ -934,16 +934,16 @@ components:
       required:
        - locations
        - action
-    PrivateNetworkCheck:
-      description: Posture check for allow or deny private network
+    PeerNetworkRangeCheck:
+      description: Posture check for allow or deny access based on peer local network addresses
      type: object
      properties:
        ranges:
-          description: List of private network ranges in CIDR notation
+          description: List of peer network ranges in CIDR notation
          type: array
          items:
            type: string
-          example: ["192.168.1.0/24", "10.0.0.0/8"]
+          example: ["192.168.1.0/24", "10.0.0.0/8", "2001:db8:1234:1a00::/56"]
        action:
          description: Action to take upon policy match
          type: string
@@ -1197,7 +1197,7 @@ components:
         description: The date and time when the event occurred
         type: string
         format: date-time
-        example: 2023-05-05T10:04:37.473542Z
+        example: "2023-05-05T10:04:37.473542Z"
       activity:
         description: The activity that occurred during the event
         type: string
@@ -74,6 +74,12 @@ const (
 	NameserverNsTypeUdp NameserverNsType = "udp"
 )

+// Defines values for PeerNetworkRangeCheckAction.
+const (
+	PeerNetworkRangeCheckActionAllow PeerNetworkRangeCheckAction = "allow"
+	PeerNetworkRangeCheckActionDeny  PeerNetworkRangeCheckAction = "deny"
+)
+
 // Defines values for PolicyRuleAction.
 const (
 	PolicyRuleActionAccept PolicyRuleAction = "accept"
@@ -116,12 +122,6 @@ const (
 	PolicyRuleUpdateProtocolUdp PolicyRuleUpdateProtocol = "udp"
 )

-// Defines values for PrivateNetworkCheckAction.
-const (
-	PrivateNetworkCheckActionAllow PrivateNetworkCheckAction = "allow"
-	PrivateNetworkCheckActionDeny  PrivateNetworkCheckAction = "deny"
-)
-
 // Defines values for UserStatus.
 const (
 	UserStatusActive UserStatus = "active"
@@ -199,8 +199,8 @@ type Checks struct {
 	// OsVersionCheck Posture check for the version of operating system
 	OsVersionCheck *OSVersionCheck `json:"os_version_check,omitempty"`

-	// PrivateNetworkCheck Posture check for allow or deny private network
-	PrivateNetworkCheck *PrivateNetworkCheck `json:"private_network_check,omitempty"`
+	// PeerNetworkRangeCheck Posture check for allow or deny access based on peer local network addresses
+	PeerNetworkRangeCheck *PeerNetworkRangeCheck `json:"peer_network_range_check,omitempty"`
 }

 // City Describe city geographical location information
@@ -656,6 +656,18 @@ type PeerMinimum struct {
 	Name string `json:"name"`
 }

+// PeerNetworkRangeCheck Posture check for allow or deny access based on peer local network addresses
+type PeerNetworkRangeCheck struct {
+	// Action Action to take upon policy match
+	Action PeerNetworkRangeCheckAction `json:"action"`
+
+	// Ranges List of peer network ranges in CIDR notation
+	Ranges []string `json:"ranges"`
+}
+
+// PeerNetworkRangeCheckAction Action to take upon policy match
+type PeerNetworkRangeCheckAction string
+
 // PeerRequest defines model for PeerRequest.
 type PeerRequest struct {
 	// ApprovalRequired (Cloud only) Indicates whether peer needs approval
@@ -898,18 +910,6 @@ type PostureCheckUpdate struct {
 	Name string `json:"name"`
 }

-// PrivateNetworkCheck Posture check for allow or deny private network
-type PrivateNetworkCheck struct {
-	// Action Action to take upon policy match
-	Action PrivateNetworkCheckAction `json:"action"`
-
-	// Ranges List of private network ranges in CIDR notation
-	Ranges []string `json:"ranges"`
-}
-
-// PrivateNetworkCheckAction Action to take upon policy match
-type PrivateNetworkCheckAction string
-
 // Route defines model for Route.
 type Route struct {
 	// Description Route description
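With the regenerated types above, a peer network range posture check is expressed through api.Checks.PeerNetworkRangeCheck. The snippet below is an illustrative construction of an update payload: the check name and ranges are made up, and the import path is the one used for the api package elsewhere in this repository.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/netbirdio/netbird/management/server/http/api"
)

// Illustrative only: build an update payload for the renamed check using the
// regenerated types above.
func main() {
	update := api.PostureCheckUpdate{
		Name: "lan-only",
		Checks: &api.Checks{
			PeerNetworkRangeCheck: &api.PeerNetworkRangeCheck{
				Action: api.PeerNetworkRangeCheckActionAllow,
				// CIDR ranges, IPv4 or IPv6, matching the OpenAPI example
				Ranges: []string{"192.168.1.0/24", "2001:db8:1234:1a00::/56"},
			},
		},
	}

	body, err := json.Marshal(update)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // body of a POST/PUT to /api/posture-checks
}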
@@ -177,7 +177,10 @@ func TestAuthMiddleware_Handler(t *testing.T) {
 	for _, tc := range tt {
 		t.Run(tc.name, func(t *testing.T) {
 			if tc.shouldBypassAuth {
-				bypass.AddBypassPath(tc.path)
+				err := bypass.AddBypassPath(tc.path)
+				if err != nil {
+					t.Fatalf("failed to add bypass path: %v", err)
+				}
 			}

 			req := httptest.NewRequest("GET", "http://testing"+tc.path, nil)
@@ -1,8 +1,12 @@
 package bypass

 import (
+	"fmt"
 	"net/http"
+	"path"
 	"sync"
+
+	log "github.com/sirupsen/logrus"
 )

 var byPassMutex sync.RWMutex
@@ -11,10 +15,16 @@ var byPassMutex sync.RWMutex
 var bypassPaths = make(map[string]struct{})

 // AddBypassPath adds an exact path to the list of paths that bypass middleware.
-func AddBypassPath(path string) {
+// Paths can include wildcards, such as /api/*. Paths are matched using path.Match.
+// Returns an error if the path has invalid pattern.
+func AddBypassPath(path string) error {
 	byPassMutex.Lock()
 	defer byPassMutex.Unlock()
+	if err := validatePath(path); err != nil {
+		return fmt.Errorf("validate: %w", err)
+	}
 	bypassPaths[path] = struct{}{}
+	return nil
 }

 // RemovePath removes a path from the list of paths that bypass middleware.
@@ -24,16 +34,41 @@ func RemovePath(path string) {
 	delete(bypassPaths, path)
 }

+// GetList returns a list of all bypass paths.
+func GetList() []string {
+	byPassMutex.RLock()
+	defer byPassMutex.RUnlock()
+
+	list := make([]string, 0, len(bypassPaths))
+	for k := range bypassPaths {
+		list = append(list, k)
+	}
+
+	return list
+}
+
 // ShouldBypass checks if the request path is one of the auth bypass paths and returns true if the middleware should be bypassed.
 // This can be used to bypass authz/authn middlewares for certain paths, such as webhooks that implement their own authentication.
 func ShouldBypass(requestPath string, h http.Handler, w http.ResponseWriter, r *http.Request) bool {
 	byPassMutex.RLock()
 	defer byPassMutex.RUnlock()

-	if _, ok := bypassPaths[requestPath]; ok {
+	for bypassPath := range bypassPaths {
+		matched, err := path.Match(bypassPath, requestPath)
+		if err != nil {
+			log.Errorf("Error matching path %s with %s from %s: %v", bypassPath, requestPath, GetList(), err)
+			continue
+		}
+		if matched {
 			h.ServeHTTP(w, r)
 			return true
 		}
+	}

 	return false
 }
+
+func validatePath(p string) error {
+	_, err := path.Match(p, "")
+	return err
+}
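Because ShouldBypass now runs every registered entry through path.Match, a wildcard only spans a single path segment: "/webhook/*" matches "/webhook/extra" but not "/webhook/extra/path". A small standalone program demonstrating exactly the semantics the tests below assert:

package main

import (
	"fmt"
	"path"
)

// Demonstrates the matching semantics the bypass list relies on: path.Match
// wildcards do not cross "/" separators.
func main() {
	patterns := []string{"/webhook", "/webhook/*"}
	requests := []string{"/webhook", "/webhook/extra", "/webhook/extra/path"}

	for _, p := range patterns {
		for _, r := range requests {
			ok, err := path.Match(p, r)
			fmt.Printf("pattern %-12q request %-22q matched=%-5v err=%v\n", p, r, ok, err)
		}
	}
	// "/webhook/*" matches "/webhook/extra" but not "/webhook/extra/path",
	// which is what the middleware tests below check.
}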
@@ -11,6 +11,19 @@ import (
 	"github.com/netbirdio/netbird/management/server/http/middleware/bypass"
 )

+func TestGetList(t *testing.T) {
+	bypassPaths := []string{"/path1", "/path2", "/path3"}
+
+	for _, path := range bypassPaths {
+		err := bypass.AddBypassPath(path)
+		require.NoError(t, err, "Adding bypass path should not fail")
+	}
+
+	list := bypass.GetList()
+
+	assert.ElementsMatch(t, bypassPaths, list, "Bypass path list did not match expected paths")
+}
+
 func TestAuthBypass(t *testing.T) {
 	dummyHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
@@ -31,6 +44,13 @@ func TestAuthBypass(t *testing.T) {
 			expectBypass: true,
 			expectHTTPCode: http.StatusOK,
 		},
+		{
+			name: "Wildcard path added to bypass",
+			pathToAdd: "/bypass/*",
+			testPath: "/bypass/extra",
+			expectBypass: true,
+			expectHTTPCode: http.StatusOK,
+		},
 		{
 			name: "Path not added to bypass",
 			testPath: "/no-bypass",
@@ -59,6 +79,13 @@ func TestAuthBypass(t *testing.T) {
 			expectBypass: false,
 			expectHTTPCode: http.StatusOK,
 		},
+		{
+			name: "Wildcard subpath does not match bypass",
+			pathToAdd: "/webhook/*",
+			testPath: "/webhook/extra/path",
+			expectBypass: false,
+			expectHTTPCode: http.StatusOK,
+		},
 		{
 			name: "Similar path does not match bypass",
 			pathToAdd: "/webhook",
@@ -78,7 +105,8 @@ func TestAuthBypass(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			if tc.pathToAdd != "" {
-				bypass.AddBypassPath(tc.pathToAdd)
+				err := bypass.AddBypassPath(tc.pathToAdd)
+				require.NoError(t, err, "Adding bypass path should not fail")
 				defer bypass.RemovePath(tc.pathToAdd)
 			}

@@ -213,8 +213,8 @@ func (p *PostureChecksHandler) savePostureChecks(
 		postureChecks.Checks.GeoLocationCheck = toPostureGeoLocationCheck(geoLocationCheck)
 	}

-	if privateNetworkCheck := req.Checks.PrivateNetworkCheck; privateNetworkCheck != nil {
-		postureChecks.Checks.PrivateNetworkCheck, err = toPrivateNetworkCheck(privateNetworkCheck)
+	if peerNetworkRangeCheck := req.Checks.PeerNetworkRangeCheck; peerNetworkRangeCheck != nil {
+		postureChecks.Checks.PeerNetworkRangeCheck, err = toPeerNetworkRangeCheck(peerNetworkRangeCheck)
 		if err != nil {
 			util.WriteError(status.Errorf(status.InvalidArgument, "invalid network prefix"), w)
 			return
@@ -235,7 +235,7 @@ func validatePostureChecksUpdate(req api.PostureCheckUpdate) error {
 	}

 	if req.Checks == nil || (req.Checks.NbVersionCheck == nil && req.Checks.OsVersionCheck == nil &&
-		req.Checks.GeoLocationCheck == nil && req.Checks.PrivateNetworkCheck == nil) {
+		req.Checks.GeoLocationCheck == nil && req.Checks.PeerNetworkRangeCheck == nil) {
 		return status.Errorf(status.InvalidArgument, "posture checks shouldn't be empty")
 	}

@@ -278,17 +278,17 @@ func validatePostureChecksUpdate(req api.PostureCheckUpdate) error {
 		}
 	}

-	if privateNetworkCheck := req.Checks.PrivateNetworkCheck; privateNetworkCheck != nil {
-		if privateNetworkCheck.Action == "" {
-			return status.Errorf(status.InvalidArgument, "action for private network check shouldn't be empty")
+	if peerNetworkRangeCheck := req.Checks.PeerNetworkRangeCheck; peerNetworkRangeCheck != nil {
+		if peerNetworkRangeCheck.Action == "" {
+			return status.Errorf(status.InvalidArgument, "action for peer network range check shouldn't be empty")
 		}

-		allowedActions := []api.PrivateNetworkCheckAction{api.PrivateNetworkCheckActionAllow, api.PrivateNetworkCheckActionDeny}
-		if !slices.Contains(allowedActions, privateNetworkCheck.Action) {
-			return status.Errorf(status.InvalidArgument, "action for private network check is not valid value")
+		allowedActions := []api.PeerNetworkRangeCheckAction{api.PeerNetworkRangeCheckActionAllow, api.PeerNetworkRangeCheckActionDeny}
+		if !slices.Contains(allowedActions, peerNetworkRangeCheck.Action) {
+			return status.Errorf(status.InvalidArgument, "action for peer network range check is not valid value")
 		}
-		if len(privateNetworkCheck.Ranges) == 0 {
-			return status.Errorf(status.InvalidArgument, "network ranges for private network check shouldn't be empty")
+		if len(peerNetworkRangeCheck.Ranges) == 0 {
+			return status.Errorf(status.InvalidArgument, "network ranges for peer network range check shouldn't be empty")
 		}
 	}

@@ -318,8 +318,8 @@ func toPostureChecksResponse(postureChecks *posture.Checks) *api.PostureCheck {
 		checks.GeoLocationCheck = toGeoLocationCheckResponse(postureChecks.Checks.GeoLocationCheck)
 	}

-	if postureChecks.Checks.PrivateNetworkCheck != nil {
-		checks.PrivateNetworkCheck = toPrivateNetworkCheckResponse(postureChecks.Checks.PrivateNetworkCheck)
+	if postureChecks.Checks.PeerNetworkRangeCheck != nil {
+		checks.PeerNetworkRangeCheck = toPeerNetworkRangeCheckResponse(postureChecks.Checks.PeerNetworkRangeCheck)
 	}

 	return &api.PostureCheck{
@@ -369,19 +369,19 @@ func toPostureGeoLocationCheck(apiGeoLocationCheck *api.GeoLocationCheck) *postu
 	}
 }

-func toPrivateNetworkCheckResponse(check *posture.PrivateNetworkCheck) *api.PrivateNetworkCheck {
+func toPeerNetworkRangeCheckResponse(check *posture.PeerNetworkRangeCheck) *api.PeerNetworkRangeCheck {
 	netPrefixes := make([]string, 0, len(check.Ranges))
 	for _, netPrefix := range check.Ranges {
 		netPrefixes = append(netPrefixes, netPrefix.String())
 	}

-	return &api.PrivateNetworkCheck{
+	return &api.PeerNetworkRangeCheck{
 		Ranges: netPrefixes,
-		Action: api.PrivateNetworkCheckAction(check.Action),
+		Action: api.PeerNetworkRangeCheckAction(check.Action),
 	}
 }

-func toPrivateNetworkCheck(check *api.PrivateNetworkCheck) (*posture.PrivateNetworkCheck, error) {
+func toPeerNetworkRangeCheck(check *api.PeerNetworkRangeCheck) (*posture.PeerNetworkRangeCheck, error) {
 	prefixes := make([]netip.Prefix, 0)
 	for _, prefix := range check.Ranges {
 		parsedPrefix, err := netip.ParsePrefix(prefix)
@@ -391,7 +391,7 @@ func toPrivateNetworkCheck(check *api.PrivateNetworkCheck) (*posture.PrivateNetw
 		prefixes = append(prefixes, parsedPrefix)
 	}

-	return &posture.PrivateNetworkCheck{
+	return &posture.PeerNetworkRangeCheck{
 		Ranges: prefixes,
 		Action: string(check.Action),
 	}, nil
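toPeerNetworkRangeCheck parses each CIDR string with netip.ParsePrefix, and a parse failure is what the handler surfaces as the "invalid network prefix" error. A short standalone sketch of that parsing for IPv4, IPv6 and an invalid input:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	for _, cidr := range []string{"192.168.1.0/24", "2001:db8:1234:1a00::/56", "not-a-prefix"} {
		p, err := netip.ParsePrefix(cidr)
		if err != nil {
			// the handler above maps this case to status.InvalidArgument ("invalid network prefix")
			fmt.Printf("%-28s -> error: %v\n", cidr, err)
			continue
		}
		fmt.Printf("%-28s -> prefix %s (addr %s, bits %d)\n", cidr, p, p.Addr(), p.Bits())
	}
}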
@@ -131,7 +131,7 @@ func TestGetPostureCheck(t *testing.T) {
 			ID: "privateNetworkPostureCheck",
 			Name: "privateNetwork",
 			Checks: posture.ChecksDefinition{
-				PrivateNetworkCheck: &posture.PrivateNetworkCheck{
+				PeerNetworkRangeCheck: &posture.PeerNetworkRangeCheck{
 					Ranges: []netip.Prefix{
 						netip.MustParsePrefix("192.168.0.0/24"),
 					},
@@ -375,7 +375,7 @@ func TestPostureCheckUpdate(t *testing.T) {
 			},
 		},
 		{
-			name: "Create Posture Checks Private Network",
+			name: "Create Posture Checks Peer Network Range",
 			requestType: http.MethodPost,
 			requestPath: "/api/posture-checks",
 			requestBody: bytes.NewBuffer(
@@ -383,7 +383,7 @@ func TestPostureCheckUpdate(t *testing.T) {
 				"name": "default",
 				"description": "default",
 				"checks": {
-					"private_network_check": {
+					"peer_network_range_check": {
 						"action": "allow",
 						"ranges": [
 							"10.0.0.0/8"
@@ -398,11 +398,11 @@ func TestPostureCheckUpdate(t *testing.T) {
 				Name: "default",
 				Description: str("default"),
 				Checks: api.Checks{
-					PrivateNetworkCheck: &api.PrivateNetworkCheck{
+					PeerNetworkRangeCheck: &api.PeerNetworkRangeCheck{
 						Ranges: []string{
 							"10.0.0.0/8",
 						},
-						Action: api.PrivateNetworkCheckActionAllow,
+						Action: api.PeerNetworkRangeCheckActionAllow,
 					},
 				},
 			},
@@ -715,14 +715,14 @@ func TestPostureCheckUpdate(t *testing.T) {
 			expectedBody: false,
 		},
 		{
-			name: "Update Posture Checks Private Network",
+			name: "Update Posture Checks Peer Network Range",
 			requestType: http.MethodPut,
-			requestPath: "/api/posture-checks/privateNetworkPostureCheck",
+			requestPath: "/api/posture-checks/peerNetworkRangePostureCheck",
 			requestBody: bytes.NewBuffer(
 				[]byte(`{
 				"name": "default",
 				"checks": {
-					"private_network_check": {
+					"peer_network_range_check": {
 						"action": "deny",
 						"ranges": [
 							"192.168.1.0/24"
@@ -737,11 +737,11 @@ func TestPostureCheckUpdate(t *testing.T) {
 				Name: "default",
 				Description: str(""),
 				Checks: api.Checks{
-					PrivateNetworkCheck: &api.PrivateNetworkCheck{
+					PeerNetworkRangeCheck: &api.PeerNetworkRangeCheck{
 						Ranges: []string{
 							"192.168.1.0/24",
 						},
-						Action: api.PrivateNetworkCheckActionDeny,
+						Action: api.PeerNetworkRangeCheckActionDeny,
 					},
 				},
 			},
@@ -784,10 +784,10 @@ func TestPostureCheckUpdate(t *testing.T) {
 				},
 			},
 			&posture.Checks{
-				ID: "privateNetworkPostureCheck",
-				Name: "privateNetwork",
+				ID: "peerNetworkRangePostureCheck",
+				Name: "peerNetworkRange",
 				Checks: posture.ChecksDefinition{
-					PrivateNetworkCheck: &posture.PrivateNetworkCheck{
+					PeerNetworkRangeCheck: &posture.PeerNetworkRangeCheck{
 						Ranges: []netip.Prefix{
 							netip.MustParsePrefix("192.168.0.0/24"),
 						},
@@ -891,29 +891,50 @@ func TestPostureCheck_validatePostureChecksUpdate(t *testing.T) {
 	err = validatePostureChecksUpdate(api.PostureCheckUpdate{Name: "Default", Checks: &api.Checks{OsVersionCheck: &osVersionCheck}})
 	assert.NoError(t, err)

-	// valid private network check
-	privateNetworkCheck := api.PrivateNetworkCheck{
-		Action: api.PrivateNetworkCheckActionAllow,
+	// valid peer network range check
+	peerNetworkRangeCheck := api.PeerNetworkRangeCheck{
+		Action: api.PeerNetworkRangeCheckActionAllow,
 		Ranges: []string{
 			"192.168.1.0/24", "10.0.0.0/8",
 		},
 	}
-	err = validatePostureChecksUpdate(api.PostureCheckUpdate{Name: "Default", Checks: &api.Checks{PrivateNetworkCheck: &privateNetworkCheck}})
+	err = validatePostureChecksUpdate(
+		api.PostureCheckUpdate{
+			Name: "Default",
+			Checks: &api.Checks{
+				PeerNetworkRangeCheck: &peerNetworkRangeCheck,
+			},
+		},
+	)
 	assert.NoError(t, err)

-	// invalid private network check
-	privateNetworkCheck = api.PrivateNetworkCheck{
-		Action: api.PrivateNetworkCheckActionDeny,
+	// invalid peer network range check
+	peerNetworkRangeCheck = api.PeerNetworkRangeCheck{
+		Action: api.PeerNetworkRangeCheckActionDeny,
 		Ranges: []string{},
 	}
-	err = validatePostureChecksUpdate(api.PostureCheckUpdate{Name: "Default", Checks: &api.Checks{PrivateNetworkCheck: &privateNetworkCheck}})
+	err = validatePostureChecksUpdate(
+		api.PostureCheckUpdate{
+			Name: "Default",
+			Checks: &api.Checks{
+				PeerNetworkRangeCheck: &peerNetworkRangeCheck,
+			},
+		},
+	)
 	assert.Error(t, err)

-	// invalid private network check
-	privateNetworkCheck = api.PrivateNetworkCheck{
+	// invalid peer network range check
+	peerNetworkRangeCheck = api.PeerNetworkRangeCheck{
 		Action: "unknownAction",
 		Ranges: []string{},
 	}
-	err = validatePostureChecksUpdate(api.PostureCheckUpdate{Name: "Default", Checks: &api.Checks{PrivateNetworkCheck: &privateNetworkCheck}})
+	err = validatePostureChecksUpdate(
+		api.PostureCheckUpdate{
+			Name: "Default",
+			Checks: &api.Checks{
+				PeerNetworkRangeCheck: &peerNetworkRangeCheck,
+			},
+		},
+	)
 	assert.Error(t, err)
 }
|
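For reference, a minimal sketch (not part of this diff) of building the renamed check through the api types used above; the api import path and the exact JSON key ("peer_network_range_check", taken from the request body above) are assumptions rather than something this diff defines:

    package main

    import (
        "encoding/json"
        "fmt"

        // assumed import path for the generated management API types
        "github.com/netbirdio/netbird/management/server/http/api"
    )

    func main() {
        // Deny peers that report an address inside 192.168.1.0/24, mirroring the test payload above.
        update := api.PostureCheckUpdate{
            Name: "default",
            Checks: &api.Checks{
                PeerNetworkRangeCheck: &api.PeerNetworkRangeCheck{
                    Action: api.PeerNetworkRangeCheckActionDeny,
                    Ranges: []string{"192.168.1.0/24"},
                },
            },
        }

        body, err := json.Marshal(update)
        if err != nil {
            panic(err)
        }
        // Expected to serialize the check under "peer_network_range_check".
        fmt.Println(string(body))
    }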
@ -114,6 +114,22 @@ type auth0Profile struct {
 LastLogin string `json:"last_login"`
 }

+// Connections represents a single Auth0 connection
+// https://auth0.com/docs/api/management/v2/connections/get-connections
+type Connection struct {
+Id string `json:"id"`
+Name string `json:"name"`
+DisplayName string `json:"display_name"`
+IsDomainConnection bool `json:"is_domain_connection"`
+Realms []string `json:"realms"`
+Metadata map[string]string `json:"metadata"`
+Options ConnectionOptions `json:"options"`
+}
+
+type ConnectionOptions struct {
+DomainAliases []string `json:"domain_aliases"`
+}
+
 // NewAuth0Manager creates a new instance of the Auth0Manager
 func NewAuth0Manager(config Auth0ClientConfig, appMetrics telemetry.AppMetrics) (*Auth0Manager, error) {
 httpTransport := http.DefaultTransport.(*http.Transport).Clone()
@ -581,13 +597,13 @@ func (am *Auth0Manager) GetAllAccounts() (map[string][]*UserData, error) {

 body, err := io.ReadAll(jobResp.Body)
 if err != nil {
-log.Debugf("Coudln't read export job response; %v", err)
+log.Debugf("Couldn't read export job response; %v", err)
 return nil, err
 }

 err = am.helper.Unmarshal(body, &exportJobResp)
 if err != nil {
-log.Debugf("Coudln't unmarshal export job response; %v", err)
+log.Debugf("Couldn't unmarshal export job response; %v", err)
 return nil, err
 }

@ -635,7 +651,7 @@ func (am *Auth0Manager) GetUserByEmail(email string) ([]*UserData, error) {

 err = am.helper.Unmarshal(body, &userResp)
 if err != nil {
-log.Debugf("Coudln't unmarshal export job response; %v", err)
+log.Debugf("Couldn't unmarshal export job response; %v", err)
 return nil, err
 }

@ -684,13 +700,13 @@ func (am *Auth0Manager) CreateUser(email, name, accountID, invitedByEmail string

 body, err := io.ReadAll(resp.Body)
 if err != nil {
-log.Debugf("Coudln't read export job response; %v", err)
+log.Debugf("Couldn't read export job response; %v", err)
 return nil, err
 }

 err = am.helper.Unmarshal(body, &createResp)
 if err != nil {
-log.Debugf("Coudln't unmarshal export job response; %v", err)
+log.Debugf("Couldn't unmarshal export job response; %v", err)
 return nil, err
 }

@ -777,6 +793,56 @@ func (am *Auth0Manager) DeleteUser(userID string) error {
 return nil
 }

+// GetAllConnections returns detailed list of all connections filtered by given params.
+// Note this method is not part of the IDP Manager interface as this is Auth0 specific.
+func (am *Auth0Manager) GetAllConnections(strategy []string) ([]Connection, error) {
+var connections []Connection
+
+q := make(url.Values)
+q.Set("strategy", strings.Join(strategy, ","))
+
+req, err := am.createRequest(http.MethodGet, "/api/v2/connections?"+q.Encode(), nil)
+if err != nil {
+return connections, err
+}
+
+resp, err := am.httpClient.Do(req)
+if err != nil {
+log.Debugf("execute get connections request: %v", err)
+if am.appMetrics != nil {
+am.appMetrics.IDPMetrics().CountRequestError()
+}
+return connections, err
+}
+
+defer func() {
+err = resp.Body.Close()
+if err != nil {
+log.Errorf("close get connections request body: %v", err)
+}
+}()
+if resp.StatusCode != 200 {
+if am.appMetrics != nil {
+am.appMetrics.IDPMetrics().CountRequestStatusError()
+}
+return connections, fmt.Errorf("unable to get connections, statusCode %d", resp.StatusCode)
+}
+
+body, err := io.ReadAll(resp.Body)
+if err != nil {
+log.Debugf("Couldn't read get connections response; %v", err)
+return connections, err
+}
+
+err = am.helper.Unmarshal(body, &connections)
+if err != nil {
+log.Debugf("Couldn't unmarshal get connection response; %v", err)
+return connections, err
+}
+
+return connections, err
+}
+
 // checkExportJobStatus checks the status of the job created at CreateExportUsersJob.
 // If the status is "completed", then return the downloadLink
 func (am *Auth0Manager) checkExportJobStatus(jobID string) (bool, string, error) {
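A minimal usage sketch (not part of this diff) for the new GetAllConnections helper; the telemetry import path and the idea that the caller already holds a populated Auth0ClientConfig are assumptions:

    package example

    import (
        "fmt"

        "github.com/netbirdio/netbird/management/server/idp"
        // assumed import path for the metrics interface used by NewAuth0Manager
        "github.com/netbirdio/netbird/management/server/telemetry"
    )

    // listConnections prints the Auth0 connections matching the given strategies.
    // cfg must already carry valid Auth0 credentials; metrics is whatever AppMetrics implementation the caller uses.
    func listConnections(cfg idp.Auth0ClientConfig, metrics telemetry.AppMetrics, strategies []string) error {
        mgr, err := idp.NewAuth0Manager(cfg, metrics)
        if err != nil {
            return err
        }

        conns, err := mgr.GetAllConnections(strategies)
        if err != nil {
            return err
        }

        for _, c := range conns {
            fmt.Printf("%s (%s) domains=%v\n", c.Name, c.DisplayName, c.Options.DomainAliases)
        }
        return nil
    }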
@ -8,5 +8,6 @@ import (
 // IntegratedApproval interface exists to avoid the circle dependencies
 type IntegratedApproval interface {
 PreparePeer(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) *nbpeer.Peer
-SyncPeer(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) (*nbpeer.Peer, bool)
+IsRequiresApproval(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) bool
+Stop()
 }
@ -452,8 +452,12 @@ func (MocIntegratedApproval) PreparePeer(accountID string, peer *nbpeer.Peer, pe
 return peer
 }

-func (MocIntegratedApproval) SyncPeer(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) (*nbpeer.Peer, bool) {
-return peer.Copy(), false
+func (MocIntegratedApproval) IsRequiresApproval(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) bool {
+return false
+}
+
+func (MocIntegratedApproval) Stop() {
+
 }

 func loginPeerWithValidSetupKey(serverPubKey wgtypes.Key, key wgtypes.Key, client mgmtProto.ManagementServiceClient) *mgmtProto.LoginResponse {
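A minimal sketch (not part of this diff) of a no-op implementation that satisfies the reshaped IntegratedApproval interface; the account package import path is an assumption:

    package example

    import (
        "github.com/netbirdio/netbird/management/server/account" // assumed path for ExtraSettings
        nbpeer "github.com/netbirdio/netbird/management/server/peer"
    )

    // noopApproval never requires approval and has nothing to shut down.
    type noopApproval struct{}

    // PreparePeer returns the peer unchanged.
    func (noopApproval) PreparePeer(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) *nbpeer.Peer {
        return peer
    }

    // IsRequiresApproval reports that the peer never needs manual approval.
    func (noopApproval) IsRequiresApproval(accountID string, peer *nbpeer.Peer, peersGroup []string, extraSettings *account.ExtraSettings) bool {
        return false
    }

    // Stop releases nothing; this validator holds no background workers.
    func (noopApproval) Stop() {}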
@ -11,6 +11,7 @@ import (
 nbdns "github.com/netbirdio/netbird/dns"
 "github.com/netbirdio/netbird/management/server"
 "github.com/netbirdio/netbird/management/server/activity"
+"github.com/netbirdio/netbird/management/server/idp"
 "github.com/netbirdio/netbird/management/server/jwtclaims"
 nbpeer "github.com/netbirdio/netbird/management/server/peer"
 "github.com/netbirdio/netbird/management/server/posture"
@ -93,6 +94,7 @@ type MockAccountManager struct {
 DeletePostureChecksFunc func(accountID, postureChecksID, userID string) error
 ListPostureChecksFunc func(accountID, userID string) ([]*posture.Checks, error)
 GetUsageFunc func(ctx context.Context, accountID string, start, end time.Time) (*server.AccountUsageStats, error)
+GetIdpManagerFunc func() idp.Manager
 UpdateIntegratedApprovalGroupsFunc func(accountID string, userID string, groups []string) error
 GroupValidationFunc func(accountId string, groups []string) (bool, error)
 }
@ -707,7 +709,7 @@ func (am *MockAccountManager) ListPostureChecks(accountID, userID string) ([]*po
 return nil, status.Errorf(codes.Unimplemented, "method ListPostureChecks is not implemented")
 }

-// GetUsage mocks GetCurrentUsage of the AccountManager interface
+// GetUsage mocks GetUsage of the AccountManager interface
 func (am *MockAccountManager) GetUsage(ctx context.Context, accountID string, start time.Time, end time.Time) (*server.AccountUsageStats, error) {
 if am.GetUsageFunc != nil {
 return am.GetUsageFunc(ctx, accountID, start, end)
@ -715,6 +717,14 @@ func (am *MockAccountManager) GetUsage(ctx context.Context, accountID string, st
 return nil, status.Errorf(codes.Unimplemented, "method GetUsage is not implemented")
 }

+// GetIdpManager mocks GetIdpManager of the AccountManager interface
+func (am *MockAccountManager) GetIdpManager() idp.Manager {
+if am.GetIdpManagerFunc != nil {
+return am.GetIdpManagerFunc()
+}
+return nil
+}
+
 // UpdateIntegratedApprovalGroups mocks UpdateIntegratedApprovalGroups of the AccountManager interface
 func (am *MockAccountManager) UpdateIntegratedApprovalGroups(accountID string, userID string, groups []string) error {
 if am.UpdateIntegratedApprovalGroupsFunc != nil {
@ -408,6 +408,8 @@ func (am *DefaultAccountManager) AddPeer(setupKey, userID string, peer *nbpeer.P
 return nil, nil, err
 }

+registrationTime := time.Now().UTC()
+
 newPeer := &nbpeer.Peer{
 ID: xid.New().String(),
 Key: peer.Key,
@ -417,10 +419,11 @@ func (am *DefaultAccountManager) AddPeer(setupKey, userID string, peer *nbpeer.P
 Name: peer.Meta.Hostname,
 DNSLabel: newLabel,
 UserID: userID,
-Status: &nbpeer.PeerStatus{Connected: false, LastSeen: time.Now().UTC()},
+Status: &nbpeer.PeerStatus{Connected: false, LastSeen: registrationTime},
 SSHEnabled: false,
 SSHKey: peer.SSHKey,
-LastLogin: time.Now().UTC(),
+LastLogin: registrationTime,
+CreatedAt: registrationTime,
 LoginExpirationEnabled: addedByUser,
 Ephemeral: ephemeral,
 }
@ -518,10 +521,9 @@ func (am *DefaultAccountManager) SyncPeer(sync PeerSync) (*nbpeer.Peer, *Network
 return nil, nil, status.Errorf(status.PermissionDenied, "peer login has expired, please log in once more")
 }

-peer, updated := am.integratedPeerValidator.SyncPeer(account.Id, peer, account.GetPeerGroupsList(peer.ID), account.Settings.Extra)
-if updated {
-account.UpdatePeer(peer)
+requiresApproval := am.integratedPeerValidator.IsRequiresApproval(account.Id, peer, account.GetPeerGroupsList(peer.ID), account.Settings.Extra)
+if peer.Status.RequiresApproval != requiresApproval {
+peer.Status.RequiresApproval = requiresApproval
 err = am.Store.SaveAccount(account)
 if err != nil {
 return nil, nil, err
@ -594,12 +596,12 @@ func (am *DefaultAccountManager) LoginPeer(login PeerLogin) (*nbpeer.Peer, *Netw
 am.StoreEvent(login.UserID, peer.ID, account.Id, activity.UserLoggedInPeer, peer.EventMeta(am.GetDNSDomain()))
 }

-peer, updated := am.integratedPeerValidator.SyncPeer(account.Id, peer, account.GetPeerGroupsList(peer.ID), account.Settings.Extra)
-if updated {
+isRequiresApproval := am.integratedPeerValidator.IsRequiresApproval(account.Id, peer, account.GetPeerGroupsList(peer.ID), account.Settings.Extra)
+if peer.Status.RequiresApproval != isRequiresApproval {
 shouldStoreAccount = true
 }

-peer, updated = updatePeerMeta(peer, login.Meta, account)
+peer, updated := updatePeerMeta(peer, login.Meta, account)
 if updated {
 shouldStoreAccount = true
 }
@ -40,13 +40,15 @@ type Peer struct {
 LoginExpirationEnabled bool
 // LastLogin the time when peer performed last login operation
 LastLogin time.Time
+// CreatedAt records the time the peer was created
+CreatedAt time.Time
 // Indicate ephemeral peer attribute
 Ephemeral bool
 // Geo location based on connection IP
 Location Location `gorm:"embedded;embeddedPrefix:location_"`
 }

-type PeerStatus struct {
+type PeerStatus struct { //nolint:revive
 // LastSeen is the last time peer was connected to the management service
 LastSeen time.Time
 // Connected indicates whether peer is connected to the management service or not
@ -71,8 +73,14 @@ type NetworkAddress struct {
 Mac string
 }

+// Environment is a system environment information
+type Environment struct {
+Cloud string
+Platform string
+}
+
 // PeerSystemMeta is a metadata of a Peer machine system
-type PeerSystemMeta struct {
+type PeerSystemMeta struct { //nolint:revive
 Hostname string
 GoOS string
 Kernel string
@ -87,6 +95,7 @@ type PeerSystemMeta struct {
 SystemSerialNumber string
 SystemProductName string
 SystemManufacturer string
+Environment Environment `gorm:"serializer:json"`
 }

 func (p PeerSystemMeta) isEqual(other PeerSystemMeta) bool {
@ -119,7 +128,9 @@ func (p PeerSystemMeta) isEqual(other PeerSystemMeta) bool {
 p.UIVersion == other.UIVersion &&
 p.SystemSerialNumber == other.SystemSerialNumber &&
 p.SystemProductName == other.SystemProductName &&
-p.SystemManufacturer == other.SystemManufacturer
+p.SystemManufacturer == other.SystemManufacturer &&
+p.Environment.Cloud == other.Environment.Cloud &&
+p.Environment.Platform == other.Environment.Platform
 }

 // AddedWithSSOLogin indicates whether this peer has been added with an SSO login by a user.
@ -148,6 +159,7 @@ func (p *Peer) Copy() *Peer {
 SSHEnabled: p.SSHEnabled,
 LoginExpirationEnabled: p.LoginExpirationEnabled,
 LastLogin: p.LastLogin,
+CreatedAt: p.CreatedAt,
 Ephemeral: p.Ephemeral,
 Location: p.Location,
 }
@ -204,7 +216,7 @@ func (p *Peer) FQDN(dnsDomain string) string {

 // EventMeta returns activity event meta related to the peer
 func (p *Peer) EventMeta(dnsDomain string) map[string]any {
-return map[string]any{"name": p.Name, "fqdn": p.FQDN(dnsDomain), "ip": p.IP}
+return map[string]any{"name": p.Name, "fqdn": p.FQDN(dnsDomain), "ip": p.IP, "created_at": p.CreatedAt}
 }

 // Copy PeerStatus
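A small sketch (not part of this diff) of the new peer fields in use; it assumes Peer.Meta is the PeerSystemMeta value shown above, and the field values and DNS domain are purely illustrative:

    package example

    import (
        "fmt"
        "time"

        nbpeer "github.com/netbirdio/netbird/management/server/peer"
    )

    func printPeerEventMeta() {
        p := &nbpeer.Peer{
            Name:      "office-gw",
            DNSLabel:  "office-gw",
            CreatedAt: time.Now().UTC(),
            Meta: nbpeer.PeerSystemMeta{
                // hypothetical environment values reported by the client
                Environment: nbpeer.Environment{Cloud: "aws", Platform: "ec2"},
            },
        }

        // EventMeta now carries the registration time under "created_at".
        meta := p.EventMeta("netbird.example.com")
        fmt.Println(meta["fqdn"], meta["created_at"])
    }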
@ -13,7 +13,7 @@ const (
 NBVersionCheckName = "NBVersionCheck"
 OSVersionCheckName = "OSVersionCheck"
 GeoLocationCheckName = "GeoLocationCheck"
-PrivateNetworkCheckName = "PrivateNetworkCheck"
+PeerNetworkRangeCheckName = "PeerNetworkRangeCheck"

 CheckActionAllow string = "allow"
 CheckActionDeny string = "deny"
@ -47,7 +47,7 @@ type ChecksDefinition struct {
 NBVersionCheck *NBVersionCheck `json:",omitempty"`
 OSVersionCheck *OSVersionCheck `json:",omitempty"`
 GeoLocationCheck *GeoLocationCheck `json:",omitempty"`
-PrivateNetworkCheck *PrivateNetworkCheck `json:",omitempty"`
+PeerNetworkRangeCheck *PeerNetworkRangeCheck `json:",omitempty"`
 }

 // Copy returns a copy of a checks definition.
@ -85,13 +85,13 @@ func (cd ChecksDefinition) Copy() ChecksDefinition {
 }
 copy(cdCopy.GeoLocationCheck.Locations, geoCheck.Locations)
 }
-if cd.PrivateNetworkCheck != nil {
-privateNetCheck := cd.PrivateNetworkCheck
-cdCopy.PrivateNetworkCheck = &PrivateNetworkCheck{
-Action: privateNetCheck.Action,
-Ranges: make([]netip.Prefix, len(privateNetCheck.Ranges)),
+if cd.PeerNetworkRangeCheck != nil {
+peerNetRangeCheck := cd.PeerNetworkRangeCheck
+cdCopy.PeerNetworkRangeCheck = &PeerNetworkRangeCheck{
+Action: peerNetRangeCheck.Action,
+Ranges: make([]netip.Prefix, len(peerNetRangeCheck.Ranges)),
 }
-copy(cdCopy.PrivateNetworkCheck.Ranges, privateNetCheck.Ranges)
+copy(cdCopy.PeerNetworkRangeCheck.Ranges, peerNetRangeCheck.Ranges)
 }
 return cdCopy
 }
@ -130,8 +130,8 @@ func (pc *Checks) GetChecks() []Check {
 if pc.Checks.GeoLocationCheck != nil {
 checks = append(checks, pc.Checks.GeoLocationCheck)
 }
-if pc.Checks.PrivateNetworkCheck != nil {
-checks = append(checks, pc.Checks.PrivateNetworkCheck)
+if pc.Checks.PeerNetworkRangeCheck != nil {
+checks = append(checks, pc.Checks.PeerNetworkRangeCheck)
 }
 return checks
 }
@ -254,7 +254,7 @@ func TestChecks_Copy(t *testing.T) {
 },
 Action: CheckActionAllow,
 },
-PrivateNetworkCheck: &PrivateNetworkCheck{
+PeerNetworkRangeCheck: &PeerNetworkRangeCheck{
 Ranges: []netip.Prefix{
 netip.MustParsePrefix("192.168.0.0/24"),
 netip.MustParsePrefix("10.0.0.0/8"),
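A minimal sketch (not part of this diff) of assembling a posture check with the renamed definition and listing its sub-checks; it assumes the Check interface exposes Name(), as the concrete PeerNetworkRangeCheck type does:

    package example

    import (
        "fmt"
        "net/netip"

        "github.com/netbirdio/netbird/management/server/posture"
    )

    func describeChecks() {
        checks := &posture.Checks{
            ID:   "peerNetworkRangePostureCheck",
            Name: "peerNetworkRange",
            Checks: posture.ChecksDefinition{
                PeerNetworkRangeCheck: &posture.PeerNetworkRangeCheck{
                    Action: posture.CheckActionDeny,
                    Ranges: []netip.Prefix{netip.MustParsePrefix("192.168.1.0/24")},
                },
            },
        }

        // GetChecks returns only the sub-checks that are actually set.
        for _, c := range checks.GetChecks() {
            fmt.Println(c.Name()) // prints "PeerNetworkRangeCheck"
        }
    }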
@ -8,16 +8,16 @@ import (
 nbpeer "github.com/netbirdio/netbird/management/server/peer"
 )

-type PrivateNetworkCheck struct {
+type PeerNetworkRangeCheck struct {
 Action string
 Ranges []netip.Prefix `gorm:"serializer:json"`
 }

-var _ Check = (*PrivateNetworkCheck)(nil)
+var _ Check = (*PeerNetworkRangeCheck)(nil)

-func (p *PrivateNetworkCheck) Check(peer nbpeer.Peer) (bool, error) {
+func (p *PeerNetworkRangeCheck) Check(peer nbpeer.Peer) (bool, error) {
 if len(peer.Meta.NetworkAddresses) == 0 {
-return false, fmt.Errorf("peer's does not contain private network addresses")
+return false, fmt.Errorf("peer's does not contain peer network range addresses")
 }

 maskedPrefixes := make([]netip.Prefix, 0, len(p.Ranges))
@ -34,7 +34,7 @@ func (p *PrivateNetworkCheck) Check(peer nbpeer.Peer) (bool, error) {
 case CheckActionAllow:
 return true, nil
 default:
-return false, fmt.Errorf("invalid private network check action: %s", p.Action)
+return false, fmt.Errorf("invalid peer network range check action: %s", p.Action)
 }
 }
 }
@ -46,9 +46,9 @@ func (p *PrivateNetworkCheck) Check(peer nbpeer.Peer) (bool, error) {
 return false, nil
 }

-return false, fmt.Errorf("invalid private network check action: %s", p.Action)
+return false, fmt.Errorf("invalid peer network range check action: %s", p.Action)
 }

-func (p *PrivateNetworkCheck) Name() string {
-return PrivateNetworkCheckName
+func (p *PeerNetworkRangeCheck) Name() string {
+return PeerNetworkRangeCheckName
 }
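A short sketch (not part of this diff) of evaluating the renamed check directly; a peer that reports no network addresses takes the error path shown above:

    package example

    import (
        "fmt"
        "net/netip"

        nbpeer "github.com/netbirdio/netbird/management/server/peer"
        "github.com/netbirdio/netbird/management/server/posture"
    )

    func evaluatePeer(peer nbpeer.Peer) {
        check := posture.PeerNetworkRangeCheck{
            Action: posture.CheckActionAllow,
            Ranges: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
        }

        ok, err := check.Check(peer)
        if err != nil {
            // e.g. the peer did not report any network addresses
            fmt.Println("check not applicable:", err)
            return
        }
        fmt.Println("peer allowed:", ok)
    }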
@ -9,17 +9,17 @@ import (
 nbpeer "github.com/netbirdio/netbird/management/server/peer"
 )

-func TestPrivateNetworkCheck_Check(t *testing.T) {
+func TestPeerNetworkRangeCheck_Check(t *testing.T) {
 tests := []struct {
 name string
-check PrivateNetworkCheck
+check PeerNetworkRangeCheck
 peer nbpeer.Peer
 wantErr bool
 isValid bool
 }{
 {
-name: "Peer private networks matches the allowed range",
-check: PrivateNetworkCheck{
+name: "Peer networks range matches the allowed range",
+check: PeerNetworkRangeCheck{
 Action: CheckActionAllow,
 Ranges: []netip.Prefix{
 netip.MustParsePrefix("192.168.0.0/24"),
@ -42,8 +42,8 @@ func TestPrivateNetworkCheck_Check(t *testing.T) {
 isValid: true,
 },
 {
-name: "Peer private networks doesn't matches the allowed range",
-check: PrivateNetworkCheck{
+name: "Peer networks range doesn't matches the allowed range",
+check: PeerNetworkRangeCheck{
 Action: CheckActionAllow,
 Ranges: []netip.Prefix{
 netip.MustParsePrefix("192.168.0.0/24"),
@ -63,8 +63,8 @@ func TestPrivateNetworkCheck_Check(t *testing.T) {
 isValid: false,
 },
 {
-name: "Peer with no privates network in the allow range",
-check: PrivateNetworkCheck{
+name: "Peer with no network range in the allow range",
+check: PeerNetworkRangeCheck{
 Action: CheckActionAllow,
 Ranges: []netip.Prefix{
 netip.MustParsePrefix("192.168.0.0/16"),
@ -76,8 +76,8 @@ func TestPrivateNetworkCheck_Check(t *testing.T) {
 isValid: false,
 },
 {
-name: "Peer private networks matches the denied range",
-check: PrivateNetworkCheck{
+name: "Peer networks range matches the denied range",
+check: PeerNetworkRangeCheck{
 Action: CheckActionDeny,
 Ranges: []netip.Prefix{
 netip.MustParsePrefix("192.168.0.0/24"),
@ -100,8 +100,8 @@ func TestPrivateNetworkCheck_Check(t *testing.T) {
 isValid: false,
 },
 {
-name: "Peer private networks doesn't matches the denied range",
-check: PrivateNetworkCheck{
+name: "Peer networks range doesn't matches the denied range",
+check: PeerNetworkRangeCheck{
 Action: CheckActionDeny,
 Ranges: []netip.Prefix{
 netip.MustParsePrefix("192.168.0.0/24"),
@ -121,8 +121,8 @@ func TestPrivateNetworkCheck_Check(t *testing.T) {
 isValid: true,
 },
 {
-name: "Peer with no private networks in the denied range",
-check: PrivateNetworkCheck{
+name: "Peer with no networks range in the denied range",
+check: PeerNetworkRangeCheck{
 Action: CheckActionDeny,
 Ranges: []netip.Prefix{
 netip.MustParsePrefix("192.168.0.0/16"),
@ -1,9 +1,10 @@
 package server

 import (
-log "github.com/sirupsen/logrus"
 "sync"
 "time"
+
+log "github.com/sirupsen/logrus"
 )

 // Scheduler is an interface which implementations can schedule and cancel jobs
@ -55,14 +56,8 @@ func (wm *DefaultScheduler) cancel(ID string) bool {
 cancel, ok := wm.jobs[ID]
 if ok {
 delete(wm.jobs, ID)
-select {
-case cancel <- struct{}{}:
+close(cancel)
 log.Debugf("cancelled scheduled job %s", ID)
-default:
-log.Warnf("couldn't cancel job %s because there was no routine listening on the cancel event", ID)
-return false
-}
-
 }
 return ok
 }
@ -90,25 +85,41 @@ func (wm *DefaultScheduler) Schedule(in time.Duration, ID string, job func() (ne
 return
 }

+ticker := time.NewTicker(in)
+
 wm.jobs[ID] = cancel
 log.Debugf("scheduled a job %s to run in %s. There are %d total jobs scheduled.", ID, in.String(), len(wm.jobs))
 go func() {
+for {
+select {
+case <-ticker.C:
 select {
-case <-time.After(in):
-log.Debugf("time to do a scheduled job %s", ID)
-runIn, reschedule := job()
-wm.mu.Lock()
-defer wm.mu.Unlock()
-delete(wm.jobs, ID)
-if reschedule {
-go wm.Schedule(runIn, ID, job)
-}
 case <-cancel:
-log.Debugf("stopped scheduled job %s ", ID)
+log.Debugf("scheduled job %s was canceled, stop timer", ID)
+ticker.Stop()
+return
+default:
+log.Debugf("time to do a scheduled job %s", ID)
+}
+runIn, reschedule := job()
+if !reschedule {
 wm.mu.Lock()
 defer wm.mu.Unlock()
 delete(wm.jobs, ID)
+log.Debugf("job %s is not scheduled to run again", ID)
+ticker.Stop()
 return
 }
+// we need this comparison to avoid resetting the ticker with the same duration and missing the current elapsesed time
+if runIn != in {
+ticker.Reset(runIn)
+}
+case <-cancel:
+log.Debugf("job %s was canceled, stopping timer", ID)
+ticker.Stop()
+return
+}
+}

 }()
 }
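A usage sketch (not part of this diff) for the reworked ticker-based scheduler; the job name, duration, and body are illustrative:

    package example

    import (
        "log"
        "time"

        "github.com/netbirdio/netbird/management/server"
    )

    func runCleanupJob() {
        scheduler := server.NewDefaultScheduler()

        // The job reschedules itself every 30 seconds until it returns reschedule=false or is cancelled.
        scheduler.Schedule(30*time.Second, "cleanup-job", func() (nextRunIn time.Duration, reschedule bool) {
            log.Println("running cleanup")
            return 30 * time.Second, true
        })

        // Cancel closes the job's cancel channel, which stops the ticker goroutine.
        scheduler.Cancel([]string{"cleanup-job"})
    }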
@ -2,11 +2,12 @@ package server

 import (
 "fmt"
-"github.com/stretchr/testify/assert"
 "math/rand"
 "sync"
 "testing"
 "time"
+
+"github.com/stretchr/testify/assert"
 )

 func TestScheduler_Performance(t *testing.T) {
@ -36,15 +37,24 @@ func TestScheduler_Cancel(t *testing.T) {
 jobID1 := "test-scheduler-job-1"
 jobID2 := "test-scheduler-job-2"
 scheduler := NewDefaultScheduler()
-scheduler.Schedule(2*time.Second, jobID1, func() (nextRunIn time.Duration, reschedule bool) {
-return 0, false
+tChan := make(chan struct{})
+p := []string{jobID1, jobID2}
+scheduler.Schedule(2*time.Millisecond, jobID1, func() (nextRunIn time.Duration, reschedule bool) {
+tt := p[0]
+<-tChan
+t.Logf("job %s", tt)
+return 2 * time.Millisecond, true
 })
-scheduler.Schedule(2*time.Second, jobID2, func() (nextRunIn time.Duration, reschedule bool) {
-return 0, false
+scheduler.Schedule(2*time.Millisecond, jobID2, func() (nextRunIn time.Duration, reschedule bool) {
+return 2 * time.Millisecond, true
 })

+time.Sleep(4 * time.Millisecond)
 assert.Len(t, scheduler.jobs, 2)
 scheduler.Cancel([]string{jobID1})
+close(tChan)
+p = []string{}
+time.Sleep(4 * time.Millisecond)
 assert.Len(t, scheduler.jobs, 1)
 assert.NotNil(t, scheduler.jobs[jobID2])
 }
@ -85,6 +85,8 @@ type User struct {
 Blocked bool
 // LastLogin is the last time the user logged in to IdP
 LastLogin time.Time
+// CreatedAt records the time the user was created
+CreatedAt time.Time

 // Issued of the user
 Issued string `gorm:"default:api"`
@ -173,6 +175,7 @@ func (u *User) Copy() *User {
 PATs: pats,
 Blocked: u.Blocked,
 LastLogin: u.LastLogin,
+CreatedAt: u.CreatedAt,
 Issued: u.Issued,
 IntegrationReference: u.IntegrationReference,
 }
@ -188,6 +191,7 @@ func NewUser(id string, role UserRole, isServiceUser bool, nonDeletable bool, se
 ServiceUserName: serviceUserName,
 AutoGroups: autoGroups,
 Issued: issued,
+CreatedAt: time.Now().UTC(),
 }
 }

@ -338,6 +342,7 @@ func (am *DefaultAccountManager) inviteNewUser(accountID, userID string, invite
 AutoGroups: invite.AutoGroups,
 Issued: invite.Issued,
 IntegrationReference: invite.IntegrationReference,
+CreatedAt: time.Now().UTC(),
 }
 account.Users[idpUser.ID] = newUser

@ -414,7 +419,7 @@ func (am *DefaultAccountManager) ListUsers(accountID string) ([]*User, error) {
 }

 func (am *DefaultAccountManager) deleteServiceUser(account *Account, initiatorUserID string, targetUser *User) {
-meta := map[string]any{"name": targetUser.ServiceUserName}
+meta := map[string]any{"name": targetUser.ServiceUserName, "created_at": targetUser.CreatedAt}
 am.StoreEvent(initiatorUserID, targetUser.Id, account.Id, activity.ServiceUserDeleted, meta)
 delete(account.Users, targetUser.Id)
 }
@ -494,13 +499,23 @@ func (am *DefaultAccountManager) deleteRegularUser(account *Account, initiatorUs
 return err
 }

+u, err := account.FindUser(targetUserID)
+if err != nil {
+log.Errorf("failed to find user %s for deletion, this should never happen: %s", targetUserID, err)
+}
+
+var tuCreatedAt time.Time
+if u != nil {
+tuCreatedAt = u.CreatedAt
+}
+
 delete(account.Users, targetUserID)
 err = am.Store.SaveAccount(account)
 if err != nil {
 return err
 }

-meta := map[string]any{"name": tuName, "email": tuEmail}
+meta := map[string]any{"name": tuName, "email": tuEmail, "created_at": tuCreatedAt}
 am.StoreEvent(initiatorUserID, targetUserID, account.Id, activity.UserDeleted, meta)

 am.updateAccountPeers(account)
@ -273,7 +273,8 @@ func TestUser_Copy(t *testing.T) {
 },
 },
 Blocked: false,
-LastLogin: time.Now(),
+LastLogin: time.Now().UTC(),
+CreatedAt: time.Now().UTC(),
 Issued: "test",
 IntegrationReference: IntegrationReference{
 ID: 0,
@ -21,11 +21,10 @@ import (
 "google.golang.org/grpc/status"

 "github.com/netbirdio/netbird/encryption"
+"github.com/netbirdio/netbird/management/client"
 "github.com/netbirdio/netbird/signal/proto"
 )

-const defaultSendTimeout = 5 * time.Second
-
 // ConnStateNotifier is a wrapper interface of the status recorder
 type ConnStateNotifier interface {
 MarkSignalDisconnected(error)
@ -71,7 +70,7 @@ func NewClient(ctx context.Context, addr string, key wgtypes.Key, tlsEnabled boo
 transportOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
 }

-sigCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+sigCtx, cancel := context.WithTimeout(ctx, client.ConnectTimeout)
 defer cancel()
 conn, err := grpc.DialContext(
 sigCtx,
@ -353,7 +352,7 @@ func (c *GrpcClient) Send(msg *proto.Message) error {
 return err
 }

-attemptTimeout := defaultSendTimeout
+attemptTimeout := client.ConnectTimeout

 for attempt := 0; attempt < 4; attempt++ {
 if attempt > 1 {
@ -4,7 +4,6 @@ import (
 "errors"
 "flag"
 "fmt"
-"golang.org/x/crypto/acme/autocert"
 "io"
 "io/fs"
 "net"
@ -14,10 +13,14 @@ import (
 "strings"
 "time"

+"golang.org/x/crypto/acme/autocert"
+
 "github.com/netbirdio/netbird/encryption"
 "github.com/netbirdio/netbird/signal/proto"
 "github.com/netbirdio/netbird/signal/server"
 "github.com/netbirdio/netbird/util"
+"github.com/netbirdio/netbird/version"
+
 log "github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
 "google.golang.org/grpc"
@ -129,6 +132,7 @@ var (
 log.Infof("running gRPC server: %s", grpcListener.Addr().String())
 }

+log.Infof("signal server version %s", version.NetbirdVersion())
 log.Infof("started Signal Service")

 SetupCloseHandler()