Mirror of https://github.com/netbirdio/netbird.git (synced 2025-03-13 06:08:48 +01:00)

Commit a76ca8c565: Merge branch 'main' into feature/port-forwarding
.github/workflows/release.yml (vendored, 2 lines changed)
@@ -9,7 +9,7 @@ on:
   pull_request:
 
 env:
-  SIGN_PIPE_VER: "v0.0.17"
+  SIGN_PIPE_VER: "v0.0.18"
   GORELEASER_VER: "v2.3.2"
   PRODUCT_NAME: "NetBird"
   COPYRIGHT: "Wiretrustee UG (haftungsbeschreankt)"
@@ -190,7 +190,7 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command) error {
 	r.GetFullStatus()
 
 	connectClient := internal.NewConnectClient(ctx, config, r)
-	return connectClient.Run()
+	return connectClient.Run(nil)
 }
 
 func runInDaemonMode(ctx context.Context, cmd *cobra.Command) error {
@@ -348,6 +348,10 @@ func (m *AclManager) addIOFiltering(
 		UserData: userData,
 	})
 
+	if err := m.rConn.Flush(); err != nil {
+		return nil, fmt.Errorf(flushError, err)
+	}
+
 	rule := &Rule{
 		nftRule:    nftRule,
 		mangleRule: m.createPreroutingRule(expressions, userData),
@@ -359,6 +363,7 @@ func (m *AclManager) addIOFiltering(
 	if ipset != nil {
 		m.ipsetStore.AddReferenceToIpset(ipset.Name)
 	}
 
 	return rule, nil
 }
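Context for the hunk above: the google/nftables client batches changes on its connection and only sends them to the kernel when Flush is called, so the error has to be checked at that point. A minimal sketch of that queue-then-flush pattern under that assumption (table and chain names here are illustrative, not taken from this diff):

package main

import (
	"fmt"

	"github.com/google/nftables"
)

func addRuleAndFlush() error {
	conn, err := nftables.New()
	if err != nil {
		return fmt.Errorf("open netlink: %w", err)
	}

	// Queue objects; nothing reaches the kernel yet.
	table := conn.AddTable(&nftables.Table{Name: "example", Family: nftables.TableFamilyIPv4})
	chain := conn.AddChain(&nftables.Chain{Name: "example-chain", Table: table})
	conn.AddRule(&nftables.Rule{Table: table, Chain: chain})

	// Flush applies the whole batch; errors only surface here.
	if err := conn.Flush(); err != nil {
		return fmt.Errorf("flush: %w", err)
	}
	return nil
}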
@@ -59,13 +59,8 @@ func NewConnectClient(
 }
 
 // Run with main logic.
-func (c *ConnectClient) Run() error {
-	return c.run(MobileDependency{}, nil, nil)
-}
-
-// RunWithProbes runs the client's main logic with probes attached
-func (c *ConnectClient) RunWithProbes(probes *ProbeHolder, runningChan chan error) error {
-	return c.run(MobileDependency{}, probes, runningChan)
+func (c *ConnectClient) Run(runningChan chan error) error {
+	return c.run(MobileDependency{}, runningChan)
 }
 
 // RunOnAndroid with main logic on mobile system
@@ -84,7 +79,7 @@ func (c *ConnectClient) RunOnAndroid(
 		HostDNSAddresses: dnsAddresses,
 		DnsReadyListener: dnsReadyListener,
 	}
-	return c.run(mobileDependency, nil, nil)
+	return c.run(mobileDependency, nil)
 }
 
 func (c *ConnectClient) RunOniOS(
@@ -102,10 +97,10 @@ func (c *ConnectClient) RunOniOS(
 		DnsManager:    dnsManager,
 		StateFilePath: stateFilePath,
 	}
-	return c.run(mobileDependency, nil, nil)
+	return c.run(mobileDependency, nil)
 }
 
-func (c *ConnectClient) run(mobileDependency MobileDependency, probes *ProbeHolder, runningChan chan error) error {
+func (c *ConnectClient) run(mobileDependency MobileDependency, runningChan chan error) error {
 	defer func() {
 		if r := recover(); r != nil {
 			log.Panicf("Panic occurred: %v, stack trace: %s", r, string(debug.Stack()))
@@ -261,7 +256,7 @@ func (c *ConnectClient) run(mobileDependency MobileDependency, probes *ProbeHolder, runningChan chan error) error {
 	checks := loginResp.GetChecks()
 
 	c.engineMutex.Lock()
-	c.engine = NewEngineWithProbes(engineCtx, cancel, signalClient, mgmClient, relayManager, engineConfig, mobileDependency, c.statusRecorder, probes, checks)
+	c.engine = NewEngine(engineCtx, cancel, signalClient, mgmClient, relayManager, engineConfig, mobileDependency, c.statusRecorder, checks)
 	c.engine.SetNetworkMapPersistence(c.persistNetworkMap)
 	c.engineMutex.Unlock()
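Taken together, the connect-client hunks above collapse Run() and RunWithProbes into a single Run(runningChan chan error) entry point. A sketch of how a caller might use the new signature, based only on the two call sites visible in this diff (nil from the foreground CLI path, a channel when the daemon wants to know the engine came up). Imports are omitted; the helper is assumed to sit inside the NetBird client module where internal.NewConnectClient is already used, and the "nil means started" semantics of runningChan is an assumption:

// Hypothetical helper; only NewConnectClient and Run come from the diff above.
func runClientInBackground(ctx context.Context, config *internal.Config, recorder *peer.Status) error {
	connectClient := internal.NewConnectClient(ctx, config, recorder)

	// Foreground path (as in runInForegroundMode): no channel, just block.
	//   return connectClient.Run(nil)

	// Daemon path (as in connectWithRetryRuns): pass a channel so the caller
	// learns when the engine is running, or why it failed (assumed semantics).
	runningChan := make(chan error, 1)
	go func() {
		if err := connectClient.Run(runningChan); err != nil {
			log.Debugf("client run exited: %v", err)
		}
	}()

	select {
	case err := <-runningChan:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}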
@@ -177,8 +177,6 @@ type Engine struct {
 
 	dnsServer dns.Server
 
-	probes *ProbeHolder
-
 	// checks are the client-applied posture checks that need to be evaluated on the client
 	checks []*mgmProto.Checks
 
@@ -198,7 +196,7 @@ type Peer struct {
 	WgAllowedIps string
 }
 
-// NewEngine creates a new Connection Engine
+// NewEngine creates a new Connection Engine with probes attached
 func NewEngine(
 	clientCtx context.Context,
 	clientCancel context.CancelFunc,
@@ -209,33 +207,6 @@ func NewEngine(
 	mobileDep MobileDependency,
 	statusRecorder *peer.Status,
 	checks []*mgmProto.Checks,
 ) *Engine {
-	return NewEngineWithProbes(
-		clientCtx,
-		clientCancel,
-		signalClient,
-		mgmClient,
-		relayManager,
-		config,
-		mobileDep,
-		statusRecorder,
-		nil,
-		checks,
-	)
-}
-
-// NewEngineWithProbes creates a new Connection Engine with probes attached
-func NewEngineWithProbes(
-	clientCtx context.Context,
-	clientCancel context.CancelFunc,
-	signalClient signal.Client,
-	mgmClient mgm.Client,
-	relayManager *relayClient.Manager,
-	config *EngineConfig,
-	mobileDep MobileDependency,
-	statusRecorder *peer.Status,
-	probes *ProbeHolder,
-	checks []*mgmProto.Checks,
-) *Engine {
 	engine := &Engine{
 		clientCtx: clientCtx,
@@ -253,7 +224,6 @@ func NewEngineWithProbes(
 		networkSerial:  0,
 		sshServerFunc:  nbssh.DefaultSSHServer,
 		statusRecorder: statusRecorder,
-		probes:         probes,
 		checks:         checks,
 		connSemaphore:  semaphoregroup.NewSemaphoreGroup(connInitLimit),
 	}
@@ -459,7 +429,6 @@ func (e *Engine) Start() error {
 
 	e.receiveSignalEvents()
 	e.receiveManagementEvents()
-	e.receiveProbeEvents()
 
 	// starting network monitor at the very last to avoid disruptions
 	e.startNetworkMonitor()
@@ -1527,54 +1496,32 @@ func (e *Engine) getRosenpassAddr() string {
 	return ""
 }
 
-func (e *Engine) receiveProbeEvents() {
-	if e.probes == nil {
-		return
-	}
-	if e.probes.SignalProbe != nil {
-		go e.probes.SignalProbe.Receive(e.ctx, func() bool {
-			healthy := e.signal.IsHealthy()
-			log.Debugf("received signal probe request, healthy: %t", healthy)
-			return healthy
-		})
-	}
+// RunHealthProbes executes health checks for Signal, Management, Relay and WireGuard services
+// and updates the status recorder with the latest states.
+func (e *Engine) RunHealthProbes() bool {
+	signalHealthy := e.signal.IsHealthy()
+	log.Debugf("signal health check: healthy=%t", signalHealthy)
 
-	if e.probes.MgmProbe != nil {
-		go e.probes.MgmProbe.Receive(e.ctx, func() bool {
-			healthy := e.mgmClient.IsHealthy()
-			log.Debugf("received management probe request, healthy: %t", healthy)
-			return healthy
-		})
-	}
-
-	if e.probes.RelayProbe != nil {
-		go e.probes.RelayProbe.Receive(e.ctx, func() bool {
-			healthy := true
+	managementHealthy := e.mgmClient.IsHealthy()
+	log.Debugf("management health check: healthy=%t", managementHealthy)
 
 	results := append(e.probeSTUNs(), e.probeTURNs()...)
 	e.statusRecorder.UpdateRelayStates(results)
 
 	// A single failed server will result in a "failed" probe
+	relayHealthy := true
 	for _, res := range results {
 		if res.Err != nil {
-			healthy = false
+			relayHealthy = false
 			break
 		}
 	}
 
-			log.Debugf("received relay probe request, healthy: %t", healthy)
-			return healthy
-		})
-	}
-
-	if e.probes.WgProbe != nil {
-		go e.probes.WgProbe.Receive(e.ctx, func() bool {
-			log.Debug("received wg probe request")
+	log.Debugf("relay health check: healthy=%t", relayHealthy)
 
 	for _, key := range e.peerStore.PeersPubKey() {
 		wgStats, err := e.wgInterface.GetStats(key)
 		if err != nil {
 			log.Debugf("failed to get wg stats for peer %s: %s", key, err)
 			continue
 		}
 		// wgStats could be zero value, in which case we just reset the stats
 		if err := e.statusRecorder.UpdateWireGuardPeerState(key, wgStats); err != nil {
@@ -1582,17 +1529,25 @@ func (e *Engine) receiveProbeEvents() {
 		}
 	}
 
-			return true
-		})
-	}
+	allHealthy := signalHealthy && managementHealthy && relayHealthy
+	log.Debugf("all health checks completed: healthy=%t", allHealthy)
+	return allHealthy
 }
 
 func (e *Engine) probeSTUNs() []relay.ProbeResult {
-	return relay.ProbeAll(e.ctx, relay.ProbeSTUN, e.STUNs)
+	e.syncMsgMux.Lock()
+	stuns := slices.Clone(e.STUNs)
+	e.syncMsgMux.Unlock()
+
+	return relay.ProbeAll(e.ctx, relay.ProbeSTUN, stuns)
 }
 
 func (e *Engine) probeTURNs() []relay.ProbeResult {
-	return relay.ProbeAll(e.ctx, relay.ProbeTURN, e.TURNs)
+	e.syncMsgMux.Lock()
+	turns := slices.Clone(e.TURNs)
+	e.syncMsgMux.Unlock()
+
+	return relay.ProbeAll(e.ctx, relay.ProbeTURN, turns)
 }
 
 func (e *Engine) restartEngine() {
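The probeSTUNs/probeTURNs change above follows a common concurrency pattern: copy the shared slice while holding the mutex, then run the slow network probes on the copy so the lock is never held across I/O. A minimal, generic sketch of that pattern (type and function names are illustrative, not from the NetBird code):

package main

import (
	"fmt"
	"slices"
	"sync"
	"time"
)

type prober struct {
	mu      sync.Mutex
	servers []string // shared state, mutated elsewhere under mu
}

// probeAll clones the server list under the lock, then probes without holding it,
// so slow network calls never block writers of p.servers.
func (p *prober) probeAll() []string {
	p.mu.Lock()
	servers := slices.Clone(p.servers)
	p.mu.Unlock()

	var reachable []string
	for _, s := range servers {
		time.Sleep(10 * time.Millisecond) // stand-in for a real STUN/TURN probe
		reachable = append(reachable, s)
	}
	return reachable
}

func main() {
	p := &prober{servers: []string{"stun:example.com:3478"}}
	fmt.Println(p.probeAll())
}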
@@ -1,58 +0,0 @@
-package internal
-
-import "context"
-
-type ProbeHolder struct {
-	MgmProbe    *Probe
-	SignalProbe *Probe
-	RelayProbe  *Probe
-	WgProbe     *Probe
-}
-
-// Probe allows to run on-demand callbacks from different code locations.
-// Pass the probe to a receiving and a sending end. The receiving end starts listening
-// to requests with Receive and executes a callback when the sending end requests it
-// by calling Probe.
-type Probe struct {
-	request chan struct{}
-	result  chan bool
-	ready   bool
-}
-
-// NewProbe returns a new initialized probe.
-func NewProbe() *Probe {
-	return &Probe{
-		request: make(chan struct{}),
-		result:  make(chan bool),
-	}
-}
-
-// Probe requests the callback to be run and returns a bool indicating success.
-// It always returns true as long as the receiver is not ready.
-func (p *Probe) Probe() bool {
-	if !p.ready {
-		return true
-	}
-
-	p.request <- struct{}{}
-	return <-p.result
-}
-
-// Receive starts listening for probe requests. On such a request it runs the supplied
-// callback func which must return a bool indicating success.
-// Blocks until the passed context is cancelled.
-func (p *Probe) Receive(ctx context.Context, callback func() bool) {
-	p.ready = true
-	defer func() {
-		p.ready = false
-	}()
-
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-p.request:
-			p.result <- callback()
-		}
-	}
-}
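The deleted file above implemented a small request/response probe: one goroutine serves requests via Receive with a callback, another triggers a check by calling Probe and reads the boolean result back over a channel. A standalone sketch of that mechanism end to end, reimplemented here for illustration only (the main function and callback are not part of the original file):

package main

import (
	"context"
	"fmt"
	"time"
)

// Probe mirrors the removed type: a request channel and a result channel.
type Probe struct {
	request chan struct{}
	result  chan bool
	ready   bool
}

func NewProbe() *Probe {
	return &Probe{request: make(chan struct{}), result: make(chan bool)}
}

// Probe asks the receiving side to run its callback and returns the result.
// If nobody is listening yet, it optimistically reports success.
func (p *Probe) Probe() bool {
	if !p.ready {
		return true
	}
	p.request <- struct{}{}
	return <-p.result
}

// Receive serves probe requests until the context is cancelled.
func (p *Probe) Receive(ctx context.Context, callback func() bool) {
	p.ready = true
	defer func() { p.ready = false }()
	for {
		select {
		case <-ctx.Done():
			return
		case <-p.request:
			p.result <- callback()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	probe := NewProbe()
	go probe.Receive(ctx, func() bool { return true }) // e.g. "is the signal connection healthy?"

	time.Sleep(10 * time.Millisecond) // give the receiver a moment to start
	fmt.Println("healthy:", probe.Probe())
}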
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"reflect"
+	runtime "runtime"
 	"time"
 
 	"github.com/hashicorp/go-multierror"
@@ -439,7 +440,7 @@ func handlerType(rt *route.Route, useNewDNSRoute bool) int {
 		return handlerTypeStatic
 	}
 
-	if useNewDNSRoute {
+	if useNewDNSRoute && runtime.GOOS != "ios" {
 		return handlerTypeDomain
 	}
 	return handlerTypeDynamic
@@ -63,12 +63,7 @@ type Server struct {
 	statusRecorder *peer.Status
 	sessionWatcher *internal.SessionWatcher
 
-	mgmProbe    *internal.Probe
-	signalProbe *internal.Probe
-	relayProbe  *internal.Probe
-	wgProbe     *internal.Probe
 	lastProbe time.Time
 
 	persistNetworkMap bool
 }
@@ -87,11 +82,6 @@ func New(ctx context.Context, configPath, logFile string) *Server {
 			ConfigPath: configPath,
 		},
 		logFile: logFile,
-		mgmProbe:    internal.NewProbe(),
-		signalProbe: internal.NewProbe(),
-		relayProbe:  internal.NewProbe(),
-		wgProbe:     internal.NewProbe(),
-
 		persistNetworkMap: true,
 	}
 }
@@ -202,14 +192,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Config,
 		s.connectClient = internal.NewConnectClient(ctx, config, statusRecorder)
 		s.connectClient.SetNetworkMapPersistence(s.persistNetworkMap)
 
-		probes := internal.ProbeHolder{
-			MgmProbe:    s.mgmProbe,
-			SignalProbe: s.signalProbe,
-			RelayProbe:  s.relayProbe,
-			WgProbe:     s.wgProbe,
-		}
-
-		err := s.connectClient.RunWithProbes(&probes, runningChan)
+		err := s.connectClient.Run(runningChan)
 		if err != nil {
 			log.Debugf("run client connection exited with error: %v. Will retry in the background", err)
 		}
@@ -676,9 +659,13 @@ func (s *Server) Down(ctx context.Context, _ *proto.DownRequest) (*proto.DownResponse, error) {
 
 // Status returns the daemon status
 func (s *Server) Status(
-	_ context.Context,
+	ctx context.Context,
 	msg *proto.StatusRequest,
 ) (*proto.StatusResponse, error) {
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
 
@@ -707,14 +694,17 @@
 }
 
 func (s *Server) runProbes() {
-	if time.Since(s.lastProbe) > probeThreshold {
-		managementHealthy := s.mgmProbe.Probe()
-		signalHealthy := s.signalProbe.Probe()
-		relayHealthy := s.relayProbe.Probe()
-		wgProbe := s.wgProbe.Probe()
-
-		// Update last time only if all probes were successful
-		if managementHealthy && signalHealthy && relayHealthy && wgProbe {
+	if s.connectClient == nil {
+		return
+	}
+
+	engine := s.connectClient.Engine()
+	if engine == nil {
+		return
+	}
+
+	if time.Since(s.lastProbe) > probeThreshold {
+		if engine.RunHealthProbes() {
 			s.lastProbe = time.Now()
 		}
 	}
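The rewritten runProbes above replaces four channel-based probes with a single engine.RunHealthProbes() call, throttled by lastProbe and probeThreshold so repeated Status requests do not re-probe constantly, and lastProbe only advances when every check succeeded. A minimal generic sketch of that throttling shape (names and the one-minute threshold are illustrative assumptions, not values from this diff):

package main

import (
	"fmt"
	"sync"
	"time"
)

type healthChecker struct {
	mu             sync.Mutex
	lastProbe      time.Time
	probeThreshold time.Duration
	runProbes      func() bool // stands in for engine.RunHealthProbes
}

// maybeProbe runs the (possibly expensive) health probes at most once per
// probeThreshold, and only advances lastProbe when every check succeeded.
func (h *healthChecker) maybeProbe() {
	h.mu.Lock()
	defer h.mu.Unlock()

	if time.Since(h.lastProbe) <= h.probeThreshold {
		return // probed recently, skip
	}
	if h.runProbes() {
		h.lastProbe = time.Now()
	}
}

func main() {
	h := &healthChecker{
		probeThreshold: time.Minute, // illustrative value
		runProbes:      func() bool { fmt.Println("probing..."); return true },
	}
	h.maybeProbe() // probes
	h.maybeProbe() // skipped: within the threshold
}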
go.mod (2 lines changed)
@@ -92,7 +92,7 @@ require (
 	goauthentik.io/api/v3 v3.2023051.3
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
 	golang.org/x/mobile v0.0.0-20231127183840-76ac6878050a
-	golang.org/x/net v0.30.0
+	golang.org/x/net v0.33.0
 	golang.org/x/oauth2 v0.19.0
 	golang.org/x/sync v0.10.0
 	golang.org/x/term v0.28.0
go.sum (4 lines changed)
@@ -883,8 +883,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -73,7 +73,9 @@ func (pm *PolicyRule) Copy() *PolicyRule {
 		Enabled:             pm.Enabled,
 		Action:              pm.Action,
 		Destinations:        make([]string, len(pm.Destinations)),
+		DestinationResource: pm.DestinationResource,
 		Sources:             make([]string, len(pm.Sources)),
+		SourceResource:      pm.SourceResource,
 		Bidirectional:       pm.Bidirectional,
 		Protocol:            pm.Protocol,
 		Ports:               make([]string, len(pm.Ports)),
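The last hunk adds the new resource fields to PolicyRule.Copy. Forgetting fields in a hand-written Copy method is an easy regression, and a reflection-based check that populates every field, copies, and flags anything left at its zero value is one way to guard against it. A generic sketch of that idea (the small Rule type here is illustrative, not NetBird's PolicyRule):

package main

import (
	"fmt"
	"reflect"
)

type Rule struct {
	Enabled             bool
	DestinationResource string
	SourceResource      string
	Ports               []string
}

// Copy must be kept in sync with the struct; new fields are easy to miss.
func (r *Rule) Copy() *Rule {
	c := &Rule{
		Enabled:             r.Enabled,
		DestinationResource: r.DestinationResource,
		SourceResource:      r.SourceResource,
		Ports:               make([]string, len(r.Ports)),
	}
	copy(c.Ports, r.Ports)
	return c
}

// checkCopy populates every field, copies, and reports fields the copy left zero.
func checkCopy() []string {
	orig := &Rule{Enabled: true, DestinationResource: "res-a", SourceResource: "res-b", Ports: []string{"80"}}
	got := reflect.ValueOf(*orig.Copy())

	var missing []string
	for i := 0; i < got.NumField(); i++ {
		if got.Field(i).IsZero() {
			missing = append(missing, got.Type().Field(i).Name)
		}
	}
	return missing
}

func main() {
	fmt.Println("fields not copied:", checkCopy())
}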