[client] Destroy WG interface on down timeout (#2435)

The wait on engine down now checks not only that the WireGuard interface is down but that it has been completely removed. If the waiting loop reaches the timeout, we trigger an explicit interface destroy. On the up command, the client now waits until the engine is fully running before sending the response to the CLI. Also includes a small refactor of the probes to comply with Sonar rules about the parameter count of a function call.
pascal-fischer
2024-09-02 19:19:14 +02:00
committed by GitHub
parent 95174d4619
commit 13e7198046
16 changed files with 222 additions and 116 deletions
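
The interface-removal wait described in the message is implemented in the engine code, not in the file shown below. A minimal sketch of the down-timeout idea, with the platform-specific existence check and destroy passed in as callbacks (the helper name and signature are illustrative, not NetBird API):

import (
    "context"
    "time"
)

// waitForInterfaceRemoval sketches the down-timeout behaviour: poll until the
// WG interface is gone, and force-destroy it if the timeout is reached.
// The exists/destroy callbacks stand in for the platform-specific calls.
func waitForInterfaceRemoval(ctx context.Context, exists func() bool, destroy func() error, timeout time.Duration) error {
    ticker := time.NewTicker(100 * time.Millisecond)
    defer ticker.Stop()
    deadline := time.After(timeout)
    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-deadline:
            // Timed out waiting for removal: destroy instead of leaving a stale interface.
            return destroy()
        case <-ticker.C:
            if !exists() {
                return nil // interface completely removed
            }
        }
    }
}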


@@ -55,17 +55,15 @@ func NewConnectClient(

 // Run with main logic.
 func (c *ConnectClient) Run() error {
-    return c.run(MobileDependency{}, nil, nil, nil, nil)
+    return c.run(MobileDependency{}, nil, nil)
 }

 // RunWithProbes runs the client's main logic with probes attached
 func (c *ConnectClient) RunWithProbes(
-    mgmProbe *Probe,
-    signalProbe *Probe,
-    relayProbe *Probe,
-    wgProbe *Probe,
+    probes *ProbeHolder,
+    runningWg *sync.WaitGroup,
 ) error {
-    return c.run(MobileDependency{}, mgmProbe, signalProbe, relayProbe, wgProbe)
+    return c.run(MobileDependency{}, probes, runningWg)
 }

 // RunOnAndroid with main logic on mobile system
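
The four probe parameters collapse into a single ProbeHolder, which is what satisfies the Sonar parameter-count rule mentioned in the commit message. The struct is defined elsewhere in the client package; its assumed shape is simply a grouping of the four probes:

// Assumed shape of ProbeHolder; it is not shown in this diff, but grouping
// the four probes into one struct is what reduces the parameter count.
type ProbeHolder struct {
    MgmProbe    *Probe
    SignalProbe *Probe
    RelayProbe  *Probe
    WgProbe     *Probe
}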
@@ -84,7 +82,7 @@ func (c *ConnectClient) RunOnAndroid(
         HostDNSAddresses: dnsAddresses,
         DnsReadyListener: dnsReadyListener,
     }
-    return c.run(mobileDependency, nil, nil, nil, nil)
+    return c.run(mobileDependency, nil, nil)
 }

 func (c *ConnectClient) RunOniOS(
@@ -100,15 +98,13 @@ func (c *ConnectClient) RunOniOS(
         NetworkChangeListener: networkChangeListener,
         DnsManager:            dnsManager,
     }
-    return c.run(mobileDependency, nil, nil, nil, nil)
+    return c.run(mobileDependency, nil, nil)
 }

 func (c *ConnectClient) run(
     mobileDependency MobileDependency,
-    mgmProbe *Probe,
-    signalProbe *Probe,
-    relayProbe *Probe,
-    wgProbe *Probe,
+    probes *ProbeHolder,
+    runningWg *sync.WaitGroup,
 ) error {
     defer func() {
         if r := recover(); r != nil {
@@ -255,7 +251,7 @@ func (c *ConnectClient) run(
     checks := loginResp.GetChecks()

     c.engineMutex.Lock()
-    c.engine = NewEngineWithProbes(engineCtx, cancel, signalClient, mgmClient, engineConfig, mobileDependency, c.statusRecorder, mgmProbe, signalProbe, relayProbe, wgProbe, checks)
+    c.engine = NewEngineWithProbes(engineCtx, cancel, signalClient, mgmClient, engineConfig, mobileDependency, c.statusRecorder, probes, checks)
     c.engineMutex.Unlock()

     err = c.engine.Start()
@@ -267,17 +263,15 @@ func (c *ConnectClient) run(
     log.Infof("Netbird engine started, the IP is: %s", peerConfig.GetAddress())
     state.Set(StatusConnected)

+    if runningWg != nil {
+        runningWg.Done()
+    }
+
     <-engineCtx.Done()
     c.statusRecorder.ClientTeardown()
     backOff.Reset()

-    err = c.engine.Stop()
-    if err != nil {
-        log.Errorf("failed stopping engine %v", err)
-        return wrapErr(err)
-    }
     log.Info("stopped NetBird client")

     if _, err := state.Status(); errors.Is(err, ErrResetConnection) {
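
The runningWg.Done() call above is what lets the up command hold its response until the engine is actually running. A hypothetical caller-side sketch (client and probes created elsewhere; not the actual daemon code):

var runningWg sync.WaitGroup
runningWg.Add(1)

go func() {
    if err := client.RunWithProbes(probes, &runningWg); err != nil {
        log.Errorf("client run failed: %v", err)
    }
}()

// Blocks until run() reaches StatusConnected and calls runningWg.Done(),
// so the CLI only gets its reply once the engine is fully up.
runningWg.Wait()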
@@ -307,6 +301,12 @@ func (c *ConnectClient) Engine() *Engine {
     return e
 }

+func (c *ConnectClient) Stop() error {
+    c.engineMutex.Lock()
+    defer c.engineMutex.Unlock()
+
+    return c.engine.Stop()
+}
+
 // createEngineConfig converts configuration received from Management Service to EngineConfig
 func createEngineConfig(key wgtypes.Key, config *Config, peerConfig *mgmProto.PeerConfig) (*EngineConfig, error) {
     nm := false