Repository: https://github.com/netbirdio/netbird.git (mirror)
Commit 9691e197df: Merge branch 'main' into feature/poc-lazy-detection
@@ -58,7 +58,9 @@ type Manager struct {
 	// Route HA group management
-	routesMu sync.RWMutex
+	peerToHAGroups map[string][]route.HAUniqueID // peer ID -> HA groups they belong to
+	haGroupToPeers map[route.HAUniqueID][]string // HA group -> peer IDs in the group
+	routesMu       sync.RWMutex                  // protects route mappings
 
 	onInactive chan peerid.ConnID
 }
 
 // NewManager creates a new lazy connection manager
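Note: the two new maps form a bidirectional index over the same membership data, so they must always be updated together under `routesMu`. A minimal sketch of rebuilding such a pair in one pass so the two directions cannot drift apart (`HAUniqueID` is a local stand-in for `route.HAUniqueID`; the input shape is an assumption for illustration):

```go
package main

import "fmt"

// HAUniqueID is a local stand-in for route.HAUniqueID.
type HAUniqueID string

// rebuildIndex derives both directions of the peer<->HA-group index from a
// single source. A caller would hold the write lock while swapping these in.
func rebuildIndex(groups map[HAUniqueID][]string) (map[string][]HAUniqueID, map[HAUniqueID][]string) {
	peerToGroups := make(map[string][]HAUniqueID)
	groupToPeers := make(map[HAUniqueID][]string, len(groups))
	for group, peers := range groups {
		for _, peerID := range peers {
			peerToGroups[peerID] = append(peerToGroups[peerID], group)
			groupToPeers[group] = append(groupToPeers[group], peerID)
		}
	}
	return peerToGroups, groupToPeers
}

func main() {
	peerToGroups, groupToPeers := rebuildIndex(map[HAUniqueID][]string{
		"site-a": {"peerA", "peerB"},
	})
	fmt.Println(peerToGroups, groupToPeers)
}
```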
@@ -223,6 +225,13 @@ func (m *Manager) AddPeer(peerCfg lazyconn.PeerConfig) (bool, error) {
 		peerCfg:         &peerCfg,
 		expectedWatcher: watcherActivity,
 	}
 
+	// Check if this peer should be activated because its HA group peers are active
+	if group, ok := m.shouldActivateNewPeer(peerCfg.PublicKey); ok {
+		peerCfg.Log.Debugf("peer belongs to active HA group %s, will activate immediately", group)
+		m.activateNewPeerInActiveGroup(peerCfg)
+	}
+
 	return false, nil
 }
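Note: the decision made here reduces to a pure lookup over the two HA maps, which makes it easy to unit-test without a `Manager`. A sketch of that decision extracted as a standalone function (names and map shapes are illustrative, not the actual API):

```go
package sketch

// groupTriggeringActivation reports the first HA group of peerID that already
// has another activated member. "activated" mirrors the manager's notion of a
// peer parked on the inactivity watcher. Illustrative extraction only.
func groupTriggeringActivation(
	peerID string,
	peerToGroups map[string][]string,
	groupToPeers map[string][]string,
	activated map[string]bool,
) (string, bool) {
	for _, group := range peerToGroups[peerID] {
		for _, other := range groupToPeers[group] {
			if other != peerID && activated[other] {
				return group, true
			}
		}
	}
	return "", false
}
```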
@@ -334,26 +343,29 @@ func (m *Manager) activateSinglePeer(cfg *lazyconn.PeerConfig, mp *managedPeer)
 
 // activateHAGroupPeers activates all peers in HA groups that the given peer belongs to
 func (m *Manager) activateHAGroupPeers(triggerPeerID string) {
+	var peersToActivate []string
+
 	m.routesMu.RLock()
 	haGroups := m.peerToHAGroups[triggerPeerID]
+	m.routesMu.RUnlock()
 
 	if len(haGroups) == 0 {
-		m.routesMu.RUnlock()
 		log.Debugf("peer %s is not part of any HA groups", triggerPeerID)
 		return
 	}
 
-	activatedCount := 0
+	m.routesMu.RLock()
 	for _, haGroup := range haGroups {
 		peers := m.haGroupToPeers[haGroup]
-		for _, peerID := range peers {
-			if peerID == triggerPeerID {
-				continue
-			}
-
+		for _, peerID := range peers {
+			if peerID != triggerPeerID {
+				peersToActivate = append(peersToActivate, peerID)
+			}
+		}
+	}
+	m.routesMu.RUnlock()
+
+	activatedCount := 0
+	for _, peerID := range peersToActivate {
 		cfg, mp := m.getPeerForActivation(peerID)
 		if cfg == nil {
 			continue
@@ -361,11 +373,10 @@ func (m *Manager) activateHAGroupPeers(triggerPeerID string) {
 
 		if m.activateSinglePeer(cfg, mp) {
 			activatedCount++
-			cfg.Log.Infof("activated peer as part of HA group %s (triggered by %s)", haGroup, triggerPeerID)
+			cfg.Log.Infof("activated peer as part of HA group (triggered by %s)", triggerPeerID)
 			m.peerStore.PeerConnOpen(m.engineCtx, cfg.PublicKey)
 		}
 	}
-	}
 
 	if activatedCount > 0 {
 		log.Infof("activated %d additional peers in HA groups for peer %s (groups: %v)",
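Note: the reshuffle above changes the lock discipline. Group membership is now snapshotted under `routesMu`, the lock is released, and only then are peers activated, since activation can block. The pattern in isolation, with illustrative types:

```go
package sketch

import "sync"

// index guards HA-group membership the way the manager guards its route maps.
type index struct {
	mu     sync.RWMutex
	groups map[string][]string
}

// membersExcept copies the other members of a group out under the read lock,
// so the caller can run slow activation work without holding mu.
func (ix *index) membersExcept(group, self string) []string {
	ix.mu.RLock()
	defer ix.mu.RUnlock()
	var out []string
	for _, id := range ix.groups[group] {
		if id != self {
			out = append(out, id)
		}
	}
	return out
}
```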
@@ -373,6 +384,51 @@ func (m *Manager) activateHAGroupPeers(triggerPeerID string) {
 	}
 }
 
+// shouldActivateNewPeer checks if a newly added peer should be activated
+// because other peers in its HA groups are already active
+func (m *Manager) shouldActivateNewPeer(peerID string) (route.HAUniqueID, bool) {
+	m.routesMu.RLock()
+	defer m.routesMu.RUnlock()
+
+	haGroups := m.peerToHAGroups[peerID]
+	if len(haGroups) == 0 {
+		return "", false
+	}
+
+	for _, haGroup := range haGroups {
+		peers := m.haGroupToPeers[haGroup]
+		for _, groupPeerID := range peers {
+			if groupPeerID == peerID {
+				continue
+			}
+
+			cfg, ok := m.managedPeers[groupPeerID]
+			if !ok {
+				continue
+			}
+			if mp, ok := m.managedPeersByConnID[cfg.PeerConnID]; ok && mp.expectedWatcher == watcherInactivity {
+				return haGroup, true
+			}
+		}
+	}
+	return "", false
+}
+
+// activateNewPeerInActiveGroup activates a newly added peer that should be active due to HA group
+func (m *Manager) activateNewPeerInActiveGroup(peerCfg lazyconn.PeerConfig) {
+	mp, ok := m.managedPeersByConnID[peerCfg.PeerConnID]
+	if !ok {
+		return
+	}
+
+	if !m.activateSinglePeer(&peerCfg, mp) {
+		return
+	}
+
+	peerCfg.Log.Infof("activated newly added peer due to active HA group peers")
+	m.peerStore.PeerConnOpen(m.engineCtx, peerCfg.PublicKey)
+}
+
 func (m *Manager) addActivePeer(peerCfg *lazyconn.PeerConfig) error {
 	if _, ok := m.managedPeers[peerCfg.PublicKey]; ok {
 		peerCfg.Log.Warnf("peer already managed")
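Note on the watcher states used above: a lazily managed peer sits on the activity watcher while idle, and is moved to the inactivity watcher once activated. That is why `expectedWatcher == watcherInactivity` is how the code asks "is this peer currently up?". A tiny restatement of that convention (the constant names mirror the diff; the enum itself is illustrative):

```go
package sketch

// watcherType restates the two watcher states used by the manager.
type watcherType int

const (
	watcherActivity   watcherType = iota // idle peer, waiting for first traffic
	watcherInactivity                    // activated peer, watched for going idle
)

// isActivated mirrors the checks in shouldActivateNewPeer and
// shouldDeferIdleForHA: a peer on the inactivity watcher is considered up.
func isActivated(expected watcherType) bool {
	return expected == watcherInactivity
}
```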
@@ -422,6 +478,48 @@ func (m *Manager) close() {
 	log.Infof("lazy connection manager closed")
 }
 
+// shouldDeferIdleForHA checks if peer should stay connected due to HA group requirements
+func (m *Manager) shouldDeferIdleForHA(peerID string) bool {
+	m.routesMu.RLock()
+	defer m.routesMu.RUnlock()
+
+	haGroups := m.peerToHAGroups[peerID]
+	if len(haGroups) == 0 {
+		return false
+	}
+
+	for _, haGroup := range haGroups {
+		groupPeers := m.haGroupToPeers[haGroup]
+
+		for _, groupPeerID := range groupPeers {
+			if groupPeerID == peerID {
+				continue
+			}
+
+			cfg, ok := m.managedPeers[groupPeerID]
+			if !ok {
+				continue
+			}
+
+			groupMp, ok := m.managedPeersByConnID[cfg.PeerConnID]
+			if !ok {
+				continue
+			}
+
+			if groupMp.expectedWatcher != watcherInactivity {
+				continue
+			}
+
+			// Other member is still connected, defer idle
+			if peer, ok := m.peerStore.PeerConn(groupPeerID); ok && peer.IsConnected() {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
 func (m *Manager) onPeerActivity(peerConnID peerid.ConnID) {
 	m.managedPeersMu.Lock()
 	defer m.managedPeersMu.Unlock()
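Note: `shouldDeferIdleForHA` is the idle-side mirror of `shouldActivateNewPeer`. Instead of asking "should a new peer come up because a group member is active?", it asks "should an idle peer stay up because a group member is still connected?". The rule as a pure function, over the same illustrative map shapes as the earlier sketch:

```go
package sketch

// shouldDeferIdle reports whether peerID must stay connected because another
// member of one of its HA groups is both activated and still connected.
// Illustrative extraction; the real method also consults the peer store.
func shouldDeferIdle(
	peerID string,
	peerToGroups map[string][]string,
	groupToPeers map[string][]string,
	activated map[string]bool,
	connected map[string]bool,
) bool {
	for _, group := range peerToGroups[peerID] {
		for _, other := range groupToPeers[group] {
			if other == peerID {
				continue
			}
			if activated[other] && connected[other] {
				return true
			}
		}
	}
	return false
}
```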
@@ -469,6 +567,11 @@ func (m *Manager) onPeerInactivityTimedOut(peerID string) {
 		return
 	}
 
+	if m.shouldDeferIdleForHA(mp.peerCfg.PublicKey) {
+		// todo: review how to reset the inactivity detection
+		return
+	}
+
 	mp.peerCfg.Log.Infof("connection timed out")
 
 	// this is a blocking operation, potentially can be optimized
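Note: one possible shape for the todo above is to re-arm the idle timer whenever the timeout is deferred for HA reasons, so the peer is re-evaluated after another full timeout instead of never again. This is the generic `time.Timer` re-arm idiom, not the codebase's actual inactivity-watcher API:

```go
package sketch

import "time"

// idleTimer re-checks a peer after each deferral instead of dropping the
// inactivity check entirely. Hypothetical helper.
type idleTimer struct {
	timer   *time.Timer
	timeout time.Duration
}

// deferCheck stops the timer, drains a pending fire if necessary, and
// schedules the next inactivity check one full timeout from now.
func (t *idleTimer) deferCheck() {
	if !t.timer.Stop() {
		select {
		case <-t.timer.C:
		default:
		}
	}
	t.timer.Reset(t.timeout)
}
```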
@@ -321,12 +321,12 @@ func (conn *Conn) WgConfig() WgConfig {
 	return conn.config.WgConfig
 }
 
-// IsConnected unit tests only
-// refactor unit test to use status recorder; refactor status recorder to manage connection status in peer.Conn
+// IsConnected returns true if the peer is connected
 func (conn *Conn) IsConnected() bool {
 	conn.mu.Lock()
 	defer conn.mu.Unlock()
-	return conn.currentConnPriority != conntype.None
+	return conn.evalStatus() == StatusConnected
 }
 
 func (conn *Conn) GetKey() string {
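Note: this second hunk touches the `peer.Conn` side referenced by the removed comment. `IsConnected` now derives its answer from the evaluated connection status rather than from the cached transport priority, which is what the HA deferral check above relies on. The getter pattern in isolation (fields and the status evaluation are invented for illustration; the real `Conn` uses `evalStatus() == StatusConnected` as in the diff):

```go
package sketch

import "sync"

// connSketch: IsConnected derives its answer from evaluated state under the
// mutex rather than returning a cached transport field.
type connSketch struct {
	mu      sync.Mutex
	p2pUp   bool
	relayUp bool
}

func (c *connSketch) evalStatus() string {
	if c.p2pUp || c.relayUp {
		return "connected"
	}
	return "disconnected"
}

func (c *connSketch) IsConnected() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.evalStatus() == "connected"
}
```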