Use select for turn credentials and peers update (#443)

Also, prevent a peer update when the provided SSH key is the same as the stored one
This commit is contained in:
Maycon Santos 2022-08-27 12:57:03 +02:00 committed by GitHub
parent dd4ff61b51
commit c13f0b9f07
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 62 additions and 10 deletions

View File

@ -390,6 +390,11 @@ func (am *DefaultAccountManager) UpdatePeerSSHKey(peerKey string, sshKey string)
return err return err
} }
if peer.SSHKey == sshKey {
log.Debugf("same SSH key provided for peer %s, skipping update", peerKey)
return nil
}
account, err := am.Store.GetPeerAccount(peerKey) account, err := am.Store.GetPeerAccount(peerKey)
if err != nil { if err != nil {
return err return err

View File

@ -85,15 +85,18 @@ func (m *TimeBasedAuthSecretsManager) SetupRefresh(peerKey string) {
m.cancel(peerKey) m.cancel(peerKey)
cancel := make(chan struct{}, 1) cancel := make(chan struct{}, 1)
m.cancelMap[peerKey] = cancel m.cancelMap[peerKey] = cancel
log.Debugf("starting turn refresh for %s", peerKey)
go func() { go func() {
//we don't want to regenerate credentials right on expiration, so we do it slightly before (at 3/4 of TTL)
ticker := time.NewTicker(m.config.CredentialsTTL.Duration / 4 * 3)
for { for {
select { select {
case <-cancel: case <-cancel:
log.Debugf("stopping turn refresh for %s", peerKey)
return return
default: case <-ticker.C:
//we don't want to regenerate credentials right on expiration, so we do it slightly before (at 3/4 of TTL)
time.Sleep(m.config.CredentialsTTL.Duration / 4 * 3)
c := m.GenerateCredentials() c := m.GenerateCredentials()
var turns []*proto.ProtectedHostConfig var turns []*proto.ProtectedHostConfig
for _, host := range m.config.Turns { for _, host := range m.config.Turns {

View File

@ -6,6 +6,8 @@ import (
"sync" "sync"
) )
const channelBufferSize = 100
type UpdateMessage struct { type UpdateMessage struct {
Update *proto.SyncResponse Update *proto.SyncResponse
} }
@ -28,7 +30,12 @@ func (p *PeersUpdateManager) SendUpdate(peer string, update *UpdateMessage) erro
p.channelsMux.Lock() p.channelsMux.Lock()
defer p.channelsMux.Unlock() defer p.channelsMux.Unlock()
if channel, ok := p.peerChannels[peer]; ok { if channel, ok := p.peerChannels[peer]; ok {
channel <- update select {
case channel <- update:
log.Infof("update was sent to channel for peer %s", peer)
default:
log.Warnf("channel for peer %s is %d full", peer, len(channel))
}
return nil return nil
} }
log.Debugf("peer %s has no channel", peer) log.Debugf("peer %s has no channel", peer)
@ -45,7 +52,7 @@ func (p *PeersUpdateManager) CreateChannel(peerKey string) chan *UpdateMessage {
close(channel) close(channel)
} }
//mbragin: todo shouldn't it be more? or configurable? //mbragin: todo shouldn't it be more? or configurable?
channel := make(chan *UpdateMessage, 100) channel := make(chan *UpdateMessage, channelBufferSize)
p.peerChannels[peerKey] = channel p.peerChannels[peerKey] = channel
log.Debugf("opened updates channel for a peer %s", peerKey) log.Debugf("opened updates channel for a peer %s", peerKey)

View File

@ -3,13 +3,14 @@ package server
import ( import (
"github.com/netbirdio/netbird/management/proto" "github.com/netbirdio/netbird/management/proto"
"testing" "testing"
"time"
) )
var peersUpdater *PeersUpdateManager //var peersUpdater *PeersUpdateManager
func TestCreateChannel(t *testing.T) { func TestCreateChannel(t *testing.T) {
peer := "test-create" peer := "test-create"
peersUpdater = NewPeersUpdateManager() peersUpdater := NewPeersUpdateManager()
defer peersUpdater.CloseChannel(peer) defer peersUpdater.CloseChannel(peer)
_ = peersUpdater.CreateChannel(peer) _ = peersUpdater.CreateChannel(peer)
@ -20,12 +21,17 @@ func TestCreateChannel(t *testing.T) {
func TestSendUpdate(t *testing.T) { func TestSendUpdate(t *testing.T) {
peer := "test-sendupdate" peer := "test-sendupdate"
update := &UpdateMessage{Update: &proto.SyncResponse{}} peersUpdater := NewPeersUpdateManager()
update1 := &UpdateMessage{Update: &proto.SyncResponse{
NetworkMap: &proto.NetworkMap{
Serial: 0,
},
}}
_ = peersUpdater.CreateChannel(peer) _ = peersUpdater.CreateChannel(peer)
if _, ok := peersUpdater.peerChannels[peer]; !ok { if _, ok := peersUpdater.peerChannels[peer]; !ok {
t.Error("Error creating the channel") t.Error("Error creating the channel")
} }
err := peersUpdater.SendUpdate(peer, update) err := peersUpdater.SendUpdate(peer, update1)
if err != nil { if err != nil {
t.Error("Error sending update: ", err) t.Error("Error sending update: ", err)
} }
@ -34,10 +40,41 @@ func TestSendUpdate(t *testing.T) {
default: default:
t.Error("Update wasn't send") t.Error("Update wasn't send")
} }
for range [channelBufferSize]int{} {
err = peersUpdater.SendUpdate(peer, update1)
if err != nil {
t.Errorf("got an early error sending update: %v ", err)
}
}
update2 := &UpdateMessage{Update: &proto.SyncResponse{
NetworkMap: &proto.NetworkMap{
Serial: 10,
},
}}
err = peersUpdater.SendUpdate(peer, update2)
if err != nil {
t.Error("update shouldn't return an error when channel buffer is full")
}
timeout := time.After(5 * time.Second)
for range [channelBufferSize]int{} {
select {
case <-timeout:
t.Error("timed out reading previously sent updates")
case updateReader := <-peersUpdater.peerChannels[peer]:
if updateReader.Update.NetworkMap.Serial == update2.Update.NetworkMap.Serial {
t.Error("got the update that shouldn't have been sent")
}
}
}
} }
func TestCloseChannel(t *testing.T) { func TestCloseChannel(t *testing.T) {
peer := "test-close" peer := "test-close"
peersUpdater := NewPeersUpdateManager()
_ = peersUpdater.CreateChannel(peer) _ = peersUpdater.CreateChannel(peer)
if _, ok := peersUpdater.peerChannels[peer]; !ok { if _, ok := peersUpdater.peerChannels[peer]; !ok {
t.Error("Error creating the channel") t.Error("Error creating the channel")