Refactor handshaker loop

This commit is contained in:
Zoltán Papp 2024-06-21 12:35:28 +02:00
parent bfe60c01ba
commit 4a08f1a1e9
4 changed files with 283 additions and 272 deletions

View File

@ -125,7 +125,6 @@ func NewConn(engineCtx context.Context, config ConnConfig, statusRecorder *Statu
wgProxyFactory: wgProxyFactory,
signaler: signaler,
allowedIPsIP: allowedIPsIP.String(),
handshaker: NewHandshaker(ctx, connLog, config, signaler),
statusRelay: StatusDisconnected,
statusICE: StatusDisconnected,
}
@ -133,7 +132,6 @@ func NewConn(engineCtx context.Context, config ConnConfig, statusRecorder *Statu
rFns := WorkerRelayCallbacks{
OnConnReady: conn.relayConnectionIsReady,
OnStatusChanged: conn.onWorkerRelayStateChanged,
DoHandshake: conn.doHandshake,
}
wFns := WorkerICECallbacks{
@ -142,8 +140,13 @@ func NewConn(engineCtx context.Context, config ConnConfig, statusRecorder *Statu
DoHandshake: conn.doHandshake,
}
conn.handshaker = NewHandshaker(ctx, connLog, config, signaler, conn.onNewOffer)
go conn.handshaker.Listen()
conn.workerRelay = NewWorkerRelay(ctx, connLog, relayManager, config, rFns)
conn.workerICE = NewWorkerICE(ctx, connLog, config, config.ICEConfig, signaler, iFaceDiscover, statusRecorder, wFns)
conn.workerICE, err = NewWorkerICE(ctx, connLog, config, config.ICEConfig, signaler, iFaceDiscover, statusRecorder, wFns)
if err != nil {
return nil, err
}
return conn, nil
}
@ -525,25 +528,21 @@ func (conn *Conn) updateStatus(peerState State, remoteRosenpassPubKey []byte, re
return
}
func (conn *Conn) doHandshake() (*OfferAnswer, error) {
func (conn *Conn) doHandshake() error {
if !conn.signaler.Ready() {
return nil, ErrSignalIsNotReady
return ErrSignalIsNotReady
}
var (
ha HandshakeArgs
err error
)
ha.IceUFrag, ha.IcePwd, err = conn.workerICE.GetLocalUserCredentials()
if err != nil {
conn.log.Errorf("failed to get local user credentials: %v", err)
}
ha.IceUFrag, ha.IcePwd = conn.workerICE.GetLocalUserCredentials()
addr, err := conn.workerRelay.RelayAddress()
if err == nil {
ha.RelayAddr = addr.String()
}
return conn.handshaker.Handshake(ha)
return conn.handshaker.SendOffer(ha)
}
func (conn *Conn) evalStatus() ConnStatus {
@ -558,6 +557,12 @@ func (conn *Conn) evalStatus() ConnStatus {
return StatusDisconnected
}
// onNewOffer fans a confirmed remote offer/answer out to the relay and ICE
// workers, each on its own goroutine so neither worker blocks the
// handshaker's Listen loop.
func (conn *Conn) onNewOffer(answer *OfferAnswer) {
	// todo move to this callback into handshaker
	go conn.workerRelay.OnNewOffer(answer)
	go conn.workerICE.OnNewOffer(answer)
}
// isRosenpassEnabled reports whether the remote peer supplied a rosenpass
// public key. A zero-length key is treated the same as a missing one, since
// an empty slice cannot be a valid public key (the previous nil-compare
// reported an empty non-nil slice as "enabled").
func isRosenpassEnabled(remoteRosenpassPubKey []byte) bool {
	return len(remoteRosenpassPubKey) > 0
}

View File

@ -54,12 +54,27 @@ type HandshakeArgs struct {
RelayAddr string
}
// Equal reports whether two HandshakeArgs carry the same ICE credentials
// (ufrag/pwd) and the same relay address.
func (a HandshakeArgs) Equal(args HandshakeArgs) bool {
	return a.IceUFrag == args.IceUFrag &&
		a.IcePwd == args.IcePwd &&
		a.RelayAddr == args.RelayAddr
}
type Handshaker struct {
mu sync.Mutex
ctx context.Context
log *log.Entry
config ConnConfig
signaler *Signaler
mu sync.Mutex
ctx context.Context
log *log.Entry
config ConnConfig
signaler *Signaler
onNewOfferListener func(*OfferAnswer)
// remoteOffersCh is a channel used to wait for remote credentials to proceed with the connection
remoteOffersCh chan OfferAnswer
@ -69,50 +84,54 @@ type Handshaker struct {
remoteOfferAnswer *OfferAnswer
remoteOfferAnswerCreated time.Time
handshakeArgs HandshakeArgs
lastSentOffer time.Time
lastOfferArgs HandshakeArgs
}
func NewHandshaker(ctx context.Context, log *log.Entry, config ConnConfig, signaler *Signaler) *Handshaker {
func NewHandshaker(ctx context.Context, log *log.Entry, config ConnConfig, signaler *Signaler, onNewOfferListener func(*OfferAnswer)) *Handshaker {
return &Handshaker{
ctx: ctx,
log: log,
config: config,
signaler: signaler,
remoteOffersCh: make(chan OfferAnswer),
remoteAnswerCh: make(chan OfferAnswer),
ctx: ctx,
log: log,
config: config,
signaler: signaler,
remoteOffersCh: make(chan OfferAnswer),
remoteAnswerCh: make(chan OfferAnswer),
onNewOfferListener: onNewOfferListener,
}
}
func (h *Handshaker) Handshake(args HandshakeArgs) (*OfferAnswer, error) {
func (h *Handshaker) Listen() {
for {
remoteOfferAnswer, err := h.waitForRemoteOfferConfirmation()
if err != nil {
if _, ok := err.(*ConnectionClosedError); ok {
return
}
log.Errorf("failed to received remote offer confirmation: %s", err)
continue
}
h.log.Debugf("received connection confirmation, running version %s and with remote WireGuard listen port %d", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort)
go h.onNewOfferListener(remoteOfferAnswer)
}
}
func (h *Handshaker) SendOffer(args HandshakeArgs) error {
h.mu.Lock()
defer h.mu.Unlock()
h.log.Infof("start handshake with remote peer")
h.handshakeArgs = args
cachedOfferAnswer, ok := h.cachedHandshake()
if ok {
return cachedOfferAnswer, nil
if h.lastOfferArgs.Equal(args) && h.lastSentOffer.After(time.Now().Add(-time.Second)) {
return nil
}
err := h.sendOffer(args)
if err != nil {
return nil, err
return err
}
// Only continue once we got a connection confirmation from the remote peer.
// The connection timeout could have happened before a confirmation received from the remote.
// The connection could have also been closed externally (e.g. when we received an update from the management that peer shouldn't be connected)
remoteOfferAnswer, err := h.waitForRemoteOfferConfirmation()
if err != nil {
return nil, err
}
h.storeRemoteOfferAnswer(remoteOfferAnswer)
h.log.Debugf("received connection confirmation, running version %s and with remote WireGuard listen port %d",
remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort)
return remoteOfferAnswer, nil
h.lastOfferArgs = args
h.lastSentOffer = time.Now()
return nil
}
// OnRemoteOffer handles an offer from the remote peer and returns true if the message was accepted, false otherwise
@ -149,6 +168,23 @@ func (h *Handshaker) OnRemoteAnswer(answer OfferAnswer) bool {
}
}
// waitForRemoteOfferConfirmation blocks until the remote peer either sends
// us an offer (in which case we reply with an answer before proceeding) or
// answers our own offer. It returns *ConnectionClosedError when the context
// is canceled.
//
// NOTE(review): unlike the previous implementation there is no timeout case
// here — this select can block indefinitely until ctx is canceled; confirm
// that is intended.
func (h *Handshaker) waitForRemoteOfferConfirmation() (*OfferAnswer, error) {
	select {
	case remoteOfferAnswer := <-h.remoteOffersCh:
		// received confirmation from the remote peer -> ready to proceed
		err := h.sendAnswer()
		if err != nil {
			return nil, err
		}
		return &remoteOfferAnswer, nil
	case remoteOfferAnswer := <-h.remoteAnswerCh:
		return &remoteOfferAnswer, nil
	case <-h.ctx.Done():
		// closed externally
		return nil, NewConnectionClosedError(h.config.Key)
	}
}
// sendOffer prepares local user credentials and signals them to the remote peer
func (h *Handshaker) sendOffer(args HandshakeArgs) error {
offer := OfferAnswer{
@ -166,12 +202,12 @@ func (h *Handshaker) sendOffer(args HandshakeArgs) error {
func (h *Handshaker) sendAnswer() error {
h.log.Debugf("sending answer")
answer := OfferAnswer{
IceCredentials: IceCredentials{h.handshakeArgs.IceUFrag, h.handshakeArgs.IcePwd},
IceCredentials: IceCredentials{h.lastOfferArgs.IceUFrag, h.lastOfferArgs.IcePwd},
WgListenPort: h.config.LocalWgPort,
Version: version.NetbirdVersion(),
RosenpassPubKey: h.config.RosenpassPubKey,
RosenpassAddr: h.config.RosenpassAddr,
RelaySrvAddress: h.handshakeArgs.RelayAddr,
RelaySrvAddress: h.lastOfferArgs.RelayAddr,
}
err := h.signaler.SignalAnswer(answer, h.config.Key)
if err != nil {
@ -180,43 +216,3 @@ func (h *Handshaker) sendAnswer() error {
return nil
}
// waitForRemoteOfferConfirmation blocks until one of:
//   - the remote peer sends us an offer (we reply with an answer and proceed),
//   - the remote peer answers our own offer,
//   - the configured handshake timeout elapses, or
//   - the context is canceled.
func (h *Handshaker) waitForRemoteOfferConfirmation() (*OfferAnswer, error) {
	timeout := time.NewTimer(h.config.Timeout)
	defer timeout.Stop()

	select {
	case remoteOfferAnswer := <-h.remoteOffersCh:
		// received confirmation from the remote peer -> ready to proceed
		err := h.sendAnswer()
		if err != nil {
			return nil, err
		}
		return &remoteOfferAnswer, nil
	case remoteOfferAnswer := <-h.remoteAnswerCh:
		return &remoteOfferAnswer, nil
	case <-timeout.C:
		h.log.Debugf("handshake timeout")
		return nil, NewConnectionTimeoutError(h.config.Key, h.config.Timeout)
	case <-h.ctx.Done():
		// closed externally
		return nil, NewConnectionClosedError(h.config.Key)
	}
}
// storeRemoteOfferAnswer caches the latest remote offer/answer together with
// its arrival time so cachedHandshake can serve it while it is still fresh.
func (h *Handshaker) storeRemoteOfferAnswer(answer *OfferAnswer) {
	h.remoteOfferAnswer = answer
	h.remoteOfferAnswerCreated = time.Now()
}
// cachedHandshake returns the most recently stored remote offer/answer as
// long as it is younger than handshakeCacheTimeout; the boolean reports
// whether a usable cached value was found.
func (h *Handshaker) cachedHandshake() (*OfferAnswer, bool) {
	answer := h.remoteOfferAnswer
	switch {
	case answer == nil:
		return nil, false
	case time.Since(h.remoteOfferAnswerCreated) > handshakeCacheTimeout:
		return nil, false
	default:
		return answer, true
	}
}

View File

@ -2,7 +2,6 @@ package peer
import (
"context"
"errors"
"fmt"
"math/rand"
"net"
@ -69,7 +68,7 @@ type ICEConnInfo struct {
type WorkerICECallbacks struct {
OnConnReady func(ConnPriority, ICEConnInfo)
OnStatusChanged func(ConnStatus)
DoHandshake func() (*OfferAnswer, error)
DoHandshake func() error
}
type WorkerICE struct {
@ -90,12 +89,19 @@ type WorkerICE struct {
StunTurn []*stun.URI
sentExtraSrflx bool
localUfrag string
localPwd string
localUfrag string
localPwd string
creadantialHasUsed bool
hasRelayOnLocally bool
onDisconnected context.CancelFunc
onOfferReceived context.CancelFunc
tickerCancel context.CancelFunc
ticker *time.Ticker
}
func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, configICE ICEConfig, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, callBacks WorkerICECallbacks) *WorkerICE {
cice := &WorkerICE{
func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, configICE ICEConfig, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, callBacks WorkerICECallbacks) (*WorkerICE, error) {
w := &WorkerICE{
ctx: ctx,
log: log,
config: config,
@ -105,113 +111,141 @@ func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, config
statusRecorder: statusRecorder,
conn: callBacks,
}
return cice
localUfrag, localPwd, err := generateICECredentials()
if err != nil {
return nil, err
}
w.localUfrag = localUfrag
w.localPwd = localPwd
return w, nil
}
// SetupICEConnection sets up an ICE connection with the remote peer.
// If the relay mode is supported then try to connect in p2p way only.
// The offer/reconnect loop (sendOffer) runs until the context is canceled;
// on a successful connection the OnConnReady callback is invoked.
// It is a no-op when an ICE agent is already running.
func (w *WorkerICE) SetupICEConnection(hasRelayOnLocally bool) {
	// bug fix: removed a leftover `time.Sleep(20 * time.Second)` debug delay
	// that stalled every connection setup by 20 seconds.
	w.muxAgent.Lock()
	defer w.muxAgent.Unlock()
	if w.agent != nil {
		return
	}
	w.hasRelayOnLocally = hasRelayOnLocally
	go w.sendOffer()
}
func (w *WorkerICE) sendOffer() {
w.ticker = time.NewTicker(w.config.Timeout)
defer w.ticker.Stop()
tickerCtx, tickerCancel := context.WithCancel(w.ctx)
w.tickerCancel = tickerCancel
w.conn.OnStatusChanged(StatusConnecting)
w.log.Debugf("ICE trigger a new handshake")
err := w.conn.DoHandshake()
if err != nil {
w.log.Errorf("%s", err)
}
for {
if !w.waitForReconnectTry() {
w.log.Debugf("ICE trigger new reconnect handshake")
select {
case <-w.ticker.C:
err := w.conn.DoHandshake()
if err != nil {
w.log.Errorf("%s", err)
}
case <-tickerCtx.Done():
w.log.Debugf("left reconnect loop")
return
case <-w.ctx.Done():
return
}
w.conn.OnStatusChanged(StatusConnecting)
w.log.Debugf("trying to establish ICE connection with peer %s", w.config.Key)
remoteOfferAnswer, err := w.conn.DoHandshake()
if err != nil {
if errors.Is(err, ErrSignalIsNotReady) {
w.log.Infof("signal client isn't ready, skipping connection attempt")
}
continue
}
var preferredCandidateTypes []ice.CandidateType
if hasRelayOnLocally && remoteOfferAnswer.RelaySrvAddress != "" {
w.selectedPriority = connPriorityICEP2P
preferredCandidateTypes = candidateTypesP2P()
} else {
w.selectedPriority = connPriorityICETurn
preferredCandidateTypes = candidateTypes()
}
ctx, ctxCancel := context.WithCancel(w.ctx)
w.muxAgent.Lock()
agent, err := w.reCreateAgent(ctxCancel, preferredCandidateTypes)
if err != nil {
ctxCancel()
w.muxAgent.Unlock()
continue
}
w.agent = agent
// generate credentials for the next loop. Important the credentials are generated before handshake, because
// the handshake could provide a cached offer-answer
w.localUfrag, w.localPwd, err = generateICECredentials()
if err != nil {
ctxCancel()
w.muxAgent.Unlock()
continue
}
w.muxAgent.Unlock()
err = w.agent.GatherCandidates()
if err != nil {
ctxCancel()
continue
}
// will block until connection succeeded
// but it won't release if ICE Agent went into Disconnected or Failed state,
// so we have to cancel it with the provided context once agent detected a broken connection
remoteConn, err := w.turnAgentDial(remoteOfferAnswer)
if err != nil {
ctxCancel()
continue
}
pair, err := w.agent.GetSelectedCandidatePair()
if err != nil {
ctxCancel()
continue
}
if !isRelayCandidate(pair.Local) {
// dynamically set remote WireGuard port if other side specified a different one from the default one
remoteWgPort := iface.DefaultWgPort
if remoteOfferAnswer.WgListenPort != 0 {
remoteWgPort = remoteOfferAnswer.WgListenPort
}
// To support old version's with direct mode we attempt to punch an additional role with the remote WireGuard port
go w.punchRemoteWGPort(pair, remoteWgPort)
}
ci := ICEConnInfo{
RemoteConn: remoteConn,
RosenpassPubKey: remoteOfferAnswer.RosenpassPubKey,
RosenpassAddr: remoteOfferAnswer.RosenpassAddr,
LocalIceCandidateType: pair.Local.Type().String(),
RemoteIceCandidateType: pair.Remote.Type().String(),
LocalIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Local.Address(), pair.Local.Port()),
RemoteIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Remote.Address(), pair.Remote.Port()),
Direct: !isRelayCandidate(pair.Local),
Relayed: isRelayed(pair),
RelayedOnLocal: isRelayCandidate(pair.Local),
}
go w.conn.OnConnReady(w.selectedPriority, ci)
<-ctx.Done()
ctxCancel()
_ = w.agent.Close()
w.conn.OnStatusChanged(StatusDisconnected)
}
}
// OnNewOffer runs ICE connection establishment for a freshly confirmed
// remote offer/answer: it stops the reconnect ticker, recreates the ICE
// agent, gathers candidates, dials (or accepts) the remote peer, and hands
// the selected pair to the OnConnReady callback. It is a no-op when an agent
// is already running.
func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) {
	log.Debugf("OnNewOffer for ICE")
	w.muxAgent.Lock()
	if w.agent != nil {
		log.Debugf("agent already exists, skipping the offer")
		w.muxAgent.Unlock()
		return
	}

	// cancel reconnection loop
	// NOTE(review): tickerCancel is set by sendOffer — confirm OnNewOffer
	// cannot run before SetupICEConnection started the loop, otherwise this
	// is a nil-func call.
	w.log.Debugf("canceling reconnection loop")
	w.tickerCancel()

	// prefer pure p2p candidates only when both sides can fall back to relay
	var preferredCandidateTypes []ice.CandidateType
	if w.hasRelayOnLocally && remoteOfferAnswer.RelaySrvAddress != "" {
		w.selectedPriority = connPriorityICEP2P
		preferredCandidateTypes = candidateTypesP2P()
	} else {
		w.selectedPriority = connPriorityICETurn
		preferredCandidateTypes = candidateTypes()
	}

	w.log.Debugf("recreate agent")
	agentCtx, agentCancel := context.WithCancel(w.ctx)
	agent, err := w.reCreateAgent(agentCancel, preferredCandidateTypes)
	if err != nil {
		w.log.Errorf("failed to recreate ICE Agent: %s", err)
		// bug fix: this error path previously returned with muxAgent still
		// locked, deadlocking every later caller.
		w.muxAgent.Unlock()
		return
	}
	w.agent = agent
	w.muxAgent.Unlock()

	w.log.Debugf("gather candidates")
	err = w.agent.GatherCandidates()
	if err != nil {
		w.log.Debugf("failed to gather candidates: %s", err)
		return
	}

	// will block until connection succeeded
	// but it won't release if ICE Agent went into Disconnected or Failed state,
	// so we have to cancel it with the provided context once agent detected a broken connection
	w.log.Debugf("turnAgentDial")
	remoteConn, err := w.turnAgentDial(agentCtx, remoteOfferAnswer)
	if err != nil {
		w.log.Debugf("failed to dial the remote peer: %s", err)
		return
	}
	w.log.Debugf("GetSelectedCandidatePair")
	pair, err := w.agent.GetSelectedCandidatePair()
	if err != nil {
		// log instead of silently returning so failures are diagnosable
		w.log.Debugf("failed to get selected candidate pair: %s", err)
		return
	}

	if !isRelayCandidate(pair.Local) {
		// dynamically set remote WireGuard port if other side specified a different one from the default one
		remoteWgPort := iface.DefaultWgPort
		if remoteOfferAnswer.WgListenPort != 0 {
			remoteWgPort = remoteOfferAnswer.WgListenPort
		}

		// To support old version's with direct mode we attempt to punch an additional role with the remote WireGuard port
		go w.punchRemoteWGPort(pair, remoteWgPort)
	}

	ci := ICEConnInfo{
		RemoteConn:                 remoteConn,
		RosenpassPubKey:            remoteOfferAnswer.RosenpassPubKey,
		RosenpassAddr:              remoteOfferAnswer.RosenpassAddr,
		LocalIceCandidateType:      pair.Local.Type().String(),
		RemoteIceCandidateType:     pair.Remote.Type().String(),
		LocalIceCandidateEndpoint:  fmt.Sprintf("%s:%d", pair.Local.Address(), pair.Local.Port()),
		RemoteIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Remote.Address(), pair.Remote.Port()),
		Direct:                     !isRelayCandidate(pair.Local),
		Relayed:                    isRelayed(pair),
		RelayedOnLocal:             isRelayCandidate(pair.Local),
	}
	w.log.Debugf("on conn ready")
	go w.conn.OnConnReady(w.selectedPriority, ci)
}
// OnRemoteCandidate Handles ICE connection Candidate provided by the remote peer.
func (w *WorkerICE) OnRemoteCandidate(candidate ice.Candidate, haRoutes route.HAMap) {
w.muxAgent.Lock()
@ -233,19 +267,14 @@ func (w *WorkerICE) OnRemoteCandidate(candidate ice.Candidate, haRoutes route.HA
}
}
func (w *WorkerICE) GetLocalUserCredentials() (frag string, pwd string, err error) {
func (w *WorkerICE) GetLocalUserCredentials() (frag string, pwd string) {
w.muxAgent.Lock()
defer w.muxAgent.Unlock()
if w.localUfrag != "" && w.localPwd != "" {
return w.localUfrag, w.localPwd, nil
}
w.localUfrag, w.localPwd, err = generateICECredentials()
return w.localUfrag, w.localPwd, err
return w.localUfrag, w.localPwd
}
func (w *WorkerICE) reCreateAgent(ctxCancel context.CancelFunc, relaySupport []ice.CandidateType) (*ice.Agent, error) {
func (w *WorkerICE) reCreateAgent(agentCancel context.CancelFunc, relaySupport []ice.CandidateType) (*ice.Agent, error) {
log.Debugf("--RECREATE AGENT-----")
transportNet, err := w.newStdNet()
if err != nil {
w.log.Errorf("failed to create pion's stdnet: %s", err)
@ -256,9 +285,9 @@ func (w *WorkerICE) reCreateAgent(ctxCancel context.CancelFunc, relaySupport []i
iceRelayAcceptanceMinWait := iceRelayAcceptanceMinWait()
agentConfig := &ice.AgentConfig{
MulticastDNSMode: ice.MulticastDNSModeDisabled,
NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4, ice.NetworkTypeUDP6},
Urls: w.configICE.StunTurn.Load().([]*stun.URI),
MulticastDNSMode: ice.MulticastDNSModeDisabled,
NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4, ice.NetworkTypeUDP6},
//Urls: w.configICE.StunTurn.Load().([]*stun.URI),
CandidateTypes: relaySupport,
InterfaceFilter: stdnet.InterfaceFilter(w.configICE.InterfaceBlackList),
UDPMux: w.configICE.UDPMux,
@ -291,7 +320,23 @@ func (w *WorkerICE) reCreateAgent(ctxCancel context.CancelFunc, relaySupport []i
err = agent.OnConnectionStateChange(func(state ice.ConnectionState) {
w.log.Debugf("ICE ConnectionState has changed to %s", state.String())
if state == ice.ConnectionStateFailed || state == ice.ConnectionStateDisconnected {
ctxCancel()
w.conn.OnStatusChanged(StatusDisconnected)
w.muxAgent.Lock()
agentCancel()
_ = agent.Close()
w.agent = nil
// generate credentials for the next agent creation loop
localUfrag, localPwd, err := generateICECredentials()
if err != nil {
log.Errorf("failed to generate new ICE credentials: %s", err)
}
w.localUfrag = localUfrag
w.localPwd = localPwd
w.muxAgent.Unlock()
go w.sendOffer()
}
})
if err != nil {
@ -387,12 +432,12 @@ func (w *WorkerICE) shouldSendExtraSrflxCandidate(candidate ice.Candidate) bool
return false
}
func (w *WorkerICE) turnAgentDial(remoteOfferAnswer *OfferAnswer) (*ice.Conn, error) {
func (w *WorkerICE) turnAgentDial(ctx context.Context, remoteOfferAnswer *OfferAnswer) (*ice.Conn, error) {
isControlling := w.config.LocalKey > w.config.Key
if isControlling {
return w.agent.Dial(w.ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
return w.agent.Dial(ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
} else {
return w.agent.Accept(w.ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
return w.agent.Accept(ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
}
}
@ -465,7 +510,7 @@ func candidateTypes() []ice.CandidateType {
}
// candidateTypesP2P returns the ICE candidate types allowed for a direct
// p2p connection attempt.
// NOTE(review): server-reflexive candidates were removed here, leaving host
// candidates only — confirm this restriction is intentional and not a
// debugging leftover, as it prevents p2p across NAT.
func candidateTypesP2P() []ice.CandidateType {
	return []ice.CandidateType{ice.CandidateTypeHost}
}
func isRelayCandidate(candidate ice.Candidate) bool {
@ -480,6 +525,7 @@ func isRelayed(pair *ice.CandidatePair) bool {
}
func generateICECredentials() (string, string, error) {
log.Debugf("-----GENERATE CREDENTIALS------")
ufrag, err := randutil.GenerateCryptoRandomString(lenUFrag, runesAlpha)
if err != nil {
return "", "", err

View File

@ -2,10 +2,7 @@ package peer
import (
"context"
"errors"
"math/rand"
"net"
"time"
log "github.com/sirupsen/logrus"
@ -21,7 +18,6 @@ type RelayConnInfo struct {
type WorkerRelayCallbacks struct {
OnConnReady func(RelayConnInfo)
OnStatusChanged func(ConnStatus)
DoHandshake func() (*OfferAnswer, error)
}
type WorkerRelay struct {
@ -44,52 +40,37 @@ func NewWorkerRelay(ctx context.Context, log *log.Entry, relayManager *relayClie
// SetupRelayConnection todo: this function is not completed. Make no sense to put it in a for loop because we are not waiting for any event
func (w *WorkerRelay) SetupRelayConnection() {
for {
if !w.waitForReconnectTry() {
return
}
w.log.Debugf("trying to establish Relay connection with peer %s", w.config.Key)
}
remoteOfferAnswer, err := w.conn.DoHandshake()
if err != nil {
if errors.Is(err, ErrSignalIsNotReady) {
w.log.Infof("signal client isn't ready, skipping connection attempt")
}
w.log.Errorf("%s", err)
continue
}
if !w.isRelaySupported(remoteOfferAnswer) {
w.log.Infof("Relay is not supported by remote peer")
// todo should we retry?
// if the remote peer doesn't support relay make no sense to retry infinity
// but if the remote peer supports relay just the connection is lost we should retry
continue
}
// the relayManager will return with error in case if the connection has lost with relay server
currentRelayAddress, err := w.relayManager.RelayAddress()
if err != nil {
w.log.Infof("local Relay connection is lost, skipping connection attempt")
continue
}
srv := w.preferredRelayServer(currentRelayAddress.String(), remoteOfferAnswer.RelaySrvAddress)
relayedConn, err := w.relayManager.OpenConn(srv, w.config.Key)
if err != nil {
w.log.Infof("failed to open relay connection: %s", err)
continue
}
go w.conn.OnConnReady(RelayConnInfo{
relayedConn: relayedConn,
rosenpassPubKey: remoteOfferAnswer.RosenpassPubKey,
rosenpassAddr: remoteOfferAnswer.RosenpassAddr,
})
<-w.ctx.Done()
// OnNewOffer reacts to a confirmed offer/answer from the remote peer: when
// both sides support relaying and the local relay link is alive, it opens a
// relayed connection to the preferred server and hands it to the OnConnReady
// callback on its own goroutine.
func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) {
	if !w.isRelaySupported(remoteOfferAnswer) {
		w.log.Infof("Relay is not supported by remote peer")
		// todo should we retry?
		// if the remote peer doesn't support relay make no sense to retry infinity
		// but if the remote peer supports relay just the connection is lost we should retry
		return
	}

	// the relayManager will return with error in case if the connection has lost with relay server
	localAddr, err := w.relayManager.RelayAddress()
	if err != nil {
		w.log.Infof("local Relay connection is lost, skipping connection attempt")
		return
	}

	serverAddr := w.preferredRelayServer(localAddr.String(), remoteOfferAnswer.RelaySrvAddress)

	rConn, err := w.relayManager.OpenConn(serverAddr, w.config.Key)
	if err != nil {
		w.log.Infof("do not need to reopen relay connection: %s", err)
		return
	}

	info := RelayConnInfo{
		relayedConn:     rConn,
		rosenpassPubKey: remoteOfferAnswer.RosenpassPubKey,
		rosenpassAddr:   remoteOfferAnswer.RosenpassAddr,
	}
	go w.conn.OnConnReady(info)
}
func (w *WorkerRelay) RelayAddress() (net.Addr, error) {
@ -113,20 +94,3 @@ func (w *WorkerRelay) preferredRelayServer(myRelayAddress, remoteRelayAddress st
// RelayIsSupportedLocally reports whether this peer already holds a relay
// server address, i.e. relaying can be offered to remote peers.
func (w *WorkerRelay) RelayIsSupportedLocally() bool {
	return w.relayManager.HasRelayAddress()
}
// waitForReconnectTry waits for a random duration before trying to reconnect
func (w *WorkerRelay) waitForReconnectTry() bool {
minWait := 500
maxWait := 2000
duration := time.Duration(rand.Intn(maxWait-minWait)+minWait) * time.Millisecond
timeout := time.NewTimer(duration)
defer timeout.Stop()
select {
case <-w.ctx.Done():
return false
case <-timeout.C:
return true
}
}