EtherGuard-VPN/device/peer.go
/* SPDX-License-Identifier: MIT
*
* Copyright (C) 2017-2021 WireGuard LLC. All Rights Reserved.
*/
package device

import (
"bytes"
"container/list"
"errors"
"fmt"
"io/ioutil"
"net"
"sync"
"sync/atomic"
"time"
"github.com/KusakabeSi/EtherGuard-VPN/conn"
"github.com/KusakabeSi/EtherGuard-VPN/mtypes"
"github.com/KusakabeSi/EtherGuard-VPN/path"
"gopkg.in/yaml.v2"
)
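// endpoint_tryitem is a single candidate endpoint URL together with the times
// it was first and last attempted. endpoint_trylist keeps two such maps, one
// for URLs pushed by the supernode and one for URLs learned peer to peer,
// guarded by the embedded RWMutex.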
type endpoint_tryitem struct {
URL string
lastTry time.Time
firstTry time.Time
}
type endpoint_trylist struct {
sync.RWMutex
timeout time.Duration
peer *Peer
trymap_super map[string]*endpoint_tryitem
trymap_p2p map[string]*endpoint_tryitem
}
func NewEndpoint_trylist(peer *Peer, timeout time.Duration) *endpoint_trylist {
return &endpoint_trylist{
timeout: timeout,
peer: peer,
trymap_super: make(map[string]*endpoint_tryitem),
trymap_p2p: make(map[string]*endpoint_tryitem),
}
}
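// UpdateSuper replaces the supernode-provided candidate list with urls. URLs
// that no longer resolve are dropped, existing entries keep their retry
// history, and new entries start with a lastTry offset derived from the
// per-URL value supplied in urls.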
func (et *endpoint_trylist) UpdateSuper(urls mtypes.API_connurl, UseLocalIP bool) {
et.Lock()
defer et.Unlock()
newmap_super := make(map[string]*endpoint_tryitem)
if urls.IsEmpty() {
if et.peer.device.LogLevel.LogInternal {
fmt.Println(fmt.Sprintf("Internal: Peer %v : Reset trylist(super) %v", et.peer.ID.ToString(), "nil"))
}
}
for url, it := range urls.GetList(UseLocalIP) {
if url == "" {
continue
}
_, err := conn.LookupIP(url, 0)
if err != nil {
if et.peer.device.LogLevel.LogInternal {
fmt.Println(fmt.Sprintf("Internal: Peer %v : Update trylist(super) %v error: %v", et.peer.ID.ToString(), url, err))
}
continue
}
if val, ok := et.trymap_super[url]; ok {
if et.peer.device.LogLevel.LogInternal {
fmt.Println(fmt.Sprintf("Internal: Peer %v : Update trylist(super) %v", et.peer.ID.ToString(), url))
}
newmap_super[url] = val
} else {
if et.peer.device.LogLevel.LogInternal {
fmt.Println(fmt.Sprintf("Internal: Peer %v : New trylist(super) %v", et.peer.ID.ToString(), url))
}
newmap_super[url] = &endpoint_tryitem{
URL: url,
lastTry: time.Time{}.Add(mtypes.S2TD(it)),
firstTry: time.Time{},
}
}
}
et.trymap_super = newmap_super
}
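// UpdateP2P adds a peer-to-peer learned endpoint URL to the trylist if it
// resolves and is not already present.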
func (et *endpoint_trylist) UpdateP2P(url string) {
_, err := conn.LookupIP(url, 0)
if err != nil {
return
}
et.Lock()
defer et.Unlock()
if _, ok := et.trymap_p2p[url]; !ok {
if et.peer.device.LogLevel.LogInternal {
fmt.Println(fmt.Sprintf("Internal: Peer %v : Add trylist(p2p) %v", et.peer.ID.ToString(), url))
}
et.trymap_p2p[url] = &endpoint_tryitem{
URL: url,
lastTry: time.Now(),
firstTry: time.Time{},
}
}
}
func (et *endpoint_trylist) Delete(url string) {
et.Lock()
defer et.Unlock()
delete(et.trymap_super, url)
delete(et.trymap_p2p, url)
}
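// GetNextTry picks the least recently tried endpoint from both maps, marks it
// as tried now and returns whether it is still within the fast-retry window
// together with its URL. Expired peer-to-peer entries are pruned on the way;
// if no candidate remains it returns false and an empty string.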
func (et *endpoint_trylist) GetNextTry() (bool, string) {
et.Lock() // the loops below mutate trymap_p2p and the retry timestamps, so take the write lock
defer et.Unlock()
var smallest *endpoint_tryitem
FastTry := true
for _, v := range et.trymap_super {
if smallest == nil || smallest.lastTry.After(v.lastTry) {
smallest = v
}
}
for url, v := range et.trymap_p2p {
if v.firstTry.After(time.Time{}) && v.firstTry.Add(et.timeout).Before(time.Now()) {
if et.peer.device.LogLevel.LogInternal {
fmt.Println(fmt.Sprintf("Internal: Peer %v : Delete trylist(p2p) %v", et.peer.ID.ToString(), url))
}
delete(et.trymap_p2p, url)
continue // this endpoint just expired; do not consider it as a candidate
}
if smallest == nil || smallest.lastTry.After(v.lastTry) {
smallest = v
}
}
if smallest == nil {
return false, ""
}
smallest.lastTry = time.Now()
if smallest.firstTry.IsZero() { // record when this endpoint is first attempted
smallest.firstTry = time.Now()
}
if smallest.firstTry.Add(et.timeout).Before(time.Now()) {
FastTry = false
}
return FastTry, smallest.URL
}
type Peer struct {
isRunning AtomicBool
sync.RWMutex // Mostly protects endpoint, but is generally taken whenever we modify peer
keypairs Keypairs
handshake Handshake
device *Device
endpoint conn.Endpoint
endpoint_trylist *endpoint_trylist
LastPacketReceivedAdd1Sec atomic.Value // *time.Time
SingleWayLatency float64
stopping sync.WaitGroup // routines pending stop
ID mtypes.Vertex
AskedForNeighbor bool
StaticConn bool //if true, this peer does not write roaming endpoints to the config file, and its endpoint is reset periodically
ConnURL string
ConnAF int //0: both, 4: ipv4 only, 6: ipv6 only
// These fields are accessed with atomic operations, which must be
// 64-bit aligned even on 32-bit platforms. Go guarantees that an
// allocated struct will be 64-bit aligned. So we place
// atomically-accessed fields up front, so that they can share in
// this alignment before smaller fields throw it off.
stats struct {
txBytes uint64 // bytes sent to peer (endpoint)
rxBytes uint64 // bytes received from peer
lastHandshakeNano int64 // nano seconds since epoch
}
disableRoaming bool
timers struct {
retransmitHandshake *Timer
sendKeepalive *Timer
newHandshake *Timer
zeroKeyMaterial *Timer
persistentKeepalive *Timer
handshakeAttempts uint32
needAnotherKeepalive AtomicBool
sentLastMinuteHandshake AtomicBool
}
state struct {
sync.Mutex // protects against concurrent Start/Stop
}
queue struct {
staged chan *QueueOutboundElement // staged packets before a handshake is available
outbound *autodrainingOutboundQueue // sequential ordering of udp transmission
inbound *autodrainingInboundQueue // sequential ordering of tun writing
}
cookieGenerator CookieGenerator
trieEntries list.List
persistentKeepaliveInterval uint32 // accessed atomically
}
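// NewPeer creates a peer for the given public key and EtherGuard node ID.
// Supernode peers (isSuper) must use the reserved SuperNodeMessage ID, edge
// peers must use an ID below Special_NodeID. The peer is registered in the
// device maps and started immediately if the device is already up.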
func (device *Device) NewPeer(pk NoisePublicKey, id mtypes.Vertex, isSuper bool) (*Peer, error) {
if !isSuper {
if id < mtypes.Special_NodeID {
//pass check
} else {
return nil, errors.New(fmt.Sprint("ID ", uint32(id), " is a special NodeID"))
}
} else {
if id == mtypes.SuperNodeMessage {
//pass check
} else {
return nil, errors.New(fmt.Sprint("ID ", uint32(id), " is not a supernode NodeID"))
}
}
if device.isClosed() {
return nil, errors.New("device closed")
}
// lock resources
device.staticIdentity.RLock()
defer device.staticIdentity.RUnlock()
device.peers.Lock()
defer device.peers.Unlock()
// check if over limit
if len(device.peers.keyMap) >= MaxPeers {
return nil, errors.New("too many peers")
}
// create peer
if device.LogLevel.LogInternal {
fmt.Println("Internal: Create peer with ID : " + id.ToString() + " and PubKey:" + pk.ToString())
}
peer := new(Peer)
peer.LastPacketReceivedAdd1Sec.Store(&time.Time{})
peer.Lock()
defer peer.Unlock()
peer.cookieGenerator.Init(pk)
peer.device = device
peer.endpoint_trylist = NewEndpoint_trylist(peer, mtypes.S2TD(device.EdgeConfig.DynamicRoute.PeerAliveTimeout))
peer.SingleWayLatency = path.Infinity
peer.queue.outbound = newAutodrainingOutboundQueue(device)
peer.queue.inbound = newAutodrainingInboundQueue(device)
peer.queue.staged = make(chan *QueueOutboundElement, QueueStagedSize)
// map public key
_, ok := device.peers.keyMap[pk]
if ok {
return nil, fmt.Errorf("adding existing peer pubkey: %v", pk.ToString())
}
_, ok = device.peers.IDMap[id]
if ok {
return nil, fmt.Errorf("adding existing peer id: %v", id)
}
peer.ID = id
// pre-compute DH
handshake := &peer.handshake
handshake.mutex.Lock()
handshake.precomputedStaticStatic = device.staticIdentity.privateKey.sharedSecret(pk)
handshake.remoteStatic = pk
handshake.mutex.Unlock()
// reset endpoint
peer.endpoint = nil
// add
if id == mtypes.SuperNodeMessage { // To communicate with supernode
device.peers.SuperPeer[pk] = peer
device.peers.keyMap[pk] = peer
} else { // Regular peer, other edgenodes
device.peers.keyMap[pk] = peer
device.peers.IDMap[id] = peer
}
// start peer
peer.timersInit()
if peer.device.isUp() {
peer.Start()
}
return peer, nil
}
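// IsPeerAlive reports whether the peer has a known endpoint and a packet was
// received from it within DynamicRoute.PeerAliveTimeout.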
func (peer *Peer) IsPeerAlive() bool {
PeerAliveTimeout := mtypes.S2TD(peer.device.EdgeConfig.DynamicRoute.PeerAliveTimeout)
if peer.endpoint == nil {
return false
}
if peer.LastPacketReceivedAdd1Sec.Load().(*time.Time).Add(PeerAliveTimeout).Before(time.Now()) {
return false
}
return true
}
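// SendBuffer transmits a raw buffer to the peer's current endpoint via the
// device bind and adds the sent bytes to the peer's tx statistics.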
func (peer *Peer) SendBuffer(buffer []byte) error {
peer.device.net.RLock()
defer peer.device.net.RUnlock()
if peer.device.isClosed() {
return nil
}
peer.RLock()
defer peer.RUnlock()
if peer.endpoint == nil {
return errors.New("no known endpoint for peer")
}
err := peer.device.net.bind.Send(buffer, peer.endpoint)
if err == nil {
atomic.AddUint64(&peer.stats.txBytes, uint64(len(buffer)))
}
return err
}
func (peer *Peer) String() string {
// The awful goo that follows is identical to:
//
// base64Key := base64.StdEncoding.EncodeToString(peer.handshake.remoteStatic[:])
// abbreviatedKey := base64Key[0:4] + "…" + base64Key[39:43]
// return fmt.Sprintf("peer(%s)", abbreviatedKey)
//
// except that it is considerably more efficient.
src := peer.handshake.remoteStatic
b64 := func(input byte) byte {
return input + 'A' + byte(((25-int(input))>>8)&6) - byte(((51-int(input))>>8)&75) - byte(((61-int(input))>>8)&15) + byte(((62-int(input))>>8)&3)
}
b := []byte("peer(____…____)")
const first = len("peer(")
const second = len("peer(____…")
b[first+0] = b64((src[0] >> 2) & 63)
b[first+1] = b64(((src[0] << 4) | (src[1] >> 4)) & 63)
b[first+2] = b64(((src[1] << 2) | (src[2] >> 6)) & 63)
b[first+3] = b64(src[2] & 63)
b[second+0] = b64(src[29] & 63)
b[second+1] = b64((src[30] >> 2) & 63)
b[second+2] = b64(((src[30] << 4) | (src[31] >> 4)) & 63)
b[second+3] = b64((src[31] << 2) & 63)
return string(b)
}
func (peer *Peer) Start() {
// should never start a peer on a closed device
if peer.device.isClosed() {
return
}
// prevent simultaneous start/stop operations
peer.state.Lock()
defer peer.state.Unlock()
if peer.isRunning.Get() {
return
}
device := peer.device
device.log.Verbosef("%v - Starting", peer)
// reset routine state
peer.stopping.Wait()
peer.stopping.Add(2)
peer.handshake.mutex.Lock()
peer.handshake.lastSentHandshake = time.Now().Add(-(RekeyTimeout + time.Second))
peer.handshake.mutex.Unlock()
peer.device.queue.encryption.wg.Add(1) // keep encryption queue open for our writes
peer.timersStart()
device.flushInboundQueue(peer.queue.inbound)
device.flushOutboundQueue(peer.queue.outbound)
go peer.RoutineSequentialSender()
go peer.RoutineSequentialReceiver()
peer.isRunning.Set(true)
}
func (peer *Peer) ZeroAndFlushAll() {
device := peer.device
// clear key pairs
keypairs := &peer.keypairs
keypairs.Lock()
device.DeleteKeypair(keypairs.previous)
device.DeleteKeypair(keypairs.current)
device.DeleteKeypair(keypairs.loadNext())
keypairs.previous = nil
keypairs.current = nil
keypairs.storeNext(nil)
keypairs.Unlock()
// clear handshake state
handshake := &peer.handshake
handshake.mutex.Lock()
device.indexTable.Delete(handshake.localIndex)
handshake.Clear()
handshake.mutex.Unlock()
peer.FlushStagedPackets()
}
func (peer *Peer) ExpireCurrentKeypairs() {
handshake := &peer.handshake
handshake.mutex.Lock()
peer.device.indexTable.Delete(handshake.localIndex)
handshake.Clear()
peer.handshake.lastSentHandshake = time.Now().Add(-(RekeyTimeout + time.Second))
handshake.mutex.Unlock()
keypairs := &peer.keypairs
keypairs.Lock()
if keypairs.current != nil {
atomic.StoreUint64(&keypairs.current.sendNonce, RejectAfterMessages)
}
if keypairs.next != nil {
next := keypairs.loadNext()
atomic.StoreUint64(&next.sendNonce, RejectAfterMessages)
}
keypairs.Unlock()
}
func (peer *Peer) Stop() {
peer.state.Lock()
defer peer.state.Unlock()
if !peer.isRunning.Swap(false) {
return
}
peer.device.log.Verbosef("%v - Stopping", peer)
peer.timersStop()
// Signal that RoutineSequentialSender and RoutineSequentialReceiver should exit.
peer.queue.inbound.c <- nil
peer.queue.outbound.c <- nil
peer.stopping.Wait()
peer.device.queue.encryption.wg.Done() // no more writes to encryption queue from us
peer.ZeroAndFlushAll()
}
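// SetPSK installs a preshared key for the handshake with this peer. Edge
// nodes in P2P mode ignore it for regular peers and only log that preshared
// keys are disabled.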
func (peer *Peer) SetPSK(psk NoisePresharedKey) {
if !peer.device.IsSuperNode && peer.ID < mtypes.Special_NodeID && peer.device.EdgeConfig.DynamicRoute.P2P.UseP2P {
peer.device.log.Verbosef("Preshared keys disabled in P2P mode.")
return
}
peer.handshake.mutex.Lock()
peer.handshake.presharedKey = psk
peer.handshake.mutex.Unlock()
}
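// SetEndpointFromConnURL resolves connurl with the requested address family
// (0 both, 4 IPv4 only, 6 IPv6 only), parses it into a bind endpoint and
// installs it on the peer, remembering the URL and whether it is static.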
func (peer *Peer) SetEndpointFromConnURL(connurl string, af int, static bool) error {
peer.StaticConn = static
peer.ConnURL = connurl
peer.ConnAF = af
if peer.device.LogLevel.LogInternal {
fmt.Println("Internal: Set endpoint to " + connurl + " for NodeID:" + peer.ID.ToString())
}
var err error
connurl, err = conn.LookupIP(connurl, af)
if err != nil {
return err
}
endpoint, err := peer.device.net.bind.ParseEndpoint(connurl)
if err != nil {
return err
}
peer.StaticConn = static
peer.ConnURL = connurl
peer.SetEndpointFromPacket(endpoint)
return nil
}
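// SetEndpointFromPacket adopts the given endpoint as the peer's current
// endpoint (roaming). For the supernode peer it also records which local IP
// is used to reach it, and the new endpoint is handed to SaveToConfig.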
func (peer *Peer) SetEndpointFromPacket(endpoint conn.Endpoint) {
if peer.disableRoaming {
return
}
peer.Lock()
if peer.ID == mtypes.SuperNodeMessage {
conn, err := net.Dial("udp", endpoint.DstToString())
if err == nil {
defer conn.Close() // only close when Dial succeeded; conn is nil on error
IP := conn.LocalAddr().(*net.UDPAddr).IP
if ip4 := IP.To4(); ip4 != nil {
peer.device.peers.LocalV4 = ip4
} else {
peer.device.peers.LocalV6 = IP
}
}
}
peer.device.SaveToConfig(peer, endpoint)
peer.endpoint = endpoint
peer.Unlock()
}
func (peer *Peer) GetEndpointSrcStr() string {
if peer.endpoint == nil {
return ""
}
return peer.endpoint.SrcToString()
}
func (peer *Peer) GetEndpointDstStr() string {
if peer.endpoint == nil {
return ""
}
return peer.endpoint.DstToString()
}
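// SaveToConfig stores a newly learned endpoint of a dynamic (non-static) peer
// in the edge config and writes the config back asynchronously. Supernode
// mode, static peers, non-P2P mode and unchanged endpoints are all skipped.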
func (device *Device) SaveToConfig(peer *Peer, endpoint conn.Endpoint) {
if device.IsSuperNode { //not applicable in supernode mode
return
}
if peer.StaticConn { //static peers do not write new endpoints to the config file
return
}
if !device.EdgeConfig.DynamicRoute.P2P.UseP2P { //only save endpoints in P2P mode
return
}
if peer.endpoint != nil && peer.endpoint.DstIP().Equal(endpoint.DstIP()) { //endpoint unchanged, nothing to save
return
}
url := endpoint.DstToString()
foundInFile := false
pubkeystr := peer.handshake.remoteStatic.ToString()
pskstr := peer.handshake.presharedKey.ToString()
if bytes.Equal(peer.handshake.presharedKey[:], make([]byte, 32)) {
pskstr = ""
}
for i := range device.EdgeConfig.Peers {
peerfile := &device.EdgeConfig.Peers[i] // take a pointer so the endpoint update below is persisted in the config
if peerfile.NodeID == peer.ID && peerfile.PubKey == pubkeystr {
foundInFile = true
if !peerfile.Static {
peerfile.EndPoint = url
}
} else if peerfile.NodeID == peer.ID || peerfile.PubKey == pubkeystr {
panic("Found NodeID match " + peer.ID.ToString() + ", but PubKey " + pubkeystr + " does not match the entry in the config file")
}
}
if !foundInFile {
device.EdgeConfig.Peers = append(device.EdgeConfig.Peers, mtypes.PeerInfo{
NodeID: peer.ID,
PubKey: pubkeystr,
PSKey: pskstr,
EndPoint: url,
Static: false,
})
}
go device.SaveConfig()
}
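// SaveConfig marshals the edge config to YAML and writes it to the config
// file path, but only when DynamicRoute.SaveNewPeers is enabled.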
func (device *Device) SaveConfig() {
if device.EdgeConfig.DynamicRoute.SaveNewPeers {
configbytes, _ := yaml.Marshal(device.EdgeConfig)
ioutil.WriteFile(device.EdgeConfigPath, configbytes, 0644)
}
}