Mirror of https://github.com/tim-beatham/smegmesh.git (synced 2024-12-12 09:30:52 +01:00)
45-use-statistical-testing
Keepalive is now tracked per mesh rather than per node. A total-ordering mechanism similar to Paxos is used to elect a leader: if the leader does not update its timestamp within 3 * keepAlive, the leader is given a gravestone and the next leader is elected. The leader is chosen by lexicographic order of public key.
This commit is contained in: parent 64885f1055, commit 661fb0d54c
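As a rough, standalone illustration of the election rule described in the commit message (separate from the diff below): peers are ordered lexicographically by public key, and a candidate whose timestamp has not moved within 3 * keepAlive is skipped in favour of the next one. The electLeader helper, its peers/timestamps arguments and the example key names are hypothetical; the actual datastore only ever falls back from peers[0] to peers[1].

package main

import (
    "fmt"
    "slices"
    "time"
)

// electLeader picks the lexicographically smallest public key whose
// timestamp is still fresh; a candidate that has not updated within
// 3*keepAlive seconds is treated as dead and skipped.
func electLeader(peers []string, timestamps map[string]int64, keepAlive int64) (string, bool) {
    slices.Sort(peers) // total order on public keys

    now := time.Now().Unix()

    for _, peer := range peers {
        if now-timestamps[peer] <= 3*keepAlive {
            return peer, true
        }
    }

    return "", false
}

func main() {
    now := time.Now().Unix()

    peers := []string{"pubkeyB", "pubkeyA", "pubkeyC"}
    timestamps := map[string]int64{
        "pubkeyA": now - 120, // stale: missed more than 3 keepalive periods
        "pubkeyB": now - 5,
        "pubkeyC": now - 10,
    }

    leader, ok := electLeader(peers, timestamps, 30)
    fmt.Println(leader, ok) // pubkeyB true
}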
@@ -13,6 +13,7 @@ import (
     "github.com/tim-beatham/wgmesh/pkg/mesh"
     "github.com/tim-beatham/wgmesh/pkg/robin"
     "github.com/tim-beatham/wgmesh/pkg/sync"
+    timer "github.com/tim-beatham/wgmesh/pkg/timers"
     "golang.zx2c4.com/wireguard/wgctrl"
 )

@@ -62,6 +63,7 @@ func main() {
     syncRequester = sync.NewSyncRequester(ctrlServer)
     syncer = sync.NewSyncer(ctrlServer.MeshManager, conf, syncRequester)
     syncScheduler := sync.NewSyncScheduler(ctrlServer, syncRequester, syncer)
+    keepAlive := timer.NewTimestampScheduler(ctrlServer)

     robinIpcParams := robin.RobinIpcParams{
         CtrlServer: ctrlServer,
@@ -79,10 +81,12 @@ func main() {

     go ipc.RunIpcHandler(&robinIpc)
     go syncScheduler.Run()
+    go keepAlive.Run()

     closeResources := func() {
         logging.Log.WriteInfof("Closing resources")
         syncScheduler.Stop()
+        keepAlive.Stop()
         ctrlServer.Close()
         client.Close()
     }
@@ -5,6 +5,7 @@ import (
     "encoding/gob"
     "fmt"
     "net"
+    "slices"
     "strings"
     "time"

@@ -245,6 +246,31 @@ func (m *TwoPhaseStoreMeshManager) UpdateTimeStamp(nodeId string) error {
         return fmt.Errorf("datastore: %s does not exist in the mesh", nodeId)
     }

+    // Sort nodes by their public key
+    peers := m.GetPeers()
+    slices.Sort(peers)
+
+    if len(peers) == 0 {
+        return nil
+    }
+
+    peerToUpdate := peers[0]
+
+    if uint64(time.Now().Unix())-m.store.Clock.GetTimestamp(peerToUpdate) > 3*uint64(m.conf.KeepAliveTime) {
+        m.store.Mark(peerToUpdate)
+
+        if len(peers) < 2 {
+            return nil
+        }
+
+        peerToUpdate = peers[1]
+    }
+
+    if peerToUpdate != nodeId {
+        return nil
+    }
+
+    // Refresh causing node to update its timestamp
     node := m.store.Get(nodeId)
     node.Timestamp = time.Now().Unix()
     m.store.Put(nodeId, node)
@@ -2,6 +2,7 @@ package crdt

 import (
     "fmt"
+    "hash/fnv"

     "github.com/tim-beatham/wgmesh/pkg/conf"
     "github.com/tim-beatham/wgmesh/pkg/lib"
@@ -16,7 +17,11 @@ func (f *TwoPhaseMapFactory) CreateMesh(params *mesh.MeshProviderFactoryParams)
         IfName: params.DevName,
         Client: params.Client,
         conf:   params.Conf,
-        store:  NewTwoPhaseMap[string, MeshNode](params.NodeID),
+        store: NewTwoPhaseMap[string, MeshNode](params.NodeID, func(s string) uint64 {
+            h := fnv.New32a()
+            h.Write([]byte(s))
+            return uint64(h.Sum32())
+        }, uint64(3*params.Conf.KeepAliveTime)),
     }, nil
 }
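A self-contained sketch (not part of the diff) of the FNV-1a key function the factory now passes to NewTwoPhaseMap, and of how key hashes and clock values can be folded into a single order-independent digest, assuming the summation scheme used by the new VectorClock.GetHash. The clock map literal is illustrative data, not taken from the repository.

package main

import (
    "fmt"
    "hash/fnv"
)

// hashKey mirrors the function the factory now passes to NewTwoPhaseMap:
// FNV-1a over the node's public key, widened to uint64.
func hashKey(s string) uint64 {
    h := fnv.New32a()
    h.Write([]byte(s))
    return uint64(h.Sum32())
}

func main() {
    // Illustrative clock: node public key -> clock value.
    clock := map[string]uint64{
        "pubkeyA": 4,
        "pubkeyB": 7,
    }

    // Summing key hashes and clock values gives an order-independent
    // digest of the whole clock, in the spirit of VectorClock.GetHash.
    var digest uint64
    for key, value := range clock {
        digest += hashKey(key) + value
    }

    fmt.Println(digest)
}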
@@ -2,9 +2,8 @@
 package crdt

 import (
+    "cmp"
     "sync"
-
-    "github.com/tim-beatham/wgmesh/pkg/lib"
 )

 type Bucket[D any] struct {
@@ -14,7 +13,7 @@ type Bucket[D any] struct {
 }

 // GMap is a set that can only grow in size
-type GMap[K comparable, D any] struct {
+type GMap[K cmp.Ordered, D any] struct {
     lock     sync.RWMutex
     contents map[K]Bucket[D]
     clock    *VectorClock[K]
@@ -155,18 +154,17 @@ func (g *GMap[K, D]) GetHash() uint64 {
 }

 func (g *GMap[K, D]) Prune() {
-    outliers := lib.GetOutliers(g.clock.GetClock(), 0.05)
+    stale := g.clock.getStale()

     g.lock.Lock()

-    for _, outlier := range outliers {
+    for _, outlier := range stale {
         delete(g.contents, outlier)
     }

     g.lock.Unlock()
 }

-func NewGMap[K comparable, D any](clock *VectorClock[K]) *GMap[K, D] {
+func NewGMap[K cmp.Ordered, D any](clock *VectorClock[K]) *GMap[K, D] {
     return &GMap[K, D]{
         contents: make(map[K]Bucket[D]),
         clock:    clock,
@@ -1,17 +1,19 @@
 package crdt

 import (
+    "cmp"
+
     "github.com/tim-beatham/wgmesh/pkg/lib"
 )

-type TwoPhaseMap[K comparable, D any] struct {
+type TwoPhaseMap[K cmp.Ordered, D any] struct {
     addMap    *GMap[K, D]
     removeMap *GMap[K, bool]
     Clock     *VectorClock[K]
     processId K
 }

-type TwoPhaseMapSnapshot[K comparable, D any] struct {
+type TwoPhaseMapSnapshot[K cmp.Ordered, D any] struct {
     Add    map[K]Bucket[D]
     Remove map[K]Bucket[bool]
 }
@@ -104,7 +106,7 @@ func (m *TwoPhaseMap[K, D]) SnapShotFromState(state *TwoPhaseMapState[K]) *TwoPh
     }
 }

-type TwoPhaseMapState[K comparable] struct {
+type TwoPhaseMapState[K cmp.Ordered] struct {
     Vectors        map[K]uint64
     AddContents    map[K]uint64
     RemoveContents map[K]uint64
@@ -154,7 +156,7 @@ func (m *TwoPhaseMapState[K]) Difference(state *TwoPhaseMapState[K]) *TwoPhaseMa
         }
     }

-    for key, value := range state.AddContents {
+    for key, value := range state.RemoveContents {
         otherValue, ok := m.RemoveContents[key]

         if !ok || otherValue < value {
@@ -188,10 +190,10 @@ func (m *TwoPhaseMap[K, D]) Prune() {
 // NewTwoPhaseMap: create a new two phase map. Consists of two maps
 // a grow map and a remove map. If both timestamps equal then favour keeping
 // it in the map
-func NewTwoPhaseMap[K comparable, D any](processId K) *TwoPhaseMap[K, D] {
+func NewTwoPhaseMap[K cmp.Ordered, D any](processId K, hashKey func(K) uint64, staleTime uint64) *TwoPhaseMap[K, D] {
     m := TwoPhaseMap[K, D]{
         processId: processId,
-        Clock:     NewVectorClock(processId),
+        Clock:     NewVectorClock(processId, hashKey, staleTime),
     }

     m.addMap = NewGMap[K, D](m.Clock)
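The repeated switch from comparable to cmp.Ordered in these types is what allows keys to be sorted deterministically (for slices.Sort and the ordered clock hash). A tiny illustration of the constraint, using throwaway names unrelated to the repository's real types:

package main

import (
    "cmp"
    "fmt"
    "slices"
)

// sortedKeys accepts any key type that supports "<" (cmp.Ordered), which a
// plain comparable constraint does not guarantee.
func sortedKeys[K cmp.Ordered, V any](m map[K]V) []K {
    keys := make([]K, 0, len(m))
    for key := range m {
        keys = append(keys, key)
    }
    slices.Sort(keys)
    return keys
}

func main() {
    m := map[string]int{"pubkeyB": 2, "pubkeyA": 1}
    fmt.Println(sortedKeys(m)) // [pubkeyA pubkeyB]
}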
@@ -10,7 +10,8 @@ import (
 type SyncState int

 const (
-    PREPARE SyncState = iota
+    HASH SyncState = iota
+    PREPARE
     PRESENT
     EXCHANGE
     MERGE
@@ -26,13 +27,51 @@ type TwoPhaseSyncer struct {
     peerMsg []byte
 }

+type TwoPhaseHash struct {
+    Hash uint64
+}
+
 type SyncFSM map[SyncState]func(*TwoPhaseSyncer) ([]byte, bool)

-func prepare(syncer *TwoPhaseSyncer) ([]byte, bool) {
+func hash(syncer *TwoPhaseSyncer) ([]byte, bool) {
+    hash := TwoPhaseHash{
+        Hash: syncer.manager.store.Clock.GetHash(),
+    }
+
     var buffer bytes.Buffer
     enc := gob.NewEncoder(&buffer)

-    err := enc.Encode(*syncer.mapState)
+    err := enc.Encode(hash)
+
+    if err != nil {
+        logging.Log.WriteInfof(err.Error())
+    }
+
+    syncer.IncrementState()
+    return buffer.Bytes(), true
+}
+
+func prepare(syncer *TwoPhaseSyncer) ([]byte, bool) {
+    var recvBuffer = bytes.NewBuffer(syncer.peerMsg)
+    dec := gob.NewDecoder(recvBuffer)
+
+    var hash TwoPhaseHash
+    err := dec.Decode(&hash)
+
+    if err != nil {
+        logging.Log.WriteInfof(err.Error())
+    }
+
+    // If vector clocks are equal then no need to merge state
+    // Helps to reduce bandwidth by detecting early
+    if hash.Hash == syncer.manager.store.Clock.GetHash() {
+        return nil, false
+    }
+
+    var buffer bytes.Buffer
+    enc := gob.NewEncoder(&buffer)
+
+    err = enc.Encode(*syncer.mapState)

     if err != nil {
         logging.Log.WriteInfof(err.Error())
@@ -124,11 +163,14 @@ func (t *TwoPhaseSyncer) RecvMessage(msg []byte) error {

 func (t *TwoPhaseSyncer) Complete() {
     logging.Log.WriteInfof("SYNC COMPLETED")
-    t.manager.store.Clock.IncrementClock()
+    if t.state == FINISHED || t.state == MERGE {
+        t.manager.store.Clock.IncrementClock()
+    }
 }

 func NewTwoPhaseSyncer(manager *TwoPhaseStoreMeshManager) *TwoPhaseSyncer {
     var generateMessageFsm SyncFSM = SyncFSM{
+        HASH:     hash,
         PREPARE:  prepare,
         PRESENT:  present,
         EXCHANGE: exchange,
@@ -137,7 +179,7 @@ func NewTwoPhaseSyncer(manager *TwoPhaseStoreMeshManager) *TwoPhaseSyncer {

     return &TwoPhaseSyncer{
         manager:            manager,
-        state:              PREPARE,
+        state:              HASH,
         mapState:           manager.store.GenerateMessage(),
         generateMessageFSM: generateMessageFsm,
     }
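To make the new HASH-before-PREPARE handshake easier to follow, here is a hedged, self-contained sketch (not part of the diff) with a plain TwoPhaseHash struct and two free functions standing in for the real syncer methods: the initiator gob-encodes its clock hash, and the receiver abandons the exchange when the hashes already match. encodeHash and shouldContinue are illustrative names, not functions from the repository.

package main

import (
    "bytes"
    "encoding/gob"
    "fmt"
    "log"
)

// TwoPhaseHash mirrors the message type added in the diff.
type TwoPhaseHash struct {
    Hash uint64
}

// encodeHash plays the role of the new HASH state: serialise our clock hash.
func encodeHash(clockHash uint64) []byte {
    var buf bytes.Buffer
    if err := gob.NewEncoder(&buf).Encode(TwoPhaseHash{Hash: clockHash}); err != nil {
        log.Println(err)
    }
    return buf.Bytes()
}

// shouldContinue is what PREPARE now checks first: decode the peer's hash
// and skip the rest of the merge when it already matches our own.
func shouldContinue(peerMsg []byte, ourHash uint64) bool {
    var peerHash TwoPhaseHash
    if err := gob.NewDecoder(bytes.NewBuffer(peerMsg)).Decode(&peerHash); err != nil {
        log.Println(err)
    }
    return peerHash.Hash != ourHash
}

func main() {
    msg := encodeHash(42)
    fmt.Println(shouldContinue(msg, 42)) // false: clocks agree, nothing to sync
    fmt.Println(shouldContinue(msg, 43)) // true: state differs, carry on with the FSM
}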
@@ -1,17 +1,29 @@
 package crdt

 import (
+    "cmp"
+    "slices"
     "sync"
+    "time"

     "github.com/tim-beatham/wgmesh/pkg/lib"
 )

+type VectorBucket struct {
+    // clock current value of the node's clock
+    clock uint64
+    // lastUpdate we've seen
+    lastUpdate uint64
+}
+
 // Vector clock defines an abstract data type
 // for a vector clock implementation
-type VectorClock[K comparable] struct {
-    vectors   map[K]uint64
+type VectorClock[K cmp.Ordered] struct {
+    vectors   map[K]*VectorBucket
     lock      sync.RWMutex
     processID K
+    staleTime uint64
+    hashFunc  func(K) uint64
 }

 // IncrementClock: increments the node's value in the vector clock
@@ -20,10 +32,16 @@ func (m *VectorClock[K]) IncrementClock() uint64 {
     m.lock.Lock()

     for _, value := range m.vectors {
-        maxClock = max(maxClock, value)
+        maxClock = max(maxClock, value.clock)
     }

-    m.vectors[m.processID] = maxClock + 1
+    newBucket := VectorBucket{
+        clock:      maxClock + 1,
+        lastUpdate: uint64(time.Now().Unix()),
+    }
+
+    m.vectors[m.processID] = &newBucket
+
     m.lock.Unlock()
     return maxClock
 }
@@ -33,29 +51,73 @@ func (m *VectorClock[K]) IncrementClock() uint64 {
 func (m *VectorClock[K]) GetHash() uint64 {
     m.lock.RLock()

-    sum := lib.Reduce(uint64(0), lib.MapValues(m.vectors), func(sum uint64, current uint64) uint64 {
-        return current + sum
-    })
+    hash := uint64(0)
+
+    sortedKeys := lib.MapKeys(m.vectors)
+    slices.Sort(sortedKeys)
+
+    for key, bucket := range m.vectors {
+        hash += m.hashFunc(key)
+        hash += bucket.clock
+    }

     m.lock.RUnlock()
-    return sum
+    return hash
+}
+
+// getStale: get all entries that are stale within the mesh
+func (m *VectorClock[K]) getStale() []K {
+    m.lock.RLock()
+    maxTimeStamp := lib.Reduce(0, lib.MapValues(m.vectors), func(i uint64, vb *VectorBucket) uint64 {
+        return max(i, vb.lastUpdate)
+    })
+
+    toRemove := make([]K, 0)
+
+    for key, bucket := range m.vectors {
+        if maxTimeStamp-bucket.lastUpdate > m.staleTime {
+            toRemove = append(toRemove, key)
+        }
+    }
+
+    m.lock.RUnlock()
+    return toRemove
 }

 func (m *VectorClock[K]) Prune() {
-    outliers := lib.GetOutliers(m.vectors, 0.05)
+    stale := m.getStale()

     m.lock.Lock()

-    for _, outlier := range outliers {
-        delete(m.vectors, outlier)
+    for _, key := range stale {
+        delete(m.vectors, key)
     }

     m.lock.Unlock()
 }

+func (m *VectorClock[K]) GetTimestamp(processId K) uint64 {
+    return m.vectors[processId].lastUpdate
+}
+
 func (m *VectorClock[K]) Put(key K, value uint64) {
+    clockValue := uint64(0)
+
     m.lock.Lock()
-    m.vectors[key] = max(value, m.vectors[key])
+    bucket, ok := m.vectors[key]
+
+    if ok {
+        clockValue = bucket.clock
+    }
+
+    if value > clockValue {
+        newBucket := VectorBucket{
+            clock:      value,
+            lastUpdate: uint64(time.Now().Unix()),
+        }
+        m.vectors[key] = &newBucket
+    }
+
     m.lock.Unlock()
 }

@@ -64,6 +126,9 @@ func (m *VectorClock[K]) GetClock() map[K]uint64 {

     m.lock.RLock()

+    keys := lib.MapKeys(m.vectors)
+    slices.Sort(keys)
+
     for key, value := range clock {
         clock[key] = value
     }
@@ -72,9 +137,11 @@ func (m *VectorClock[K]) GetClock() map[K]uint64 {
     return clock
 }

-func NewVectorClock[K comparable](processID K) *VectorClock[K] {
+func NewVectorClock[K cmp.Ordered](processID K, hashFunc func(K) uint64, staleTime uint64) *VectorClock[K] {
     return &VectorClock[K]{
-        vectors:   make(map[K]uint64),
+        vectors:   make(map[K]*VectorBucket),
         processID: processID,
+        staleTime: staleTime,
+        hashFunc:  hashFunc,
     }
 }
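A compact sketch (not part of the diff) of the staleness rule introduced by VectorBucket, using a trimmed-down stand-in type rather than the real generic VectorClock: an entry is pruned when its lastUpdate lags the most recent update by more than staleTime. The vectorBucket/staleKeys names and the example timestamps are illustrative only.

package main

import "fmt"

// vectorBucket is a trimmed-down stand-in for the VectorBucket added in the
// diff: a clock value plus the time its entry last moved.
type vectorBucket struct {
    clock      uint64
    lastUpdate uint64
}

// staleKeys mirrors VectorClock.getStale: a key is stale when its lastUpdate
// lags the newest update in the clock by more than staleTime.
func staleKeys(vectors map[string]vectorBucket, staleTime uint64) []string {
    var maxTimestamp uint64
    for _, bucket := range vectors {
        maxTimestamp = max(maxTimestamp, bucket.lastUpdate)
    }

    toRemove := make([]string, 0)
    for key, bucket := range vectors {
        if maxTimestamp-bucket.lastUpdate > staleTime {
            toRemove = append(toRemove, key)
        }
    }
    return toRemove
}

func main() {
    vectors := map[string]vectorBucket{
        "pubkeyA": {clock: 10, lastUpdate: 1000},
        "pubkeyB": {clock: 12, lastUpdate: 1090}, // most recent update
        "pubkeyC": {clock: 3, lastUpdate: 900},   // 190s behind the newest
    }

    fmt.Println(staleKeys(vectors, 90)) // [pubkeyC]
}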
@@ -1,11 +1,13 @@
 package lib

+import "cmp"
+
 // MapToSlice converts a map to a slice in go
-func MapValues[K comparable, V any](m map[K]V) []V {
+func MapValues[K cmp.Ordered, V any](m map[K]V) []V {
     return MapValuesWithExclude(m, map[K]struct{}{})
 }

-func MapValuesWithExclude[K comparable, V any](m map[K]V, exclude map[K]struct{}) []V {
+func MapValuesWithExclude[K cmp.Ordered, V any](m map[K]V, exclude map[K]struct{}) []V {
     values := make([]V, len(m)-len(exclude))

     i := 0
@@ -26,7 +28,7 @@ func MapValuesWithExclude[K comparable, V any](m map[K]V, exclude map[K]struct{}
     return values
 }

-func MapKeys[K comparable, V any](m map[K]V) []K {
+func MapKeys[K cmp.Ordered, V any](m map[K]V) []K {
     values := make([]K, len(m))

     i := 0
@@ -2,6 +2,7 @@
 package lib

 import (
+    "cmp"
     "math"

     "gonum.org/v1/gonum/stat"
@@ -10,7 +11,7 @@ import (

 // Modelling the distribution using a normal distribution get the count
 // of the outliers
-func GetOutliers[K comparable](counts map[K]uint64, alpha float64) []K {
+func GetOutliers[K cmp.Ordered](counts map[K]uint64, alpha float64) []K {
     n := float64(len(counts))

     keys := MapKeys(counts)
@@ -8,7 +8,6 @@ import (
     "github.com/tim-beatham/wgmesh/pkg/conf"
     "github.com/tim-beatham/wgmesh/pkg/ip"
     "github.com/tim-beatham/wgmesh/pkg/lib"
-    logging "github.com/tim-beatham/wgmesh/pkg/log"
     "github.com/tim-beatham/wgmesh/pkg/wg"
     "golang.zx2c4.com/wireguard/wgctrl"
     "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
@@ -329,7 +328,6 @@ func (s *MeshManagerImpl) GetSelf(meshId string) (MeshNode, error) {
         return nil, fmt.Errorf("mesh %s does not exist", meshId)
     }

-    logging.Log.WriteInfof(s.HostParameters.GetPublicKey())
     node, err := meshInstance.GetNode(s.HostParameters.GetPublicKey())

     if err != nil {
@@ -173,7 +173,7 @@ type MeshProviderFactory interface {
 // MeshNodeFactoryParams are the parameters required to construct
 // a mesh node
 type MeshNodeFactoryParams struct {
     PublicKey *wgtypes.Key
     NodeIP    net.IP
     WgPort    int
     Endpoint  string
@@ -12,7 +12,7 @@ import (
     "github.com/tim-beatham/wgmesh/pkg/mesh"
 )

-// Syncer: picks random nodes from the mesh
+// Syncer: picks random nodes from the meshs
 type Syncer interface {
     Sync(meshId string) error
     SyncMeshes() error
@@ -25,54 +25,63 @@ type SyncerImpl struct {
     syncCount      int
     cluster        conn.ConnCluster
     conf           *conf.WgMeshConfiguration
+    lastSync       uint64
 }

 // Sync: Sync random nodes
 func (s *SyncerImpl) Sync(meshId string) error {
-    if !s.manager.HasChanges(meshId) && s.infectionCount == 0 {
+    self, err := s.manager.GetSelf(meshId)
+
+    if err != nil {
+        return err
+    }
+
+    s.manager.GetMesh(meshId).Prune()
+
+    if self.GetType() == conf.PEER_ROLE && !s.manager.HasChanges(meshId) && s.infectionCount == 0 {
         logging.Log.WriteInfof("No changes for %s", meshId)
         return nil
     }

-    logging.Log.WriteInfof("UPDATING WG CONF")
+    before := time.Now()
+
     s.manager.GetRouteManager().UpdateRoutes()
-    err := s.manager.ApplyConfig()
-
-    if err != nil {
-        logging.Log.WriteInfof("Failed to update config %w", err)
-    }

     publicKey := s.manager.GetPublicKey()
     logging.Log.WriteInfof(publicKey.String())

     nodeNames := s.manager.GetMesh(meshId).GetPeers()
-    neighbours := s.cluster.GetNeighbours(nodeNames, publicKey.String())
-    randomSubset := lib.RandomSubsetOfLength(neighbours, s.conf.BranchRate)

-    for _, node := range randomSubset {
-        logging.Log.WriteInfof("Random node: %s", node)
-    }
+    var gossipNodes []string

-    before := time.Now()
+    // Clients always pings its peer for configuration
+    if self.GetType() == conf.CLIENT_ROLE {
+        keyFunc := lib.HashString
+        bucketFunc := lib.HashString

-    if len(nodeNames) > s.conf.ClusterSize && rand.Float64() < s.conf.InterClusterChance {
-        logging.Log.WriteInfof("Sending to random cluster")
-        randomSubset[len(randomSubset)-1] = s.cluster.GetInterCluster(nodeNames, publicKey.String())
+        neighbour := lib.ConsistentHash(nodeNames, publicKey.String(), keyFunc, bucketFunc)
+        gossipNodes = make([]string, 1)
+        gossipNodes[0] = neighbour
+    } else {
+        neighbours := s.cluster.GetNeighbours(nodeNames, publicKey.String())
+        gossipNodes = lib.RandomSubsetOfLength(neighbours, s.conf.BranchRate)
+
+        if len(nodeNames) > s.conf.ClusterSize && rand.Float64() < s.conf.InterClusterChance {
+            gossipNodes[len(gossipNodes)-1] = s.cluster.GetInterCluster(nodeNames, publicKey.String())
+        }
     }

     var succeeded bool = false

     // Do this synchronously to conserve bandwidth
-    for _, node := range randomSubset {
+    for _, node := range gossipNodes {
         correspondingPeer := s.manager.GetNode(meshId, node)

         if correspondingPeer == nil {
             logging.Log.WriteErrorf("node %s does not exist", node)
         }

-        err = s.requester.SyncMesh(meshId, correspondingPeer)
+        err := s.requester.SyncMesh(meshId, correspondingPeer)

         if err == nil || err == io.EOF {
             succeeded = true
@@ -96,7 +105,15 @@ func (s *SyncerImpl) Sync(meshId string) error {
     }

     s.manager.GetMesh(meshId).SaveChanges()
-    s.manager.Prune()
+    s.lastSync = uint64(time.Now().Unix())
+
+    logging.Log.WriteInfof("UPDATING WG CONF")
+    err = s.manager.ApplyConfig()
+
+    if err != nil {
+        logging.Log.WriteInfof("Failed to update config %w", err)
+    }

     return nil
 }
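A loose sketch (not part of the diff) of the new peer-selection split in Sync: clients deterministically map themselves onto a single peer, while full peers gossip to a random subset of neighbours. The toy rendezvous-style hash below stands in for lib.ConsistentHash, whose exact algorithm is not shown in this diff, and pickPeerForClient/pickPeersForGossip are hypothetical helpers.

package main

import (
    "fmt"
    "hash/fnv"
    "math/rand"
)

func hashString(s string) uint64 {
    h := fnv.New64a()
    h.Write([]byte(s))
    return h.Sum64()
}

// pickPeerForClient is a toy rendezvous-style hash standing in for
// lib.ConsistentHash: a client always lands on the same peer for a given
// key, so client-to-peer traffic stays stable between syncs.
func pickPeerForClient(peers []string, selfKey string) string {
    var best string
    var bestScore uint64
    for _, peer := range peers {
        if score := hashString(selfKey + peer); best == "" || score > bestScore {
            best, bestScore = peer, score
        }
    }
    return best
}

// pickPeersForGossip mirrors the peer branch: gossip to a random subset of
// neighbours of size branchRate.
func pickPeersForGossip(neighbours []string, branchRate int) []string {
    rand.Shuffle(len(neighbours), func(i, j int) {
        neighbours[i], neighbours[j] = neighbours[j], neighbours[i]
    })
    if branchRate > len(neighbours) {
        branchRate = len(neighbours)
    }
    return neighbours[:branchRate]
}

func main() {
    peers := []string{"pubkeyA", "pubkeyB", "pubkeyC", "pubkeyD"}

    fmt.Println(pickPeerForClient(peers, "clientKey")) // deterministic choice
    fmt.Println(pickPeersForGossip(peers, 2))          // random pair of neighbours
}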
@@ -3,7 +3,6 @@ package wg
 import (
     "crypto"
     "crypto/rand"
-    "encoding/base64"
     "fmt"

     "github.com/tim-beatham/wgmesh/pkg/lib"
@@ -35,8 +34,7 @@ func (m *WgInterfaceManipulatorImpl) CreateInterface(port int, privKey *wgtypes.
     }

     md5 := crypto.MD5.New().Sum(randomBuf)
-    md5Str := fmt.Sprintf("wg%s", base64.StdEncoding.EncodeToString(md5)[:hashLength])
-
+    md5Str := fmt.Sprintf("wg%x", md5)[:hashLength]

     err = rtnl.CreateLink(md5Str)
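Finally, a small sketch (not part of the diff) of the interface-naming change, assuming an illustrative hashLength of 6: formatting the MD5 digest as hex, rather than base64, keeps the generated name to characters that are always valid in a Linux interface name.

package main

import (
    "crypto"
    _ "crypto/md5" // registers MD5 so crypto.MD5.New() works
    "crypto/rand"
    "fmt"
    "log"
)

func main() {
    const hashLength = 6 // illustrative; the real constant lives in the wg package

    randomBuf := make([]byte, 32)
    if _, err := rand.Read(randomBuf); err != nil {
        log.Fatal(err)
    }

    sum := crypto.MD5.New().Sum(randomBuf)

    // Hex formatting produces only [0-9a-f], so the truncated result is
    // always a safe interface name such as "wg3f9a".
    name := fmt.Sprintf("wg%x", sum)[:hashLength]
    fmt.Println(name)
}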