mirror of https://github.com/tim-beatham/smegmesh.git (synced 2025-08-18 17:08:15 +02:00)
45-use-statistical-testing
Keepalive is now per mesh rather than per node. A total-ordering mechanism, similar in spirit to Paxos, is used to elect a leader: if the leader does not update its timestamp within 3 * keepAlive, it is given a gravestone and the next leader is elected. The leader is chosen by lexicographic order of public key.
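To make the election rule in the message concrete, here is a minimal sketch of choosing a leader by lexicographic order of public key among nodes whose timestamps are still fresh. It is illustrative only: the map type, the "smallest key wins" tie-break, and the function name electLeader are assumptions, not code from this commit.

// electLeader picks the lexicographically first public key whose last
// timestamp is within 3 * keepAlive of now. Entries outside that window
// are treated as gravestoned and skipped. Illustrative sketch only.
func electLeader(lastSeen map[string]int64, now int64, keepAlive int64) (string, bool) {
	leader, found := "", false
	for pubKey, ts := range lastSeen {
		if now-ts > 3*keepAlive {
			continue // stale: this candidate has a gravestone
		}
		if !found || pubKey < leader {
			leader, found = pubKey, true
		}
	}
	return leader, found
}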
@@ -1,17 +1,29 @@
 package crdt
 
 import (
+	"cmp"
+	"slices"
 	"sync"
+	"time"
 
 	"github.com/tim-beatham/wgmesh/pkg/lib"
 )
 
+type VectorBucket struct {
+	// clock current value of the node's clock
+	clock uint64
+	// lastUpdate we've seen
+	lastUpdate uint64
+}
+
 // Vector clock defines an abstract data type
 // for a vector clock implementation
-type VectorClock[K comparable] struct {
-	vectors   map[K]uint64
+type VectorClock[K cmp.Ordered] struct {
+	vectors   map[K]*VectorBucket
 	lock      sync.RWMutex
 	processID K
+	staleTime uint64
+	hashFunc  func(K) uint64
 }
 
 // IncrementClock: increments the node's value in the vector clock
@@ -20,10 +32,16 @@ func (m *VectorClock[K]) IncrementClock() uint64 {
 	m.lock.Lock()
 
 	for _, value := range m.vectors {
-		maxClock = max(maxClock, value)
+		maxClock = max(maxClock, value.clock)
 	}
 
-	m.vectors[m.processID] = maxClock + 1
+	newBucket := VectorBucket{
+		clock:      maxClock + 1,
+		lastUpdate: uint64(time.Now().Unix()),
+	}
+
+	m.vectors[m.processID] = &newBucket
+
 	m.lock.Unlock()
 	return maxClock
 }
@@ -33,29 +51,73 @@ func (m *VectorClock[K]) IncrementClock() uint64 {
 func (m *VectorClock[K]) GetHash() uint64 {
 	m.lock.RLock()
 
-	sum := lib.Reduce(uint64(0), lib.MapValues(m.vectors), func(sum uint64, current uint64) uint64 {
-		return current + sum
-	})
+	hash := uint64(0)
+
+	sortedKeys := lib.MapKeys(m.vectors)
+	slices.Sort(sortedKeys)
+
+	for key, bucket := range m.vectors {
+		hash += m.hashFunc(key)
+		hash += bucket.clock
+	}
 
 	m.lock.RUnlock()
-	return sum
+	return hash
 }
 
+// getStale: get all entries that are stale within the mesh
+func (m *VectorClock[K]) getStale() []K {
+	m.lock.RLock()
+	maxTimeStamp := lib.Reduce(0, lib.MapValues(m.vectors), func(i uint64, vb *VectorBucket) uint64 {
+		return max(i, vb.lastUpdate)
+	})
+
+	toRemove := make([]K, 0)
+
+	for key, bucket := range m.vectors {
+		if maxTimeStamp-bucket.lastUpdate > m.staleTime {
+			toRemove = append(toRemove, key)
+		}
+	}
+
+	m.lock.RUnlock()
+	return toRemove
+}
+
 func (m *VectorClock[K]) Prune() {
-	outliers := lib.GetOutliers(m.vectors, 0.05)
+	stale := m.getStale()
 
 	m.lock.Lock()
 
-	for _, outlier := range outliers {
-		delete(m.vectors, outlier)
+	for _, key := range stale {
+		delete(m.vectors, key)
 	}
 
 	m.lock.Unlock()
 }
 
+func (m *VectorClock[K]) GetTimestamp(processId K) uint64 {
+	return m.vectors[processId].lastUpdate
+}
+
 func (m *VectorClock[K]) Put(key K, value uint64) {
+	clockValue := uint64(0)
+
 	m.lock.Lock()
-	m.vectors[key] = max(value, m.vectors[key])
+	bucket, ok := m.vectors[key]
+
+	if ok {
+		clockValue = bucket.clock
+	}
+
+	if value > clockValue {
+		newBucket := VectorBucket{
+			clock:      value,
+			lastUpdate: uint64(time.Now().Unix()),
+		}
+		m.vectors[key] = &newBucket
+	}
 
 	m.lock.Unlock()
 }
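The staleness check in getStale above is what enforces the "3 * keepAlive" rule from the commit message: an entry whose lastUpdate lags the freshest entry by more than staleTime is dropped on the next Prune. A small sketch of how the window could be derived when constructing the clock; keepAliveSeconds is an assumed per-mesh configuration value, not something defined in this diff.

// staleWindow derives the staleTime argument for NewVectorClock from a
// per-mesh keepalive interval: a node is considered stale once it has
// missed three consecutive keepalive periods. Illustrative only.
func staleWindow(keepAliveSeconds uint64) uint64 {
	return 3 * keepAliveSeconds
}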
@@ -64,6 +126,9 @@ func (m *VectorClock[K]) GetClock() map[K]uint64 {
 
 	m.lock.RLock()
 
+	keys := lib.MapKeys(m.vectors)
+	slices.Sort(keys)
+
 	for key, value := range clock {
 		clock[key] = value
 	}
@@ -72,9 +137,11 @@ func (m *VectorClock[K]) GetClock() map[K]uint64 {
 	return clock
 }
 
-func NewVectorClock[K comparable](processID K) *VectorClock[K] {
+func NewVectorClock[K cmp.Ordered](processID K, hashFunc func(K) uint64, staleTime uint64) *VectorClock[K] {
 	return &VectorClock[K]{
-		vectors:   make(map[K]uint64),
+		vectors:   make(map[K]*VectorBucket),
 		processID: processID,
+		staleTime: staleTime,
+		hashFunc:  hashFunc,
 	}
 }
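Taken together, the reworked constructor wires the hash function and the staleness window in at creation time. A rough usage sketch based on the signatures in this diff; the FNV key hash, the import path for the crdt package, and the timing values are illustrative assumptions rather than code from the repository.

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/tim-beatham/wgmesh/pkg/crdt"
)

// hashKey is an illustrative hash for string process IDs (e.g. WireGuard
// public keys); any func(K) uint64 satisfies the new constructor.
func hashKey(key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64()
}

func main() {
	keepAlive := uint64(10) // seconds, assumed per-mesh setting
	clock := crdt.NewVectorClock("my-public-key", hashKey, 3*keepAlive)

	clock.IncrementClock()          // bump our own entry and stamp lastUpdate
	clock.Put("peer-public-key", 4) // merge a peer's clock value
	clock.Prune()                   // drop entries stale for more than 3 * keepAlive
	fmt.Println(clock.GetHash())    // order-independent digest of keys and clocks
}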