NTP support

KusakabeSi committed 2021-08-25 11:54:13 +00:00
parent eae0dc1aa5
commit f8fe962f6a
25 changed files with 548 additions and 61 deletions

View File

@ -2,8 +2,8 @@ package config
import (
"crypto/rand"
"strconv"
"math"
"strconv"
)
const (
@ -64,13 +64,14 @@ type LoggerInfo struct {
LogTransit bool
LogControl bool
LogNormal bool
LogNTP bool
}
// Nonnegative integer ID of vertex
type Vertex uint32
func (v *Vertex) ToString() string {
switch *v{
switch *v {
case Boardcast:
return "B"
case ControlMessage:
@ -95,9 +96,11 @@ type DynamicRouteInfo struct {
}
type NTPinfo struct {
UseNTP bool
MaxServerUse int
Servers []string
UseNTP bool
MaxServerUse int
SyncTimeInterval float64
NTPTimeout float64
Servers []string
}
type SuperInfo struct {
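
For reference, the expanded NTPinfo can be populated like this (a minimal sketch using the values from the example configs later in this commit; illustrative only, not part of the diff):

    ntpcfg := config.NTPinfo{
        UseNTP:           true,
        MaxServerUse:     8,    // servers queried per sync round (see SyncTimeMultiple)
        SyncTimeInterval: 3600, // seconds between sync rounds (see RoutineSyncTime)
        NTPTimeout:       10,   // per-server query timeout, in seconds
        Servers:          []string{"time.google.com", "time1.google.com"},
    }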

View File

@ -485,7 +485,7 @@ func (peer *Peer) RoutineSequentialReceiver() {
should_process = false
should_transfer = false
if device.LogLevel.LogTransit {
fmt.Printf("Duplicate packet received from %d through %d , src_nodeID = %d . Dropeed.\n", peer.ID, device.ID, src_nodeID)
fmt.Printf("Transit: Duplicate packet received from %d through %d , src_nodeID = %d . Dropeed.\n", peer.ID, device.ID, src_nodeID)
}
}
case device.ID:
@ -521,7 +521,7 @@ func (peer *Peer) RoutineSequentialReceiver() {
if next_id != nil {
peer_out = device.peers.IDMap[*next_id]
if device.LogLevel.LogTransit {
fmt.Printf("Transfer packet from %d through %d to %d\n", peer.ID, device.ID, peer_out.ID)
fmt.Printf("Transit: Transfer packet from %d through %d to %d\n", peer.ID, device.ID, peer_out.ID)
}
device.SendPacket(peer_out, elem.packet, MessageTransportOffsetContent)
}
@ -533,7 +533,7 @@ func (peer *Peer) RoutineSequentialReceiver() {
if packet_type != path.NornalPacket {
if device.LogLevel.LogControl {
if peer.GetEndpointDstStr() != "" {
fmt.Println("Received MID:" + strconv.Itoa(int(EgHeader.GetMessageID())) + " From:" + peer.GetEndpointDstStr() + " " + device.sprint_received(packet_type, elem.packet[path.EgHeaderLen:]))
fmt.Println("Control: Received MID:" + strconv.Itoa(int(EgHeader.GetMessageID())) + " From:" + peer.GetEndpointDstStr() + " " + device.sprint_received(packet_type, elem.packet[path.EgHeaderLen:]))
}
}
err = device.process_received(packet_type, peer, elem.packet[path.EgHeaderLen:])
@ -546,7 +546,7 @@ func (peer *Peer) RoutineSequentialReceiver() {
if should_receive { // Write message to tap device
if packet_type == path.NornalPacket {
if device.LogLevel.LogNormal {
fmt.Println("Reveived Normal packet From:" + peer.GetEndpointDstStr() + " SrcID:" + src_nodeID.ToString() + " DstID:" + dst_nodeID.ToString() + " Len:" + strconv.Itoa(len(elem.packet)))
fmt.Println("Normal: Reveived Normal packet From:" + peer.GetEndpointDstStr() + " SrcID:" + src_nodeID.ToString() + " DstID:" + dst_nodeID.ToString() + " Len:" + strconv.Itoa(len(elem.packet)))
}
if len(elem.packet) <= path.EgHeaderLen+12 {
device.log.Errorf("Invalid normal packet from peer %v", peer)

View File

@ -25,7 +25,7 @@ func (device *Device) SendPacket(peer *Peer, packet []byte, offset int) {
EgHeader, _ := path.NewEgHeader(packet[:path.EgHeaderLen])
if EgHeader.GetUsage() == path.NornalPacket {
dst_nodeID := EgHeader.GetDst()
fmt.Println("Send Normal packet To:" + peer.GetEndpointDstStr() + " SrcID:" + device.ID.ToString() + " DstID:" + dst_nodeID.ToString() + " Len:" + strconv.Itoa(len(packet)))
fmt.Println("Normal: Send Normal packet To:" + peer.GetEndpointDstStr() + " SrcID:" + device.ID.ToString() + " DstID:" + dst_nodeID.ToString() + " Len:" + strconv.Itoa(len(packet)))
}
}
if device.LogLevel.LogControl {
@ -34,7 +34,7 @@ func (device *Device) SendPacket(peer *Peer, packet []byte, offset int) {
device.MsgCount += 1
EgHeader.SetMessageID(device.MsgCount)
if peer.GetEndpointDstStr() != "" {
fmt.Println("Send MID:" + strconv.Itoa(int(device.MsgCount)) + " To:" + peer.GetEndpointDstStr() + " " + device.sprint_received(EgHeader.GetUsage(), packet[path.EgHeaderLen:]))
fmt.Println("Control: Send MID:" + strconv.Itoa(int(device.MsgCount)) + " To:" + peer.GetEndpointDstStr() + " " + device.sprint_received(EgHeader.GetUsage(), packet[path.EgHeaderLen:]))
}
}
}
@ -69,7 +69,7 @@ func (device *Device) SpreadPacket(skip_list map[config.Vertex]bool, packet []by
for peer_id, peer_out := range device.peers.IDMap {
if _, ok := skip_list[peer_id]; ok {
if device.LogLevel.LogTransit {
fmt.Printf("Skipped Spread Packet packet through %d to %d\n", device.ID, peer_out.ID)
fmt.Printf("Transit: Skipped Spread Packet packet through %d to %d\n", device.ID, peer_out.ID)
}
continue
}
@ -84,7 +84,7 @@ func (device *Device) TransitBoardcastPacket(src_nodeID config.Vertex, in_id con
for peer_id := range node_boardcast_list {
peer_out := device.peers.IDMap[peer_id]
if device.LogLevel.LogTransit {
fmt.Printf("Transfer packet from %d through %d to %d\n", in_id, device.ID, peer_out.ID)
fmt.Printf("Transit: Transfer packet from %d through %d to %d\n", in_id, device.ID, peer_out.ID)
}
device.SendPacket(peer_out, packet, offset)
}
@ -268,12 +268,15 @@ func (device *Device) process_UpdatePeerMsg(content path.UpdatePeerMsg) error {
if device.DRoute.SuperNode.UseSuperNode {
var peer_infos config.HTTP_Peers
if bytes.Equal(device.peers.Peer_state[:], content.State_hash[:]) {
if device.LogLevel.LogControl {
fmt.Println("Control: Same PeerState Hash, skip download nhTable")
}
return nil
}
downloadurl := device.DRoute.SuperNode.APIUrl + "/peerinfo?PubKey=" + url.QueryEscape(PubKey2Str(device.staticIdentity.publicKey)) + "&State=" + url.QueryEscape(string(content.State_hash[:]))
if device.LogLevel.LogControl {
fmt.Println("Download peerinfo from :" + downloadurl)
fmt.Println("Control: Download peerinfo from :" + downloadurl)
}
client := http.Client{
Timeout: 30 * time.Second,
@ -290,7 +293,7 @@ func (device *Device) process_UpdatePeerMsg(content path.UpdatePeerMsg) error {
return err
}
if device.LogLevel.LogControl {
fmt.Println("Download result :" + string(allbytes))
fmt.Println("Control: Download peerinfo result :" + string(allbytes))
}
if err := json.Unmarshal(allbytes, &peer_infos); err != nil {
device.log.Errorf(err.Error())
@ -308,7 +311,7 @@ func (device *Device) process_UpdatePeerMsg(content path.UpdatePeerMsg) error {
thepeer := device.LookupPeer(sk)
if thepeer == nil { //not exist in local
if device.LogLevel.LogControl {
fmt.Println("Add new peer to local ID:" + peerinfo.NodeID.ToString() + " PubKey:" + pubkey)
fmt.Println("Control: Add new peer to local ID:" + peerinfo.NodeID.ToString() + " PubKey:" + pubkey)
}
if device.graph.Weight(device.ID, peerinfo.NodeID) == path.Infinity { // add node to graph
device.graph.UpdateLentancy(device.ID, peerinfo.NodeID, path.S2TD(path.Infinity), false)
@ -368,7 +371,7 @@ func (device *Device) RoutineSetEndpoint() {
return true
}
if device.LogLevel.LogControl {
fmt.Println("Set endpoint to " + endpoint.DstToString() + " for NodeID:" + thepeer.ID.ToString())
fmt.Println("Control: Set endpoint to " + endpoint.DstToString() + " for NodeID:" + thepeer.ID.ToString())
}
thepeer.SetEndpointFromPacket(endpoint)
NextRun = true
@ -517,7 +520,7 @@ func (device *Device) process_UpdateNhTableMsg(content path.UpdateNhTableMsg) er
if device.DRoute.SuperNode.UseSuperNode {
if bytes.Equal(device.graph.NhTableHash[:], content.State_hash[:]) {
if device.LogLevel.LogControl {
fmt.Println("Same State_hash, skip download nhTable")
fmt.Println("Control: Same nhTable Hash, skip download nhTable")
}
device.graph.NhTableExpire = time.Now().Add(device.graph.SuperNodeInfoTimeout)
return nil
@ -528,7 +531,7 @@ func (device *Device) process_UpdateNhTableMsg(content path.UpdateNhTableMsg) er
}
downloadurl := device.DRoute.SuperNode.APIUrl + "/nhtable?PubKey=" + url.QueryEscape(PubKey2Str(device.staticIdentity.publicKey)) + "&State=" + url.QueryEscape(string(content.State_hash[:]))
if device.LogLevel.LogControl {
fmt.Println("Download NhTable from :" + downloadurl)
fmt.Println("Control: Download NhTable from :" + downloadurl)
}
client := http.Client{
Timeout: 30 * time.Second,
@ -545,7 +548,7 @@ func (device *Device) process_UpdateNhTableMsg(content path.UpdateNhTableMsg) er
return err
}
if device.LogLevel.LogControl {
fmt.Println("Download result :" + string(allbytes))
fmt.Println("Control: Download NhTable result :" + string(allbytes))
}
if err := json.Unmarshal(allbytes, &NhTable); err != nil {
device.log.Errorf(err.Error())
@ -608,7 +611,7 @@ func (device *Device) process_BoardcastPeerMsg(peer *Peer, content path.Boardcas
thepeer := device.LookupPeer(sk)
if thepeer == nil { //not exist in local
if device.LogLevel.LogControl {
fmt.Println("Add new peer to local ID:" + content.NodeID.ToString() + " PubKey:" + base64.StdEncoding.EncodeToString(content.PubKey[:]))
fmt.Println("Control: Add new peer to local ID:" + content.NodeID.ToString() + " PubKey:" + base64.StdEncoding.EncodeToString(content.PubKey[:]))
}
if device.graph.Weight(device.ID, content.NodeID) == path.Infinity { // add node to graph
device.graph.UpdateLentancy(device.ID, content.NodeID, path.S2TD(path.Infinity), false)

View File

@ -274,9 +274,7 @@ func (device *Device) RoutineReadFromTUN() {
continue
}
if device.LogLevel.LogNormal {
if device.LogLevel.LogNormal {
fmt.Println("Send Normal packet To:" + peer.GetEndpointDstStr() + " SrcID:" + device.ID.ToString() + " DstID:" + dst_nodeID.ToString() + " Len:" + strconv.Itoa(len(elem.packet)))
}
fmt.Println("Normal: Send Normal packet To:" + peer.GetEndpointDstStr() + " SrcID:" + device.ID.ToString() + " DstID:" + dst_nodeID.ToString() + " Len:" + strconv.Itoa(len(elem.packet)))
}
if peer.isRunning.Get() {
peer.StagePacket(elem)

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com
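
These new keys decode straight into the NTPinfo struct from the config package: gopkg.in/yaml.v2 matches lowercased exported field names by default, and the struct carries no explicit yaml tags in this diff, so usentp, maxserveruse, synctimeinterval, ntptimeout and servers map onto UseNTP, MaxServerUse, SyncTimeInterval, NTPTimeout and Servers. A minimal decoding sketch (illustrative, assuming the config and yaml.v2 imports):

    raw := []byte("usentp: true\nmaxserveruse: 8\nsynctimeinterval: 3600\nntptimeout: 10\nservers:\n  - time.google.com\n")
    var ntpcfg config.NTPinfo
    if err := yaml.Unmarshal(raw, &ntpcfg); err != nil {
        panic(err)
    }
    fmt.Println(ntpcfg.MaxServerUse, ntpcfg.SyncTimeInterval) // 8 3600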

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -40,8 +41,10 @@ dynamicroute:
nodereporttimeout: 40
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
usentp: false
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -40,8 +41,10 @@ dynamicroute:
nodereporttimeout: 40
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
usentp: false
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -40,8 +41,10 @@ dynamicroute:
nodereporttimeout: 40
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
usentp: false
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -40,8 +41,10 @@ dynamicroute:
nodereporttimeout: 40
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
usentp: false
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -40,8 +41,10 @@ dynamicroute:
nodereporttimeout: 40
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
usentp: false
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -40,8 +41,10 @@ dynamicroute:
nodereporttimeout: 40
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
usentp: false
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

View File

@ -17,6 +17,7 @@ loglevel:
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 20
dupchecktimeout: 40
@ -41,7 +42,9 @@ dynamicroute:
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 5
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 10
servers:
- time.google.com
- time1.google.com

go.mod (1 changed line)
View File

@ -6,6 +6,7 @@ require (
git.fd.io/govpp.git v0.3.6-0.20210810100027-c0da1f2999a6
git.fd.io/govpp.git/extras v0.0.0-20210810100027-c0da1f2999a6
github.com/KusakabeSi/go-cache v0.0.0-20210823132304-22b5b1d22b41
github.com/beevik/ntp v0.3.0
github.com/sirupsen/logrus v1.8.1
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57
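
The single new dependency, github.com/beevik/ntp, is the SNTP client that path/ntp.go (added below) builds on: QueryWithOptions returns a Response whose ClockOffset and RTT fields drive the sync logic. A minimal query sketch (illustrative, assuming fmt/log/time imports):

    resp, err := ntp.QueryWithOptions("time.google.com", ntp.QueryOptions{Timeout: 10 * time.Second})
    if err != nil {
        log.Println("NTP query failed:", err)
        return
    }
    fmt.Println("clock offset:", resp.ClockOffset, "RTT:", resp.RTT)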

View File

@ -45,6 +45,8 @@ func printExampleEdgeConf() {
LogLevel: "normal",
LogTransit: true,
LogControl: true,
LogNormal: true,
LogNTP: false,
},
DynamicRoute: config.DynamicRouteInfo{
SendPingInterval: 20,
@ -72,8 +74,10 @@ func printExampleEdgeConf() {
},
},
NTPconfig: config.NTPinfo{
UseNTP: true,
MaxServerUse: 5,
UseNTP: true,
MaxServerUse: 5,
SyncTimeInterval: 3600,
NTPTimeout: 10,
Servers: []string{"time.google.com",
"time1.google.com",
"time2.google.com",
@ -102,7 +106,7 @@ func printExampleEdgeConf() {
},
},
}
g := path.NewGraph(3, false, tconfig.DynamicRoute.P2P.GraphRecalculateSetting)
g := path.NewGraph(3, false, tconfig.DynamicRoute.P2P.GraphRecalculateSetting, tconfig.DynamicRoute.NTPconfig, false)
g.UpdateLentancy(1, 2, path.S2TD(0.5), false)
g.UpdateLentancy(2, 1, path.S2TD(0.5), false)
@ -139,8 +143,10 @@ func Edge(configPath string, useUAPI bool, printExample bool) (err error) {
return err
}
interfaceName := tconfig.NodeName
NodeName := tconfig.NodeName
if len(NodeName) > 32 {
return errors.New("Node name can't longer than 32 :" + NodeName)
}
var logLevel int
switch tconfig.LogLevel.LogLevel {
case "verbose", "debug":
@ -152,7 +158,7 @@ func Edge(configPath string, useUAPI bool, printExample bool) (err error) {
}
logger := device.NewLogger(
logLevel,
fmt.Sprintf("(%s) ", interfaceName),
fmt.Sprintf("(%s) ", NodeName),
)
if err != nil {
@ -188,7 +194,10 @@ func Edge(configPath string, useUAPI bool, printExample bool) (err error) {
////////////////////////////////////////////////////
// Config
graph := path.NewGraph(3, false, tconfig.DynamicRoute.P2P.GraphRecalculateSetting)
if tconfig.DynamicRoute.P2P.UseP2P == false && tconfig.DynamicRoute.SuperNode.UseSuperNode == false {
tconfig.LogLevel.LogNTP = false // NTP in static mode is useless
}
graph := path.NewGraph(3, false, tconfig.DynamicRoute.P2P.GraphRecalculateSetting, tconfig.DynamicRoute.NTPconfig, tconfig.LogLevel.LogNTP)
graph.SetNHTable(tconfig.NextHopTable, [32]byte{})
the_device := device.NewDevice(thetap, tconfig.NodeID, conn.NewDefaultBind(), logger, graph, false, configPath, &tconfig, nil, nil)
@ -260,7 +269,7 @@ func Edge(configPath string, useUAPI bool, printExample bool) (err error) {
term := make(chan os.Signal, 1)
if useUAPI {
startUAPI(interfaceName, logger, the_device, errs)
startUAPI(NodeName, logger, the_device, errs)
}
// wait for program to terminate

View File

@ -75,7 +75,11 @@ func Super(configPath string, useUAPI bool, printExample bool) (err error) {
fmt.Printf("Error read config: %v\n", configPath)
return err
}
interfaceName := sconfig.NodeName
NodeName := sconfig.NodeName
if len(NodeName) > 32 {
return errors.New("Node name can't longer than 32 :" + NodeName)
}
var logLevel int
switch sconfig.LogLevel.LogLevel {
case "verbose", "debug":
@ -88,11 +92,11 @@ func Super(configPath string, useUAPI bool, printExample bool) (err error) {
logger4 := device.NewLogger(
logLevel,
fmt.Sprintf("(%s) ", interfaceName+"_v4"),
fmt.Sprintf("(%s) ", NodeName+"_v4"),
)
logger6 := device.NewLogger(
logLevel,
fmt.Sprintf("(%s) ", interfaceName+"_v6"),
fmt.Sprintf("(%s) ", NodeName+"_v6"),
)
http_PeerState = make(map[string]*PeerState)
@ -108,7 +112,7 @@ func Super(configPath string, useUAPI bool, printExample bool) (err error) {
}
thetap, _ := tap.CreateDummyTAP()
http_graph = path.NewGraph(3, true, sconfig.GraphRecalculateSetting)
http_graph = path.NewGraph(3, true, sconfig.GraphRecalculateSetting, config.NTPinfo{}, sconfig.LogLevel.LogNTP)
http_device4 = device.NewDevice(thetap, config.SuperNodeMessage, conn.NewCustomBind(true, false), logger4, http_graph, true, configPath, nil, &sconfig, &super_chains)
http_device6 = device.NewDevice(thetap, config.SuperNodeMessage, conn.NewCustomBind(false, true), logger6, http_graph, true, configPath, nil, &sconfig, &super_chains)
defer http_device4.Close()
@ -184,12 +188,12 @@ func Super(configPath string, useUAPI bool, printExample bool) (err error) {
errs := make(chan error, 1<<3)
term := make(chan os.Signal, 1)
if useUAPI {
uapi4, err := startUAPI(interfaceName+"_v4", logger4, http_device4, errs)
uapi4, err := startUAPI(NodeName+"_v4", logger4, http_device4, errs)
if err != nil {
return err
}
defer uapi4.Close()
uapi6, err := startUAPI(interfaceName+"_v6", logger6, http_device6, errs)
uapi6, err := startUAPI(NodeName+"_v6", logger6, http_device6, errs)
if err != nil {
return err
}

orderdmap/orderdmap.go (new file, 286 lines)
View File

@ -0,0 +1,286 @@
package orderedmap
import (
"bytes"
"encoding/json"
"sort"
)
type Pair struct {
key string
value interface{}
}
func (kv *Pair) Key() string {
return kv.key
}
func (kv *Pair) Value() interface{} {
return kv.value
}
type ByPair struct {
Pairs []*Pair
LessFunc func(a *Pair, j *Pair) bool
}
func (a ByPair) Len() int { return len(a.Pairs) }
func (a ByPair) Swap(i, j int) { a.Pairs[i], a.Pairs[j] = a.Pairs[j], a.Pairs[i] }
func (a ByPair) Less(i, j int) bool { return a.LessFunc(a.Pairs[i], a.Pairs[j]) }
type OrderedMap struct {
keys []string
values map[string]interface{}
escapeHTML bool
}
func New() *OrderedMap {
o := OrderedMap{}
o.keys = []string{}
o.values = map[string]interface{}{}
o.escapeHTML = true
return &o
}
func (o *OrderedMap) SetEscapeHTML(on bool) {
o.escapeHTML = on
}
func (o *OrderedMap) Get(key string) (interface{}, bool) {
val, exists := o.values[key]
return val, exists
}
func (o *OrderedMap) Set(key string, value interface{}) {
_, exists := o.values[key]
if !exists {
o.keys = append(o.keys, key)
}
o.values[key] = value
}
func (o *OrderedMap) Delete(key string) {
// check key is in use
_, ok := o.values[key]
if !ok {
return
}
// remove from keys
for i, k := range o.keys {
if k == key {
o.keys = append(o.keys[:i], o.keys[i+1:]...)
break
}
}
// remove from values
delete(o.values, key)
}
func (o *OrderedMap) Keys() []string {
return o.keys
}
// SortKeys Sort the map keys using your sort func
func (o *OrderedMap) SortKeys(sortFunc func(keys []string)) {
sortFunc(o.keys)
}
// Sort Sort the map using your sort func
func (o *OrderedMap) Sort(lessFunc func(a *Pair, b *Pair) bool) {
pairs := make([]*Pair, len(o.keys))
for i, key := range o.keys {
pairs[i] = &Pair{key, o.values[key]}
}
sort.Sort(ByPair{pairs, lessFunc})
for i, pair := range pairs {
o.keys[i] = pair.key
}
}
func (o *OrderedMap) UnmarshalJSON(b []byte) error {
if o.values == nil {
o.values = map[string]interface{}{}
}
err := json.Unmarshal(b, &o.values)
if err != nil {
return err
}
dec := json.NewDecoder(bytes.NewReader(b))
if _, err = dec.Token(); err != nil { // skip '{'
return err
}
o.keys = make([]string, 0, len(o.values))
return decodeOrderedMap(dec, o)
}
func decodeOrderedMap(dec *json.Decoder, o *OrderedMap) error {
hasKey := make(map[string]bool, len(o.values))
for {
token, err := dec.Token()
if err != nil {
return err
}
if delim, ok := token.(json.Delim); ok && delim == '}' {
return nil
}
key := token.(string)
if hasKey[key] {
// duplicate key
for j, k := range o.keys {
if k == key {
copy(o.keys[j:], o.keys[j+1:])
break
}
}
o.keys[len(o.keys)-1] = key
} else {
hasKey[key] = true
o.keys = append(o.keys, key)
}
token, err = dec.Token()
if err != nil {
return err
}
if delim, ok := token.(json.Delim); ok {
switch delim {
case '{':
if values, ok := o.values[key].(map[string]interface{}); ok {
newMap := OrderedMap{
keys: make([]string, 0, len(values)),
values: values,
escapeHTML: o.escapeHTML,
}
if err = decodeOrderedMap(dec, &newMap); err != nil {
return err
}
o.values[key] = newMap
} else if oldMap, ok := o.values[key].(OrderedMap); ok {
newMap := OrderedMap{
keys: make([]string, 0, len(oldMap.values)),
values: oldMap.values,
escapeHTML: o.escapeHTML,
}
if err = decodeOrderedMap(dec, &newMap); err != nil {
return err
}
o.values[key] = newMap
} else if err = decodeOrderedMap(dec, &OrderedMap{}); err != nil {
return err
}
case '[':
if values, ok := o.values[key].([]interface{}); ok {
if err = decodeSlice(dec, values, o.escapeHTML); err != nil {
return err
}
} else if err = decodeSlice(dec, []interface{}{}, o.escapeHTML); err != nil {
return err
}
}
}
}
}
func decodeSlice(dec *json.Decoder, s []interface{}, escapeHTML bool) error {
for index := 0; ; index++ {
token, err := dec.Token()
if err != nil {
return err
}
if delim, ok := token.(json.Delim); ok {
switch delim {
case '{':
if index < len(s) {
if values, ok := s[index].(map[string]interface{}); ok {
newMap := OrderedMap{
keys: make([]string, 0, len(values)),
values: values,
escapeHTML: escapeHTML,
}
if err = decodeOrderedMap(dec, &newMap); err != nil {
return err
}
s[index] = newMap
} else if oldMap, ok := s[index].(OrderedMap); ok {
newMap := OrderedMap{
keys: make([]string, 0, len(oldMap.values)),
values: oldMap.values,
escapeHTML: escapeHTML,
}
if err = decodeOrderedMap(dec, &newMap); err != nil {
return err
}
s[index] = newMap
} else if err = decodeOrderedMap(dec, &OrderedMap{}); err != nil {
return err
}
} else if err = decodeOrderedMap(dec, &OrderedMap{}); err != nil {
return err
}
case '[':
if index < len(s) {
if values, ok := s[index].([]interface{}); ok {
if err = decodeSlice(dec, values, escapeHTML); err != nil {
return err
}
} else if err = decodeSlice(dec, []interface{}{}, escapeHTML); err != nil {
return err
}
} else if err = decodeSlice(dec, []interface{}{}, escapeHTML); err != nil {
return err
}
case ']':
return nil
}
}
}
}
func (o OrderedMap) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
buf.WriteByte('{')
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(o.escapeHTML)
for i, k := range o.keys {
if i > 0 {
buf.WriteByte(',')
}
// add key
if err := encoder.Encode(k); err != nil {
return nil, err
}
buf.WriteByte(':')
// add value
if err := encoder.Encode(o.values[k]); err != nil {
return nil, err
}
}
buf.WriteByte('}')
return buf.Bytes(), nil
}
/*
The MIT License (MIT)
Copyright (c) 2017 Ian Coleman
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
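
A short usage sketch of the vendored ordered map (API names taken from the file above, imported under the orderedmap alias as in path/ntp.go; not part of the diff). Keys keep their insertion order, Sort reorders them with a caller-supplied comparison, and MarshalJSON emits the keys in that order:

    om := orderedmap.New()
    om.Set("time.google.com", 30*time.Millisecond)
    om.Set("time1.google.com", 10*time.Millisecond)
    om.Sort(func(a, b *orderedmap.Pair) bool { // ascending by stored value
        return a.Value().(time.Duration) < b.Value().(time.Duration)
    })
    fmt.Println(om.Keys())     // [time1.google.com time.google.com]
    out, _ := json.Marshal(om) // valid JSON, keys in the sorted order
    fmt.Println(string(out))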

path/ntp.go (new file, 128 lines)
View File

@ -0,0 +1,128 @@
package path
import (
"fmt"
"sort"
"time"
orderedmap "github.com/KusakabeSi/EtherGuardVPN/orderdmap"
"github.com/beevik/ntp"
)
var forever = time.Hour * 99999
func (g *IG) InitNTP() {
if g.ntp_info.UseNTP {
if len(g.ntp_info.Servers) == 0 {
g.ntp_info.UseNTP = false
return
}
g.ntp_servers = *orderedmap.New()
for _, url := range g.ntp_info.Servers {
g.ntp_servers.Set(url, ntp.Response{
RTT: forever,
})
}
g.SyncTimeMultiple(-1)
go g.RoutineSyncTime()
} else {
if g.ntp_log {
fmt.Println("NTP sync disabled")
}
}
}
type ntp_result_pair struct {
URL string
VAL ntp.Response
}
func (g *IG) RoutineSyncTime() {
if !g.ntp_info.UseNTP {
return
}
for {
time.Sleep(S2TD(g.ntp_info.SyncTimeInterval))
g.SyncTimeMultiple(g.ntp_info.MaxServerUse)
}
}
type ByDuration []time.Duration
func (a ByDuration) Len() int { return len(a) }
func (a ByDuration) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByDuration) Less(i, j int) bool { return a[i] < a[j] }
func (g *IG) SyncTimeMultiple(count int) {
var url2sync []string
if count < 0 {
count = len(g.ntp_servers.Keys())
}
if count > len(g.ntp_servers.Keys()) {
count = len(g.ntp_servers.Keys())
}
for index, url := range g.ntp_servers.Keys() {
if index < count {
url2sync = append(url2sync, url)
} else {
break
}
}
for _, url := range url2sync {
g.ntp_wg.Add(1)
go g.SyncTime(url, S2TD(g.ntp_info.NTPTimeout))
}
g.ntp_wg.Wait()
g.ntp_servers.Sort(func(a *orderedmap.Pair, b *orderedmap.Pair) bool {
return a.Value().(ntp.Response).RTT < b.Value().(ntp.Response).RTT
})
results := make([]time.Duration, count)
for _, url := range g.ntp_servers.Keys() {
val, has := g.ntp_servers.Get(url)
if has == false {
continue
}
result := val.(ntp.Response)
if result.RTT < forever {
results = append(results, result.ClockOffset)
}
}
if g.ntp_log {
fmt.Println("NTP: All done")
}
sort.Sort(ByDuration(results))
if len(results) > 3 {
results = results[1 : len(results)-1]
}
var totaltime time.Duration
for _, result := range results {
totaltime += result
}
avgtime := totaltime / time.Duration(len(results))
if g.ntp_log {
fmt.Println("NTP: Arvage offset: " + avgtime.String())
}
g.ntp_offset = avgtime
}
func (g *IG) SyncTime(url string, timeout time.Duration) {
if g.ntp_log {
fmt.Println("NTP: Starting syncing with NTP server :" + url)
}
options := ntp.QueryOptions{Timeout: timeout}
response, err := ntp.QueryWithOptions(url, options)
if err == nil {
if g.ntp_log {
fmt.Println("NTP: NTP server :" + url + "\tResult:" + response.ClockOffset.String() + " RTT:" + response.RTT.String())
}
g.ntp_servers.Set(url, *response)
} else {
if g.ntp_log {
fmt.Println("NTP: NTP server :" + url + "\tFailed :" + err.Error())
}
g.ntp_servers.Set(url, ntp.Response{
RTT: forever,
})
}
g.ntp_wg.Done()
}
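
The offset that GetCurrentTime later applies is a trimmed mean: every reachable server contributes its ClockOffset, the samples are sorted, the smallest and largest are dropped when more than three exist, and the remainder is averaged. Note that in SyncTimeMultiple above, results is pre-sized with make([]time.Duration, count) before the appends, so count zero-valued entries remain in the slice and pull the average toward zero. A sketch of the intended arithmetic using a zero-length slice (illustrative, hypothetical helper name, assuming sort/time imports):

    // trimmedMeanOffset averages the collected clock offsets after dropping the extremes.
    func trimmedMeanOffset(offsets []time.Duration) time.Duration {
        if len(offsets) == 0 {
            return 0 // no server answered; leave the local clock untouched
        }
        sort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] })
        if len(offsets) > 3 {
            offsets = offsets[1 : len(offsets)-1] // drop the smallest and largest sample
        }
        var total time.Duration
        for _, o := range offsets {
            total += o
        }
        return total / time.Duration(len(offsets))
    }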

View File

@ -7,13 +7,14 @@ import (
"time"
"github.com/KusakabeSi/EtherGuardVPN/config"
orderedmap "github.com/KusakabeSi/EtherGuardVPN/orderdmap"
yaml "gopkg.in/yaml.v2"
)
const Infinity = float64(99999)
func (g *IG) GetCurrentTime() time.Time {
return time.Now().Round(0)
return time.Now().Add(g.ntp_offset).Round(0)
}
// A Graph is the interface implemented by graphs that
@ -50,24 +51,32 @@ type IG struct {
NhTableHash [32]byte
NhTableExpire time.Time
IsSuperMode bool
ntp_wg sync.WaitGroup
ntp_log bool
ntp_info config.NTPinfo
ntp_offset time.Duration
ntp_servers orderedmap.OrderedMap // serverurl:lentancy
}
func S2TD(secs float64) time.Duration {
return time.Duration(secs * float64(time.Second))
}
func NewGraph(num_node int, IsSuperMode bool, theconfig config.GraphRecalculateSetting) *IG {
func NewGraph(num_node int, IsSuperMode bool, theconfig config.GraphRecalculateSetting, ntpinfo config.NTPinfo, logntp bool) *IG {
g := IG{
edgelock: &sync.RWMutex{},
JitterTolerance: theconfig.JitterTolerance,
JitterToleranceMultiplier: theconfig.JitterToleranceMultiplier,
NodeReportTimeout: S2TD(theconfig.NodeReportTimeout),
RecalculateCoolDown: S2TD(theconfig.RecalculateCoolDown),
ntp_info: ntpinfo,
}
g.Vert = make(map[config.Vertex]bool, num_node)
g.edges = make(map[config.Vertex]map[config.Vertex]Latency, num_node)
g.IsSuperMode = IsSuperMode
g.ntp_log = logntp
g.InitNTP()
return &g
}
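
With the two extra parameters, every NewGraph caller now supplies the NTP settings and the LogNTP flag; the super node passes an empty config.NTPinfo{}, so UseNTP is false there and InitNTP simply reports NTP sync as disabled. A call sketch with illustrative values (field names from the diff, numbers made up for the example):

    g := path.NewGraph(3, false,
        config.GraphRecalculateSetting{
            JitterTolerance:           20,
            JitterToleranceMultiplier: 1.1,
            NodeReportTimeout:         40,
            RecalculateCoolDown:       5,
        },
        config.NTPinfo{
            UseNTP:           true,
            MaxServerUse:     8,
            SyncTimeInterval: 3600,
            NTPTimeout:       10,
            Servers:          []string{"time.google.com"},
        },
        true, // logntp
    )
    fmt.Println(g.GetCurrentTime()) // local clock shifted by the averaged NTP offset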

View File

@ -18,6 +18,7 @@ import (
"git.fd.io/govpp.git/binapi/l2"
"git.fd.io/govpp.git/binapi/memif"
"git.fd.io/govpp.git/extras/libmemif"
"golang.org/x/sys/unix"
"github.com/KusakabeSi/EtherGuardVPN/config"
logger "github.com/sirupsen/logrus"
@ -61,6 +62,9 @@ type VppTap struct {
// New creates and returns a new TUN interface for the application.
func CreateVppTAP(iconfig config.InterfaceConf, loglevel string) (tapdev Device, err error) {
// Setup TUN Config
if len(iconfig.Name) >= unix.IFNAMSIZ {
return nil, fmt.Errorf("interface name too long: %w", unix.ENAMETOOLONG)
}
// Set logger
log := logger.New()
log.Out = os.Stdout
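
The new guard in CreateVppTAP rejects names that cannot fit an interface-name buffer: unix.IFNAMSIZ is 16 on Linux (including the terminating NUL), so names of 16 bytes or more are refused with an error wrapping unix.ENAMETOOLONG. The same check as a standalone sketch (hypothetical helper):

    // checkIfName mirrors the length guard added above.
    func checkIfName(name string) error {
        if len(name) >= unix.IFNAMSIZ { // IFNAMSIZ is 16 on Linux, counting the NUL byte
            return fmt.Errorf("interface name too long: %w", unix.ENAMETOOLONG)
        }
        return nil
    }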