clear duplicated variables

This commit is contained in:
Kusakabe Si 2021-12-03 22:46:58 +00:00
parent c1133c9a69
commit e949229827
14 changed files with 156 additions and 139 deletions

View File

@ -75,7 +75,6 @@ type Device struct {
LocalV6 net.IP
}
event_tryendpoint chan struct{}
ResetConnInterval float64
EdgeConfigPath string
EdgeConfig *mtypes.EdgeConfig
@ -91,17 +90,13 @@ type Device struct {
indexTable IndexTable
cookieChecker CookieChecker
IsSuperNode bool
ID mtypes.Vertex
DefaultTTL uint8
graph *path.IG
l2fib sync.Map
fibTimeout float64
LogLevel mtypes.LoggerInfo
DRoute mtypes.DynamicRouteInfo
DupData fixed_time_cache.Cache
Version string
AdditionalCost float64
IsSuperNode bool
ID mtypes.Vertex
graph *path.IG
l2fib sync.Map
LogLevel mtypes.LoggerInfo
DupData fixed_time_cache.Cache
Version string
HttpPostCount uint64
JWTSecret mtypes.JWTSecret
@ -350,24 +345,22 @@ func NewDevice(tapDevice tap.Device, id mtypes.Vertex, bind conn.Bind, logger *L
device.indexTable.Init()
device.PopulatePools()
if IsSuperNode {
device.SuperConfigPath = configpath
device.SuperConfig = sconfig
device.EdgeConfig = &mtypes.EdgeConfig{}
device.Event_server_pong = superevents.Event_server_pong
device.Event_server_register = superevents.Event_server_register
device.LogLevel = sconfig.LogLevel
device.SuperConfig = sconfig
device.SuperConfigPath = configpath
} else {
device.EdgeConfigPath = configpath
device.EdgeConfig = econfig
device.DRoute = econfig.DynamicRoute
device.DupData = *fixed_time_cache.NewCache(path.S2TD(econfig.DynamicRoute.DupCheckTimeout), false, path.S2TD(60))
device.SuperConfig = &mtypes.SuperConfig{}
device.DupData = *fixed_time_cache.NewCache(mtypes.S2TD(econfig.DynamicRoute.DupCheckTimeout), false, mtypes.S2TD(60))
device.event_tryendpoint = make(chan struct{}, 1<<6)
device.Event_save_config = make(chan struct{}, 1<<5)
device.Event_Supernode_OK = make(chan struct{}, 4)
device.LogLevel = econfig.LogLevel
device.ResetConnInterval = device.EdgeConfig.ResetConnInterval
device.DefaultTTL = econfig.DefaultTTL
device.fibTimeout = econfig.L2FIBTimeout
device.AdditionalCost = device.DRoute.P2P.AdditionalCost
go device.RoutineSetEndpoint()
go device.RoutineDetectOfflineAndTryNextEndpoint()
go device.RoutineRegister()

View File

@ -76,7 +76,7 @@ func (et *endpoint_trylist) UpdateSuper(urls mtypes.API_connurl, UseLocalIP bool
}
newmap_super[url] = &endpoint_tryitem{
URL: url,
lastTry: time.Time{}.Add(path.S2TD(it)),
lastTry: time.Time{}.Add(mtypes.S2TD(it)),
firstTry: time.Time{},
}
}
@ -245,7 +245,7 @@ func (device *Device) NewPeer(pk NoisePublicKey, id mtypes.Vertex, isSuper bool)
peer.cookieGenerator.Init(pk)
peer.device = device
peer.endpoint_trylist = NewEndpoint_trylist(peer, path.S2TD(device.DRoute.PeerAliveTimeout))
peer.endpoint_trylist = NewEndpoint_trylist(peer, mtypes.S2TD(device.EdgeConfig.DynamicRoute.PeerAliveTimeout))
peer.SingleWayLatency = path.Infinity
peer.queue.outbound = newAutodrainingOutboundQueue(device)
peer.queue.inbound = newAutodrainingInboundQueue(device)
@ -290,7 +290,7 @@ func (device *Device) NewPeer(pk NoisePublicKey, id mtypes.Vertex, isSuper bool)
}
func (peer *Peer) IsPeerAlive() bool {
PeerAliveTimeout := path.S2TD(peer.device.DRoute.PeerAliveTimeout)
PeerAliveTimeout := mtypes.S2TD(peer.device.EdgeConfig.DynamicRoute.PeerAliveTimeout)
if peer.endpoint == nil {
return false
}
@ -452,7 +452,7 @@ func (peer *Peer) Stop() {
}
func (peer *Peer) SetPSK(psk NoisePresharedKey) {
if peer.device.IsSuperNode == false && peer.ID < mtypes.Special_NodeID && peer.device.DRoute.P2P.UseP2P == true {
if peer.device.IsSuperNode == false && peer.ID < mtypes.Special_NodeID && peer.device.EdgeConfig.DynamicRoute.P2P.UseP2P == true {
peer.device.log.Verbosef("Preshared keys disabled in P2P mode.")
return
}
@ -528,7 +528,7 @@ func (device *Device) SaveToConfig(peer *Peer, endpoint conn.Endpoint) {
if peer.StaticConn == true { //static conn do not write new endpoint to config
return
}
if !device.DRoute.P2P.UseP2P { //Must in p2p mode
if !device.EdgeConfig.DynamicRoute.P2P.UseP2P { //Must in p2p mode
return
}
if peer.endpoint != nil && peer.endpoint.DstIP().Equal(endpoint.DstIP()) { //endpoint changed
@ -565,7 +565,7 @@ func (device *Device) SaveToConfig(peer *Peer, endpoint conn.Endpoint) {
}
func (device *Device) SaveConfig() {
if device.DRoute.SaveNewPeers {
if device.EdgeConfig.DynamicRoute.SaveNewPeers {
configbytes, _ := yaml.Marshal(device.EdgeConfig)
ioutil.WriteFile(device.EdgeConfigPath, configbytes, 0644)
}

View File

@ -111,7 +111,7 @@ func (device *Device) TransitBoardcastPacket(src_nodeID mtypes.Vertex, in_id mty
func (device *Device) Send2Super(usage path.Usage, packet []byte, offset int) {
device.peers.RLock()
if device.DRoute.SuperNode.UseSuperNode {
if device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode {
for _, peer_out := range device.peers.SuperPeer {
/*if device.LogTransit {
fmt.Printf("Send to supernode %s\n", peer_out.endpoint.DstToString())
@ -255,7 +255,7 @@ func (device *Device) SendPing(peer *Peer, times int, replies int, interval floa
for i := 0; i < times; i++ {
packet, usage, _ := device.GeneratePingPacket(device.ID, replies)
device.SendPacket(peer, usage, packet, MessageTransportOffsetContent)
time.Sleep(path.S2TD(interval))
time.Sleep(mtypes.S2TD(interval))
}
}
@ -300,7 +300,7 @@ func (device *Device) server_process_RegisterMsg(peer *Peer, content mtypes.Regi
buf := make([]byte, path.EgHeaderLen+len(body))
header, err := path.NewEgHeader(buf[:path.EgHeaderLen])
header.SetSrc(device.ID)
header.SetTTL(device.DefaultTTL)
header.SetTTL(0)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
header.SetDst(mtypes.SuperNodeMessage)
@ -324,11 +324,11 @@ func (device *Device) process_ping(peer *Peer, content mtypes.PingMsg) error {
Src_nodeID: content.Src_nodeID,
Dst_nodeID: device.ID,
Timediff: Timediff,
TimeToAlive: device.DRoute.PeerAliveTimeout,
AdditionalCost: device.AdditionalCost,
TimeToAlive: device.EdgeConfig.DynamicRoute.PeerAliveTimeout,
AdditionalCost: device.EdgeConfig.DynamicRoute.AdditionalCost,
}
if device.DRoute.P2P.UseP2P && time.Now().After(device.graph.NhTableExpire) {
device.graph.UpdateLatency(content.Src_nodeID, device.ID, PongMSG.Timediff, device.DRoute.PeerAliveTimeout, device.AdditionalCost, true, false)
if device.EdgeConfig.DynamicRoute.P2P.UseP2P && time.Now().After(device.graph.NhTableExpire) {
device.graph.UpdateLatency(content.Src_nodeID, device.ID, PongMSG.Timediff, device.EdgeConfig.DynamicRoute.PeerAliveTimeout, device.EdgeConfig.DynamicRoute.AdditionalCost, true, false)
}
body, err := mtypes.GetByte(&PongMSG)
if err != nil {
@ -337,14 +337,14 @@ func (device *Device) process_ping(peer *Peer, content mtypes.PingMsg) error {
buf := make([]byte, path.EgHeaderLen+len(body))
header, err := path.NewEgHeader(buf[:path.EgHeaderLen])
header.SetSrc(device.ID)
header.SetTTL(device.DefaultTTL)
header.SetTTL(device.EdgeConfig.DefaultTTL)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
if device.DRoute.SuperNode.UseSuperNode {
if device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode {
header.SetDst(mtypes.SuperNodeMessage)
device.Send2Super(path.PongPacket, buf, MessageTransportOffsetContent)
}
if device.DRoute.P2P.UseP2P {
if device.EdgeConfig.DynamicRoute.P2P.UseP2P {
header.SetDst(mtypes.ControlMessage)
device.SpreadPacket(make(map[mtypes.Vertex]bool), path.PongPacket, buf, MessageTransportOffsetContent)
}
@ -353,9 +353,9 @@ func (device *Device) process_ping(peer *Peer, content mtypes.PingMsg) error {
}
func (device *Device) process_pong(peer *Peer, content mtypes.PongMsg) error {
if device.DRoute.P2P.UseP2P {
if device.EdgeConfig.DynamicRoute.P2P.UseP2P {
if time.Now().After(device.graph.NhTableExpire) {
device.graph.UpdateLatency(content.Src_nodeID, content.Dst_nodeID, content.Timediff, device.DRoute.PeerAliveTimeout, content.AdditionalCost, true, false)
device.graph.UpdateLatency(content.Src_nodeID, content.Dst_nodeID, content.Timediff, device.EdgeConfig.DynamicRoute.PeerAliveTimeout, content.AdditionalCost, true, false)
}
if !peer.AskedForNeighbor {
QueryPeerMsg := mtypes.QueryPeerMsg{
@ -368,7 +368,7 @@ func (device *Device) process_pong(peer *Peer, content mtypes.PongMsg) error {
buf := make([]byte, path.EgHeaderLen+len(body))
header, err := path.NewEgHeader(buf[:path.EgHeaderLen])
header.SetSrc(device.ID)
header.SetTTL(device.DefaultTTL)
header.SetTTL(device.EdgeConfig.DefaultTTL)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
device.SendPacket(peer, path.QueryPeer, buf, MessageTransportOffsetContent)
@ -379,7 +379,7 @@ func (device *Device) process_pong(peer *Peer, content mtypes.PongMsg) error {
func (device *Device) process_UpdatePeerMsg(peer *Peer, content mtypes.UpdatePeerMsg) error {
var send_signal bool
if device.DRoute.SuperNode.UseSuperNode {
if device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode {
if peer.ID != mtypes.SuperNodeMessage {
if device.LogLevel.LogControl {
fmt.Println("Control: Ignored UpdateErrorMsg. Not from supernode.")
@ -394,7 +394,7 @@ func (device *Device) process_UpdatePeerMsg(peer *Peer, content mtypes.UpdatePee
}
var peer_infos mtypes.API_Peers
downloadurl := device.DRoute.SuperNode.APIUrl + "/peerinfo?NodeID=" + strconv.Itoa(int(device.ID)) + "&PubKey=" + url.QueryEscape(device.staticIdentity.publicKey.ToString()) + "&State=" + url.QueryEscape(string(content.State_hash[:]))
downloadurl := device.EdgeConfig.DynamicRoute.SuperNode.APIUrl + "/peerinfo?NodeID=" + strconv.Itoa(int(device.ID)) + "&PubKey=" + url.QueryEscape(device.staticIdentity.publicKey.ToString()) + "&State=" + url.QueryEscape(string(content.State_hash[:]))
if device.LogLevel.LogControl {
fmt.Println("Control: Download peerinfo from :" + downloadurl)
}
@ -459,10 +459,10 @@ func (device *Device) process_UpdatePeerMsg(peer *Peer, content mtypes.UpdatePee
fmt.Println("Control: Add new peer to local ID:" + peerinfo.NodeID.ToString() + " PubKey:" + PubKey)
}
if device.graph.Weight(device.ID, peerinfo.NodeID, false) == path.Infinity { // add node to graph
device.graph.UpdateLatency(device.ID, peerinfo.NodeID, path.Infinity, 0, device.AdditionalCost, true, false)
device.graph.UpdateLatency(device.ID, peerinfo.NodeID, path.Infinity, 0, device.EdgeConfig.DynamicRoute.AdditionalCost, true, false)
}
if device.graph.Weight(peerinfo.NodeID, device.ID, false) == path.Infinity { // add node to graph
device.graph.UpdateLatency(peerinfo.NodeID, device.ID, path.Infinity, 0, device.AdditionalCost, true, false)
device.graph.UpdateLatency(peerinfo.NodeID, device.ID, path.Infinity, 0, device.EdgeConfig.DynamicRoute.AdditionalCost, true, false)
}
device.NewPeer(sk, peerinfo.NodeID, false)
thepeer = device.LookupPeer(sk)
@ -491,7 +491,7 @@ func (device *Device) process_UpdatePeerMsg(peer *Peer, content mtypes.UpdatePee
}
func (device *Device) process_UpdateNhTableMsg(peer *Peer, content mtypes.UpdateNhTableMsg) error {
if device.DRoute.SuperNode.UseSuperNode {
if device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode {
if peer.ID != mtypes.SuperNodeMessage {
if device.LogLevel.LogControl {
fmt.Println("Control: Ignored UpdateErrorMsg. Not from supernode.")
@ -509,7 +509,7 @@ func (device *Device) process_UpdateNhTableMsg(peer *Peer, content mtypes.Update
if bytes.Equal(device.graph.NhTableHash[:], content.State_hash[:]) {
return nil
}
downloadurl := device.DRoute.SuperNode.APIUrl + "/nhtable?NodeID=" + strconv.Itoa(int(device.ID)) + "&PubKey=" + url.QueryEscape(device.staticIdentity.publicKey.ToString()) + "&State=" + url.QueryEscape(string(content.State_hash[:]))
downloadurl := device.EdgeConfig.DynamicRoute.SuperNode.APIUrl + "/nhtable?NodeID=" + strconv.Itoa(int(device.ID)) + "&PubKey=" + url.QueryEscape(device.staticIdentity.publicKey.ToString()) + "&State=" + url.QueryEscape(string(content.State_hash[:]))
if device.LogLevel.LogControl {
fmt.Println("Control: Download NhTable from :" + downloadurl)
}
@ -562,7 +562,7 @@ func (device *Device) process_UpdateErrorMsg(peer *Peer, content mtypes.ServerCo
}
func (device *Device) process_RequestPeerMsg(content mtypes.QueryPeerMsg) error { //Send all my peers to all my peers
if device.DRoute.P2P.UseP2P {
if device.EdgeConfig.DynamicRoute.P2P.UseP2P {
device.peers.RLock()
for pubkey, peer := range device.peers.keyMap {
if peer.ID >= mtypes.Special_NodeID {
@ -593,7 +593,7 @@ func (device *Device) process_RequestPeerMsg(content mtypes.QueryPeerMsg) error
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[0:path.EgHeaderLen])
header.SetDst(mtypes.ControlMessage)
header.SetTTL(device.DefaultTTL)
header.SetTTL(device.EdgeConfig.DefaultTTL)
header.SetSrc(device.ID)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
@ -605,7 +605,7 @@ func (device *Device) process_RequestPeerMsg(content mtypes.QueryPeerMsg) error
}
func (device *Device) process_BoardcastPeerMsg(peer *Peer, content mtypes.BoardcastPeerMsg) error {
if device.DRoute.P2P.UseP2P {
if device.EdgeConfig.DynamicRoute.P2P.UseP2P {
var pk NoisePublicKey
if content.Request_ID == uint32(device.ID) {
peer.AskedForNeighbor = true
@ -620,10 +620,10 @@ func (device *Device) process_BoardcastPeerMsg(peer *Peer, content mtypes.Boardc
fmt.Println("Control: Add new peer to local ID:" + content.NodeID.ToString() + " PubKey:" + pk.ToString())
}
if device.graph.Weight(device.ID, content.NodeID, false) == path.Infinity { // add node to graph
device.graph.UpdateLatency(device.ID, content.NodeID, path.Infinity, 0, device.AdditionalCost, true, false)
device.graph.UpdateLatency(device.ID, content.NodeID, path.Infinity, 0, device.EdgeConfig.DynamicRoute.AdditionalCost, true, false)
}
if device.graph.Weight(content.NodeID, device.ID, false) == path.Infinity { // add node to graph
device.graph.UpdateLatency(content.NodeID, device.ID, path.Infinity, 0, device.AdditionalCost, true, false)
device.graph.UpdateLatency(content.NodeID, device.ID, path.Infinity, 0, device.EdgeConfig.DynamicRoute.AdditionalCost, true, false)
}
device.NewPeer(pk, content.NodeID, false)
}
@ -638,14 +638,15 @@ func (device *Device) process_BoardcastPeerMsg(peer *Peer, content mtypes.Boardc
}
func (device *Device) RoutineSetEndpoint() {
if !(device.DRoute.P2P.UseP2P || device.DRoute.SuperNode.UseSuperNode) {
if !(device.EdgeConfig.DynamicRoute.P2P.UseP2P || device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode) {
return
}
timeout := mtypes.S2TD(device.EdgeConfig.DynamicRoute.ConnNextTry)
for {
NextRun := false
<-device.event_tryendpoint
for _, thepeer := range device.peers.IDMap {
if thepeer.LastPacketReceivedAdd1Sec.Load().(*time.Time).Add(path.S2TD(device.DRoute.PeerAliveTimeout)).After(time.Now()) {
if thepeer.LastPacketReceivedAdd1Sec.Load().(*time.Time).Add(mtypes.S2TD(device.EdgeConfig.DynamicRoute.PeerAliveTimeout)).After(time.Now()) {
//Peer alives
continue
} else {
@ -661,7 +662,7 @@ func (device *Device) RoutineSetEndpoint() {
}
if FastTry {
NextRun = true
go device.SendPing(thepeer, int(device.DRoute.ConnNextTry+1), 1, 1)
go device.SendPing(thepeer, int(device.EdgeConfig.DynamicRoute.ConnNextTry+1), 1, 1)
}
}
@ -674,7 +675,7 @@ func (device *Device) RoutineSetEndpoint() {
break ClearChanLoop
}
}
time.Sleep(path.S2TD(device.DRoute.ConnNextTry))
time.Sleep(timeout)
if device.LogLevel.LogInternal {
fmt.Printf("Internal: RoutineSetEndpoint: NextRun:%v\n", NextRun)
}
@ -685,33 +686,36 @@ func (device *Device) RoutineSetEndpoint() {
}
func (device *Device) RoutineDetectOfflineAndTryNextEndpoint() {
if !(device.DRoute.P2P.UseP2P || device.DRoute.SuperNode.UseSuperNode) {
if !(device.EdgeConfig.DynamicRoute.P2P.UseP2P || device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode) {
return
}
if device.DRoute.ConnTimeOut == 0 {
if device.EdgeConfig.DynamicRoute.ConnTimeOut == 0 {
return
}
timeout := mtypes.S2TD(device.EdgeConfig.DynamicRoute.ConnTimeOut)
for {
device.event_tryendpoint <- struct{}{}
time.Sleep(path.S2TD(device.DRoute.ConnTimeOut))
time.Sleep(timeout)
}
}
func (device *Device) RoutineSendPing() {
if !(device.DRoute.P2P.UseP2P || device.DRoute.SuperNode.UseSuperNode) {
if !(device.EdgeConfig.DynamicRoute.P2P.UseP2P || device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode) {
return
}
timeout := mtypes.S2TD(device.EdgeConfig.DynamicRoute.SendPingInterval)
for {
packet, usage, _ := device.GeneratePingPacket(device.ID, 0)
device.SpreadPacket(make(map[mtypes.Vertex]bool), usage, packet, MessageTransportOffsetContent)
time.Sleep(path.S2TD(device.DRoute.SendPingInterval))
time.Sleep(timeout)
}
}
func (device *Device) RoutineRegister() {
if !(device.DRoute.SuperNode.UseSuperNode) {
if !(device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode) {
return
}
timeout := mtypes.S2TD(device.EdgeConfig.DynamicRoute.SendPingInterval)
_ = <-device.Event_Supernode_OK
for {
body, _ := mtypes.GetByte(mtypes.RegisterMsg{
@ -730,17 +734,18 @@ func (device *Device) RoutineRegister() {
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
device.Send2Super(path.Register, buf, MessageTransportOffsetContent)
time.Sleep(path.S2TD(device.DRoute.SendPingInterval))
time.Sleep(timeout)
}
}
func (device *Device) RoutinePostPeerInfo() {
if !(device.DRoute.SuperNode.UseSuperNode) {
if !(device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode) {
return
}
if device.DRoute.SuperNode.HttpPostInterval <= 0 {
if device.EdgeConfig.DynamicRoute.SuperNode.HttpPostInterval <= 0 {
return
}
timeout := mtypes.S2TD(device.EdgeConfig.DynamicRoute.SuperNode.HttpPostInterval)
for {
// Stat all latency
device.peers.RLock()
@ -753,7 +758,7 @@ func (device *Device) RoutinePostPeerInfo() {
Src_nodeID: device.ID,
Dst_nodeID: id,
Timediff: peer.SingleWayLatency,
TimeToAlive: time.Now().Sub(*peer.LastPacketReceivedAdd1Sec.Load().(*time.Time)).Seconds() + device.DRoute.PeerAliveTimeout,
TimeToAlive: time.Now().Sub(*peer.LastPacketReceivedAdd1Sec.Load().(*time.Time)).Seconds() + device.EdgeConfig.DynamicRoute.PeerAliveTimeout,
}
pongs = append(pongs, pong)
if device.LogLevel.LogControl {
@ -766,20 +771,22 @@ func (device *Device) RoutinePostPeerInfo() {
// Prepare post paramater and post body
LocalV4s := make(map[string]float64)
LocalV6s := make(map[string]float64)
if !device.peers.LocalV4.Equal(net.IP{}) {
LocalV4 := net.UDPAddr{
IP: device.peers.LocalV4,
Port: int(device.net.port),
}
if !device.EdgeConfig.DynamicRoute.SuperNode.SkipLocalIP {
if !device.peers.LocalV4.Equal(net.IP{}) {
LocalV4 := net.UDPAddr{
IP: device.peers.LocalV4,
Port: int(device.net.port),
}
LocalV4s[LocalV4.String()] = 100
}
if !device.peers.LocalV6.Equal(net.IP{}) {
LocalV6 := net.UDPAddr{
IP: device.peers.LocalV6,
Port: int(device.net.port),
LocalV4s[LocalV4.String()] = 100
}
if !device.peers.LocalV6.Equal(net.IP{}) {
LocalV6 := net.UDPAddr{
IP: device.peers.LocalV6,
Port: int(device.net.port),
}
LocalV4s[LocalV6.String()] = 100
}
LocalV4s[LocalV6.String()] = 100
}
body, _ := mtypes.GetByte(mtypes.API_report_peerinfo{
@ -796,7 +803,7 @@ func (device *Device) RoutinePostPeerInfo() {
tokenString, err := token.SignedString(device.JWTSecret[:])
// Construct post request
client := &http.Client{}
downloadurl := device.DRoute.SuperNode.APIUrl + "/post/nodeinfo"
downloadurl := device.EdgeConfig.DynamicRoute.SuperNode.APIUrl + "/post/nodeinfo"
req, err := http.NewRequest("POST", downloadurl, bytes.NewReader(body))
q := req.URL.Query()
q.Add("NodeID", device.ID.ToString())
@ -818,7 +825,7 @@ func (device *Device) RoutinePostPeerInfo() {
resp.Body.Close()
}
time.Sleep(mtypes.S2TD(device.DRoute.SuperNode.HttpPostInterval * 0.8))
time.Sleep(timeout)
}
}
@ -826,8 +833,8 @@ func (device *Device) RoutineRecalculateNhTable() {
if device.graph.TimeoutCheckInterval == 0 {
return
}
if !device.DRoute.P2P.UseP2P {
if !device.EdgeConfig.DynamicRoute.P2P.UseP2P {
return
}
for {
@ -842,21 +849,23 @@ func (device *Device) RoutineRecalculateNhTable() {
}
func (device *Device) RoutineSpreadAllMyNeighbor() {
if !device.DRoute.P2P.UseP2P {
if !device.EdgeConfig.DynamicRoute.P2P.UseP2P {
return
}
timeout := mtypes.S2TD(device.EdgeConfig.DynamicRoute.P2P.SendPeerInterval)
for {
device.process_RequestPeerMsg(mtypes.QueryPeerMsg{
Request_ID: uint32(mtypes.Broadcast),
})
time.Sleep(path.S2TD(device.DRoute.P2P.SendPeerInterval))
time.Sleep(timeout)
}
}
func (device *Device) RoutineResetConn() {
if device.ResetConnInterval <= 0.01 {
if device.EdgeConfig.ResetConnInterval <= 0.01 {
return
}
timeout := mtypes.S2TD(device.EdgeConfig.ResetConnInterval)
for {
for _, peer := range device.peers.keyMap {
if !peer.StaticConn { //Do not reset connecton for dynamic peer
@ -871,15 +880,15 @@ func (device *Device) RoutineResetConn() {
continue
}
}
time.Sleep(path.S2TD(device.ResetConnInterval))
time.Sleep(timeout)
}
}
func (device *Device) RoutineClearL2FIB() {
if device.fibTimeout <= 0.01 {
if device.EdgeConfig.L2FIBTimeout <= 0.01 {
return
}
timeout := path.S2TD(device.fibTimeout)
timeout := mtypes.S2TD(device.EdgeConfig.L2FIBTimeout)
for {
device.l2fib.Range(func(k interface{}, v interface{}) bool {
val := v.(*IdAndTime)

View File

@ -266,7 +266,7 @@ func (device *Device) RoutineReadFromTUN() {
EgBody.SetSrc(device.ID)
EgBody.SetDst(dst_nodeID)
EgBody.SetPacketLength(uint16(packet_len))
EgBody.SetTTL(device.DefaultTTL)
EgBody.SetTTL(device.EdgeConfig.DefaultTTL)
elem.Type = path.NormalPacket
if packet_len <= 12 {
if device.LogLevel.LogNormal {

View File

@ -412,6 +412,8 @@ curl "http://127.0.0.1:3000/api/peer/del?privkey=IJtpnkm9ytbuCukx4VBMENJKuLngo9K
1. `pubkeyv6`: Super node的IPv6公鑰
1. `apiurl`: Super node的HTTP(S) API連線地址
1. `supernodeinfotimeout`: Supernode Timeout
1. `httppostinterval`: Edge定期向supernode回報peer資訊的間隔(秒)，例如 `15`
1. `skiplocalip`: 打洞時一律使用supernode蒐集到的外部ip不使用edge自行回報的local ip
### Super node本身的設定檔

View File

@ -16,7 +16,7 @@ l2fibtimeout: 3600
privkey: 6GyDagZKhbm5WNqMiRHhkf43RlbMJ34IieTlIuvfJ1M=
listenport: 0
loglevel:
loglevel: normal
loglevel: error
logtransit: true
logcontrol: true
lognormal: true
@ -28,6 +28,7 @@ dynamicroute:
dupchecktimeout: 40
conntimeout: 20
connnexttry: 5
additionalcost: 10
savenewpeers: true
supernode:
usesupernode: true
@ -37,14 +38,12 @@ dynamicroute:
connurlv6: ''
pubkeyv6: HCfL6YJtpJEGHTlJ2LgVXIWKB/K95P57LHTJ42ZG8VI=
apiurl: http://127.0.0.1:3000/api
supernodeinfotimeout: 50
httppostinterval: 15
skiplocalip: false
httppostinterval: 15
supernodeinfotimeout: 50
p2p:
usep2p: false
sendpeerinterval: 20
additionalcost: 0
graphrecalculatesetting:
staticmode: false
jittertolerance: 20

View File

@ -16,7 +16,7 @@ l2fibtimeout: 3600
privkey: IJtpnkm9ytbuCukx4VBMENJKuLngo9KSsS1D60BqonQ=
listenport: 0
loglevel:
loglevel: normal
loglevel: error
logtransit: true
logcontrol: true
lognormal: true
@ -28,6 +28,7 @@ dynamicroute:
dupchecktimeout: 40
conntimeout: 20
connnexttry: 5
additionalcost: 10
savenewpeers: true
supernode:
usesupernode: true
@ -43,7 +44,6 @@ dynamicroute:
p2p:
usep2p: false
sendpeerinterval: 20
additionalcost: 0
graphrecalculatesetting:
staticmode: false
jittertolerance: 20

View File

@ -16,7 +16,7 @@ l2fibtimeout: 3600
privkey: OH8BsVUU2Rqzeu9B2J5GPG8PUmxWfX8uVvNFZKhVF3o=
listenport: 0
loglevel:
loglevel: normal
loglevel: error
logtransit: true
logcontrol: true
lognormal: true
@ -28,6 +28,7 @@ dynamicroute:
dupchecktimeout: 40
conntimeout: 20
connnexttry: 5
additionalcost: 10
savenewpeers: true
supernode:
usesupernode: true
@ -37,13 +38,12 @@ dynamicroute:
connurlv6: ''
pubkeyv6: HCfL6YJtpJEGHTlJ2LgVXIWKB/K95P57LHTJ42ZG8VI=
apiurl: http://127.0.0.1:3000/api
supernodeinfotimeout: 50
httppostinterval: 15
skiplocalip: false
httppostinterval: 15
supernodeinfotimeout: 50
p2p:
usep2p: false
sendpeerinterval: 20
additionalcost: 0
graphrecalculatesetting:
staticmode: false
jittertolerance: 20
@ -74,4 +74,4 @@ dynamicroute:
- time.windows.com
nexthoptable: {}
resetconninterval: 86400
peers: []
peers: []

View File

@ -21,7 +21,7 @@ graphrecalculatesetting:
jittertolerancemultiplier: 1.01
nodereporttimeout: 70
timeoutcheckinterval: 5
recalculatecooldown: 1
recalculatecooldown: 5
nexthoptable:
1:
2: 2
@ -34,14 +34,11 @@ peers:
name: Node_01
pubkey: ZqzLVSbXzjppERslwbf2QziWruW3V/UIx9oqwU8Fn3I=
pskey: iPM8FXfnHVzwjguZHRW9bLNY+h7+B1O2oTJtktptQkI=
additionalcost: 1000
additionalcost: 10
skiplocalip: false
- nodeid: 2
name: Node_02
pubkey: dHeWQtlTPQGy87WdbUARS4CtwVaR2y7IQ1qcX4GKSXk=
pskey: juJMQaGAaeSy8aDsXSKNsPZv/nFiPj4h/1G70tGYygs=
additionalcost: 1000
- nodeid: 100
name: Node_100
pubkey: 6SuqwPH9pxGigtZDNp3PABZYfSEzDaBSwuThsUUAcyM=
pskey: ""
additionalcost: 1000
additionalcost: 10
skiplocalip: false

View File

@ -11,6 +11,7 @@ import (
"io/ioutil"
"net"
"strconv"
"strings"
"sync"
"time"
@ -157,7 +158,7 @@ func get_api_peers(old_State_hash [32]byte) (api_peerinfo mtypes.API_Peers, Stat
PSKey: peerinfo.PSKey,
Connurl: &mtypes.API_connurl{},
}
if httpobj.http_PeerState[peerinfo.PubKey].LastSeen.Add(path.S2TD(httpobj.http_sconfig.GraphRecalculateSetting.NodeReportTimeout)).After(time.Now()) {
if httpobj.http_PeerState[peerinfo.PubKey].LastSeen.Add(mtypes.S2TD(httpobj.http_sconfig.GraphRecalculateSetting.NodeReportTimeout)).After(time.Now()) {
connV4 := httpobj.http_device4.GetConnurl(peerinfo.NodeID)
connV6 := httpobj.http_device6.GetConnurl(peerinfo.NodeID)
if connV4 != "" {
@ -166,8 +167,10 @@ func get_api_peers(old_State_hash [32]byte) (api_peerinfo mtypes.API_Peers, Stat
if connV6 != "" {
api_peerinfo[peerinfo.PubKey].Connurl.ExternalV6 = map[string]float64{connV6: 6}
}
api_peerinfo[peerinfo.PubKey].Connurl.LocalV4 = httpobj.http_PeerIPs[peerinfo.PubKey].LocalIPv4
api_peerinfo[peerinfo.PubKey].Connurl.LocalV6 = httpobj.http_PeerIPs[peerinfo.PubKey].LocalIPv6
if !peerinfo.SkipLocalIP {
api_peerinfo[peerinfo.PubKey].Connurl.LocalV4 = httpobj.http_PeerIPs[peerinfo.PubKey].LocalIPv4
api_peerinfo[peerinfo.PubKey].Connurl.LocalV6 = httpobj.http_PeerIPs[peerinfo.PubKey].LocalIPv6
}
}
}
@ -199,7 +202,7 @@ func get_peerinfo(w http.ResponseWriter, r *http.Request) {
defer httpobj.RUnlock()
if httpobj.http_PeerID2Info[NodeID].PubKey != PubKey {
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("NodeID and PunKey are not match"))
w.Write([]byte("Paramater NodeID: NodeID and PubKey are not match"))
return
}
@ -228,6 +231,10 @@ func get_peerinfo(w http.ResponseWriter, r *http.Request) {
} else {
peerinfo.PSKey = ""
}
if httpobj.http_PeerID2Info[NodeID].SkipLocalIP { // Clear all local IP
peerinfo.Connurl.LocalV4 = make(map[string]float64)
peerinfo.Connurl.LocalV6 = make(map[string]float64)
}
http_PeerInfo_2peer[PeerPubKey] = peerinfo
}
api_peerinfo_str_byte, _ := json.Marshal(&http_PeerInfo_2peer)
@ -239,7 +246,7 @@ func get_peerinfo(w http.ResponseWriter, r *http.Request) {
}
}
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("State not correct"))
w.Write([]byte("Paramater State: State not correct"))
}
func get_nhtable(w http.ResponseWriter, r *http.Request) {
@ -451,26 +458,33 @@ func peeradd(w http.ResponseWriter, r *http.Request) { //Waiting for test
}
r.ParseForm()
NodeID, err := extractParamsVertex(r.Form, "nodeid", w)
NodeID, err := extractParamsVertex(r.Form, "NodeID", w)
if err != nil {
return
}
Name, err := extractParamsStr(r.Form, "name", w)
Name, err := extractParamsStr(r.Form, "Name", w)
if err != nil {
return
}
AdditionalCost, err := extractParamsFloat(r.Form, "additionalcost", 64, w)
AdditionalCost, err := extractParamsFloat(r.Form, "AdditionalCost", 64, w)
if err != nil {
return
}
PubKey, err := extractParamsStr(r.Form, "pubkey", w)
PubKey, err := extractParamsStr(r.Form, "PubKey", w)
if err != nil {
return
}
PSKey, err := extractParamsStr(r.Form, "pskey", nil)
SkipLocalIPS, err := extractParamsStr(r.Form, "SkipLocalIP", w)
if err != nil {
return
}
SkipLocalIP := strings.EqualFold(SkipLocalIPS, "true")
PSKey, err := extractParamsStr(r.Form, "PSKey", nil)
httpobj.Lock()
defer httpobj.Unlock()
@ -478,32 +492,32 @@ func peeradd(w http.ResponseWriter, r *http.Request) { //Waiting for test
for _, peerinfo := range httpobj.http_sconfig.Peers {
if peerinfo.NodeID == NodeID {
w.WriteHeader(http.StatusConflict)
w.Write([]byte("Paramater nodeid: NodeID exists"))
w.Write([]byte("Paramater NodeID: NodeID exists"))
return
}
if peerinfo.Name == Name {
w.WriteHeader(http.StatusConflict)
w.Write([]byte("Paramater name: Node name exists"))
w.Write([]byte("Paramater Name: Node name exists"))
return
}
if peerinfo.PubKey == PubKey {
w.WriteHeader(http.StatusConflict)
w.Write([]byte("Paramater pubkey: PubKey exists"))
w.Write([]byte("Paramater PubKey: PubKey exists"))
return
}
}
if httpobj.http_sconfig.GraphRecalculateSetting.StaticMode == true {
NhTableStr := r.Form.Get("nexthoptable")
NhTableStr := r.Form.Get("NextHopTable")
if NhTableStr == "" {
w.WriteHeader(http.StatusExpectationFailed)
w.Write([]byte("Paramater nexthoptable: Your NextHopTable is in static mode.\nPlease provide your new NextHopTable in \"nexthoptable\" parmater in json format"))
w.Write([]byte("Paramater NextHopTable: Your NextHopTable is in static mode.\nPlease provide your new NextHopTable in \"NextHopTable\" parmater in json format"))
return
}
var NewNhTable mtypes.NextHopTable
err := json.Unmarshal([]byte(NhTableStr), &NewNhTable)
if err != nil {
w.WriteHeader(http.StatusExpectationFailed)
w.Write([]byte(fmt.Sprintf("Paramater nexthoptable: \"%v\", %v", NhTableStr, err)))
w.Write([]byte(fmt.Sprintf("Paramater NextHopTable: \"%v\", %v", NhTableStr, err)))
return
}
err = checkNhTable(NewNhTable, append(httpobj.http_sconfig.Peers, mtypes.SuperPeerInfo{
@ -512,6 +526,7 @@ func peeradd(w http.ResponseWriter, r *http.Request) { //Waiting for test
PubKey: PubKey,
PSKey: PSKey,
AdditionalCost: AdditionalCost,
SkipLocalIP: SkipLocalIP,
}))
if err != nil {
w.WriteHeader(http.StatusExpectationFailed)
@ -526,6 +541,7 @@ func peeradd(w http.ResponseWriter, r *http.Request) { //Waiting for test
PubKey: PubKey,
PSKey: PSKey,
AdditionalCost: AdditionalCost,
SkipLocalIP: SkipLocalIP,
})
if err != nil {
w.WriteHeader(http.StatusExpectationFailed)
@ -538,6 +554,7 @@ func peeradd(w http.ResponseWriter, r *http.Request) { //Waiting for test
PubKey: PubKey,
PSKey: PSKey,
AdditionalCost: AdditionalCost,
SkipLocalIP: SkipLocalIP,
})
mtypesBytes, _ := yaml.Marshal(httpobj.http_sconfig)
ioutil.WriteFile(httpobj.http_sconfig_path, mtypesBytes, 0644)
@ -545,6 +562,8 @@ func peeradd(w http.ResponseWriter, r *http.Request) { //Waiting for test
httpobj.http_econfig_tmp.NodeName = Name
httpobj.http_econfig_tmp.PrivKey = "Your_Private_Key"
httpobj.http_econfig_tmp.DynamicRoute.SuperNode.PSKey = PSKey
httpobj.http_econfig_tmp.DynamicRoute.AdditionalCost = AdditionalCost
httpobj.http_econfig_tmp.DynamicRoute.SuperNode.SkipLocalIP = SkipLocalIP
ret_str_byte, _ := yaml.Marshal(&httpobj.http_econfig_tmp)
w.WriteHeader(http.StatusOK)
w.Write(ret_str_byte)

View File

@ -109,14 +109,14 @@ func printExampleSuperConf() {
Name: "Node_01",
PubKey: "ZqzLVSbXzjppERslwbf2QziWruW3V/UIx9oqwU8Fn3I=",
PSKey: "iPM8FXfnHVzwjguZHRW9bLNY+h7+B1O2oTJtktptQkI=",
AdditionalCost: 0,
AdditionalCost: 10,
},
{
NodeID: 2,
Name: "Node_02",
PubKey: "dHeWQtlTPQGy87WdbUARS4CtwVaR2y7IQ1qcX4GKSXk=",
PSKey: "juJMQaGAaeSy8aDsXSKNsPZv/nFiPj4h/1G70tGYygs=",
AdditionalCost: 0,
AdditionalCost: 10,
},
},
}
@ -244,7 +244,7 @@ func Super(configPath string, useUAPI bool, printExample bool, bindmode string)
}
go Event_server_event_hendler(httpobj.http_graph, httpobj.http_super_chains)
go RoutinePushSettings(path.S2TD(sconfig.RePushConfigInterval))
go RoutinePushSettings(mtypes.S2TD(sconfig.RePushConfigInterval))
go RoutineTimeoutCheck()
go HttpServer(sconfig.ListenPort, "/api")
@ -358,7 +358,7 @@ func super_peerdel_notify(toDelete mtypes.Vertex, PubKey string) {
peer6 := httpobj.http_device6.LookupPeerByStr(PubKey)
httpobj.http_device6.SendPacket(peer6, path.UpdateError, buf, device.MessageTransportOffsetContent)
time.Sleep(path.S2TD(0.1))
time.Sleep(mtypes.S2TD(0.1))
}
httpobj.http_device4.RemovePeerByID(toDelete)
httpobj.http_device6.RemovePeerByID(toDelete)
@ -439,7 +439,7 @@ func RoutinePushSettings(interval time.Duration) {
}
PushNhTable(force)
PushPeerinfo(force)
time.Sleep(path.S2TD(1))
time.Sleep(mtypes.S2TD(1))
}
}
@ -475,7 +475,7 @@ func PushNhTable(force bool) {
header.SetTTL(0)
copy(buf[path.EgHeaderLen:], body)
for pkstr, peerstate := range httpobj.http_PeerState {
isAlive := peerstate.LastSeen.Add(path.S2TD(httpobj.http_sconfig.GraphRecalculateSetting.NodeReportTimeout)).After(time.Now())
isAlive := peerstate.LastSeen.Add(mtypes.S2TD(httpobj.http_sconfig.GraphRecalculateSetting.NodeReportTimeout)).After(time.Now())
if !isAlive {
continue
}
@ -507,7 +507,7 @@ func PushPeerinfo(force bool) {
header.SetTTL(0)
copy(buf[path.EgHeaderLen:], body)
for pkstr, peerstate := range httpobj.http_PeerState {
isAlive := peerstate.LastSeen.Add(path.S2TD(httpobj.http_sconfig.GraphRecalculateSetting.NodeReportTimeout)).After(time.Now())
isAlive := peerstate.LastSeen.Add(mtypes.S2TD(httpobj.http_sconfig.GraphRecalculateSetting.NodeReportTimeout)).After(time.Now())
if !isAlive {
continue
}

View File

@ -80,6 +80,7 @@ type SuperPeerInfo struct {
PubKey string
PSKey string
AdditionalCost float64
SkipLocalIP bool
}
type LoggerInfo struct {
@ -110,6 +111,7 @@ type DynamicRouteInfo struct {
DupCheckTimeout float64
ConnTimeOut float64
ConnNextTry float64
AdditionalCost float64
SaveNewPeers bool
SuperNode SuperInfo
P2P P2Pinfo
@ -140,7 +142,6 @@ type SuperInfo struct {
type P2Pinfo struct {
UseP2P bool
SendPeerInterval float64
AdditionalCost float64
GraphRecalculateSetting GraphRecalculateSetting
}

View File

@ -5,6 +5,7 @@ import (
"sort"
"time"
"github.com/KusakabeSi/EtherGuard-VPN/mtypes"
orderedmap "github.com/KusakabeSi/EtherGuard-VPN/orderdmap"
"github.com/beevik/ntp"
)
@ -43,7 +44,7 @@ func (g *IG) RoutineSyncTime() {
}
for {
g.SyncTimeMultiple(g.ntp_info.MaxServerUse)
time.Sleep(S2TD(g.ntp_info.SyncTimeInterval))
time.Sleep(mtypes.S2TD(g.ntp_info.SyncTimeInterval))
}
}
@ -70,7 +71,7 @@ func (g *IG) SyncTimeMultiple(count int) {
}
for _, url := range url2sync {
g.ntp_wg.Add(1)
go g.SyncTime(url, S2TD(g.ntp_info.NTPTimeout))
go g.SyncTime(url, mtypes.S2TD(g.ntp_info.NTPTimeout))
}
g.ntp_wg.Wait()
g.ntp_servers.Sort(func(a *orderedmap.Pair, b *orderedmap.Pair) bool {

View File

@ -59,18 +59,14 @@ type IG struct {
ntp_servers orderedmap.OrderedMap // serverurl:lentancy
}
func S2TD(secs float64) time.Duration {
return time.Duration(secs * float64(time.Second))
}
func NewGraph(num_node int, IsSuperMode bool, theconfig mtypes.GraphRecalculateSetting, ntpinfo mtypes.NTPinfo, loglevel mtypes.LoggerInfo) *IG {
g := IG{
edgelock: &sync.RWMutex{},
StaticMode: theconfig.StaticMode,
JitterTolerance: theconfig.JitterTolerance,
JitterToleranceMultiplier: theconfig.JitterToleranceMultiplier,
RecalculateCoolDown: S2TD(theconfig.RecalculateCoolDown),
TimeoutCheckInterval: S2TD(theconfig.TimeoutCheckInterval),
RecalculateCoolDown: mtypes.S2TD(theconfig.RecalculateCoolDown),
TimeoutCheckInterval: mtypes.S2TD(theconfig.TimeoutCheckInterval),
ntp_info: ntpinfo,
}
g.Vert = make(map[mtypes.Vertex]bool, num_node)