Move TTL to header, remove packet_len, increase MTU to 1404

This commit is contained in:
Kusakabe Si 2021-12-12 11:35:20 +00:00
parent ad4ffff985
commit 3f6001dcdf
26 changed files with 141 additions and 167 deletions

View File

@ -383,6 +383,7 @@ func NewDevice(tapDevice tap.Device, id mtypes.Vertex, bind conn.Bind, logger *L
go device.RoutineClearL2FIB()
go device.RoutineRecalculateNhTable()
go device.RoutinePostPeerInfo(device.Chan_HttpPostStart)
}
// create queues

View File

@ -57,16 +57,16 @@ const (
MessageInitiationSize = 145 // size of handshake initiation message
MessageResponseSize = 89 // size of response message
MessageCookieReplySize = 61 // size of cookie reply message
MessageTransportHeaderSize = 13 // size of data preceding content in transport message
MessageTransportHeaderSize = 14 // size of data preceding content in transport message
MessageTransportSize = MessageTransportHeaderSize + poly1305.TagSize // size of empty transport
MessageKeepaliveSize = MessageTransportSize // size of keepalive
MessageHandshakeSize = MessageInitiationSize // size of largest handshake related message
)
const (
MessageTransportOffsetReceiver = 1
MessageTransportOffsetCounter = 5
MessageTransportOffsetContent = 13
MessageTransportOffsetReceiver = 2
MessageTransportOffsetCounter = 6
MessageTransportOffsetContent = 14
)
/* Type is an 8-bit field, followed by 3 nul bytes,

View File

@ -7,7 +7,6 @@ package device
import (
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
@ -36,6 +35,7 @@ type QueueHandshakeElement struct {
type QueueInboundElement struct {
Type path.Usage
TTL uint8
sync.Mutex
buffer *[MaxMessageSize]byte
packet []byte
@ -127,6 +127,7 @@ func (device *Device) RoutineReceiveIncoming(recv conn.ReceiveFunc) {
packet := buffer[:size]
msgType := path.Usage(packet[0])
msgTTL := uint8(packet[1])
msgType_wg := msgType
if msgType >= path.MessageTransportType {
msgType_wg = path.MessageTransportType
@ -167,6 +168,7 @@ func (device *Device) RoutineReceiveIncoming(recv conn.ReceiveFunc) {
peer := value.peer
elem := device.GetInboundElement()
elem.Type = msgType
elem.TTL = msgTTL
elem.packet = packet
elem.buffer = buffer
elem.keypair = keypair
@ -428,7 +430,6 @@ func (peer *Peer) RoutineSequentialReceiver() {
should_process := false
should_receive := false
should_transfer := false
packetlan := 0
currentTime := time.Now()
storeTime := currentTime.Add(time.Second)
if currentTime.After((*peer.LastPacketReceivedAdd1Sec.Load().(*time.Time))) {
@ -470,15 +471,6 @@ func (peer *Peer) RoutineSequentialReceiver() {
dst_nodeID = EgHeader.GetDst()
packet_type = elem.Type
packetlan = int(EgHeader.GetPacketLength() + path.EgHeaderLen)
if packetlan > len(elem.packet) {
if device.LogLevel.LogTransit {
fmt.Printf("Transit: invalid packet %v PL:%v RealPL:%v S:%v D:%v From:%v IP:%v\n", base64.StdEncoding.EncodeToString([]byte(elem.packet)), packetlan, len(elem.packet), src_nodeID.ToString(), dst_nodeID.ToString(), peer.ID.ToString(), peer.endpoint.DstToString())
}
goto skip
}
elem.packet = elem.packet[:packetlan] // EG header + true packet
if device.IsSuperNode {
if packet_type.IsControl_Edge2Super() {
should_process = true
@ -559,18 +551,18 @@ func (peer *Peer) RoutineSequentialReceiver() {
}
}
if should_transfer {
l2ttl := EgHeader.GetTTL()
l2ttl := elem.TTL
if l2ttl == 0 {
device.log.Verbosef("TTL is 0 %v", dst_nodeID)
} else {
EgHeader.SetTTL(l2ttl - 1)
l2ttl = l2ttl - 1
if dst_nodeID == mtypes.NodeID_Broadcast { //Regular transfer algorithm
device.TransitBoardcastPacket(src_nodeID, peer.ID, elem.Type, elem.packet, MessageTransportOffsetContent)
device.TransitBoardcastPacket(src_nodeID, peer.ID, elem.Type, l2ttl, elem.packet, MessageTransportOffsetContent)
} else if dst_nodeID == mtypes.NodeID_Spread { // Control messages will be sent to every known node regardless of connectivity
skip_list := make(map[mtypes.Vertex]bool)
skip_list[src_nodeID] = true // Don't send back to the incoming peer or the source peer
skip_list[peer.ID] = true
device.SpreadPacket(skip_list, elem.Type, elem.packet, MessageTransportOffsetContent)
device.SpreadPacket(skip_list, elem.Type, l2ttl, elem.packet, MessageTransportOffsetContent)
} else {
next_id := device.graph.Next(device.ID, dst_nodeID)
@ -581,7 +573,7 @@ func (peer *Peer) RoutineSequentialReceiver() {
if device.LogLevel.LogTransit {
fmt.Printf("Transit: Transfer From:%v Me:%v To:%v S:%v D:%v\n", peer.ID, device.ID, peer_out.ID, src_nodeID.ToString(), dst_nodeID.ToString())
}
go device.SendPacket(peer_out, elem.Type, elem.packet, MessageTransportOffsetContent)
go device.SendPacket(peer_out, elem.Type, l2ttl, elem.packet, MessageTransportOffsetContent)
}
}
}

View File

@ -28,7 +28,7 @@ import (
"github.com/google/gopacket/layers"
)
func (device *Device) SendPacket(peer *Peer, usage path.Usage, packet []byte, offset int) {
func (device *Device) SendPacket(peer *Peer, usage path.Usage, ttl uint8, packet []byte, offset int) {
if peer == nil {
return
} else if peer.endpoint == nil {
@ -66,6 +66,7 @@ func (device *Device) SendPacket(peer *Peer, usage path.Usage, packet []byte, of
elem = device.NewOutboundElement()
copy(elem.buffer[offset:offset+len(packet)], packet)
elem.Type = usage
elem.TTL = ttl
elem.packet = elem.buffer[offset : offset+len(packet)]
if peer.isRunning.Get() {
peer.StagePacket(elem)
@ -74,7 +75,7 @@ func (device *Device) SendPacket(peer *Peer, usage path.Usage, packet []byte, of
}
}
func (device *Device) BoardcastPacket(skip_list map[mtypes.Vertex]bool, usage path.Usage, packet []byte, offset int) { // Send packet to all connected peers
func (device *Device) BoardcastPacket(skip_list map[mtypes.Vertex]bool, usage path.Usage, ttl uint8, packet []byte, offset int) { // Send packet to all connected peers
send_list := device.graph.GetBoardcastList(device.ID)
for node_id := range skip_list {
send_list[node_id] = false
@ -83,13 +84,13 @@ func (device *Device) BoardcastPacket(skip_list map[mtypes.Vertex]bool, usage pa
for node_id, should_send := range send_list {
if should_send {
peer_out := device.peers.IDMap[node_id]
go device.SendPacket(peer_out, usage, packet, offset)
go device.SendPacket(peer_out, usage, ttl, packet, offset)
}
}
device.peers.RUnlock()
}
func (device *Device) SpreadPacket(skip_list map[mtypes.Vertex]bool, usage path.Usage, packet []byte, offset int) { // Send packet to all peers no matter it is alive
func (device *Device) SpreadPacket(skip_list map[mtypes.Vertex]bool, usage path.Usage, ttl uint8, packet []byte, offset int) { // Send packet to all peers no matter it is alive
device.peers.RLock()
for peer_id, peer_out := range device.peers.IDMap {
if _, ok := skip_list[peer_id]; ok {
@ -98,12 +99,12 @@ func (device *Device) SpreadPacket(skip_list map[mtypes.Vertex]bool, usage path.
}
continue
}
go device.SendPacket(peer_out, usage, packet, MessageTransportOffsetContent)
go device.SendPacket(peer_out, usage, ttl, packet, offset)
}
device.peers.RUnlock()
}
func (device *Device) TransitBoardcastPacket(src_nodeID mtypes.Vertex, in_id mtypes.Vertex, usage path.Usage, packet []byte, offset int) {
func (device *Device) TransitBoardcastPacket(src_nodeID mtypes.Vertex, in_id mtypes.Vertex, usage path.Usage, ttl uint8, packet []byte, offset int) {
node_boardcast_list, errs := device.graph.GetBoardcastThroughList(device.ID, in_id, src_nodeID)
if device.LogLevel.LogControl {
for _, err := range errs {
@ -116,19 +117,19 @@ func (device *Device) TransitBoardcastPacket(src_nodeID mtypes.Vertex, in_id mty
if device.LogLevel.LogTransit {
fmt.Printf("Transit: Transfer packet from %d through %d to %d\n", in_id, device.ID, peer_out.ID)
}
go device.SendPacket(peer_out, usage, packet, offset)
go device.SendPacket(peer_out, usage, ttl, packet, offset)
}
device.peers.RUnlock()
}
func (device *Device) Send2Super(usage path.Usage, packet []byte, offset int) {
func (device *Device) Send2Super(usage path.Usage, ttl uint8, packet []byte, offset int) {
device.peers.RLock()
if device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode {
for _, peer_out := range device.peers.SuperPeer {
/*if device.LogTransit {
fmt.Printf("Send to supernode %s\n", peer_out.endpoint.DstToString())
}*/
go device.SendPacket(peer_out, usage, packet, offset)
go device.SendPacket(peer_out, usage, ttl, packet, offset)
}
}
device.peers.RUnlock()
@ -237,32 +238,30 @@ func (device *Device) sprint_received(msg_type path.Usage, body []byte) string {
}
}
func (device *Device) GeneratePingPacket(src_nodeID mtypes.Vertex, request_reply int) ([]byte, path.Usage, error) {
func (device *Device) GeneratePingPacket(src_nodeID mtypes.Vertex, request_reply int) ([]byte, path.Usage, uint8, error) {
body, err := mtypes.GetByte(&mtypes.PingMsg{
Src_nodeID: src_nodeID,
Time: device.graph.GetCurrentTime(),
RequestReply: request_reply,
})
if err != nil {
return nil, path.PingPacket, err
return nil, path.PingPacket, 0, err
}
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[0:path.EgHeaderLen], device.EdgeConfig.Interface.MTU)
if err != nil {
return nil, path.PingPacket, err
return nil, path.PingPacket, 0, err
}
header.SetDst(mtypes.NodeID_Spread)
header.SetTTL(0)
header.SetSrc(device.ID)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
return buf, path.PingPacket, nil
return buf, path.PingPacket, 0, nil
}
func (device *Device) SendPing(peer *Peer, times int, replies int, interval float64) {
for i := 0; i < times; i++ {
packet, usage, _ := device.GeneratePingPacket(device.ID, replies)
device.SendPacket(peer, usage, packet, MessageTransportOffsetContent)
packet, usage, ttl, _ := device.GeneratePingPacket(device.ID, replies)
device.SendPacket(peer, usage, ttl, packet, MessageTransportOffsetContent)
time.Sleep(mtypes.S2TD(interval))
}
}
@ -308,11 +307,9 @@ func (device *Device) server_process_RegisterMsg(peer *Peer, content mtypes.Regi
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[:path.EgHeaderLen], device.EdgeConfig.Interface.MTU)
header.SetSrc(device.ID)
header.SetTTL(0)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
header.SetDst(mtypes.NodeID_SuperNode)
device.SendPacket(peer, path.ServerUpdate, buf, MessageTransportOffsetContent)
device.SendPacket(peer, path.ServerUpdate, 0, buf, MessageTransportOffsetContent)
return nil
}
device.Chan_server_register <- content
@ -351,16 +348,14 @@ func (device *Device) process_ping(peer *Peer, content mtypes.PingMsg) error {
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[:path.EgHeaderLen], device.EdgeConfig.Interface.MTU)
header.SetSrc(device.ID)
header.SetTTL(device.EdgeConfig.DefaultTTL)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
if device.EdgeConfig.DynamicRoute.SuperNode.UseSuperNode {
header.SetDst(mtypes.NodeID_SuperNode)
device.Send2Super(path.PongPacket, buf, MessageTransportOffsetContent)
device.Send2Super(path.PongPacket, 0, buf, MessageTransportOffsetContent)
}
if device.EdgeConfig.DynamicRoute.P2P.UseP2P {
header.SetDst(mtypes.NodeID_Spread)
device.SpreadPacket(make(map[mtypes.Vertex]bool), path.PongPacket, buf, MessageTransportOffsetContent)
device.SpreadPacket(make(map[mtypes.Vertex]bool), path.PongPacket, device.EdgeConfig.DefaultTTL, buf, MessageTransportOffsetContent)
}
go device.SendPing(peer, content.RequestReply, 0, 3)
return nil
@ -382,10 +377,8 @@ func (device *Device) process_pong(peer *Peer, content mtypes.PongMsg) error {
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[:path.EgHeaderLen], device.EdgeConfig.Interface.MTU)
header.SetSrc(device.ID)
header.SetTTL(device.EdgeConfig.DefaultTTL)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
device.SendPacket(peer, path.QueryPeer, buf, MessageTransportOffsetContent)
device.SendPacket(peer, path.QueryPeer, device.EdgeConfig.DefaultTTL, buf, MessageTransportOffsetContent)
}
}
return nil
@ -708,11 +701,9 @@ func (device *Device) process_RequestPeerMsg(content mtypes.QueryPeerMsg) error
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[0:path.EgHeaderLen], device.EdgeConfig.Interface.MTU)
header.SetDst(mtypes.NodeID_Spread)
header.SetTTL(device.EdgeConfig.DefaultTTL)
header.SetSrc(device.ID)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
device.SpreadPacket(make(map[mtypes.Vertex]bool), path.BroadcastPeer, buf, MessageTransportOffsetContent)
device.SpreadPacket(make(map[mtypes.Vertex]bool), path.BroadcastPeer, device.EdgeConfig.DefaultTTL, buf, MessageTransportOffsetContent)
}
device.peers.RUnlock()
}
@ -838,8 +829,8 @@ func (device *Device) RoutineSendPing(startchan <-chan struct{}) {
}
case <-waitchan:
}
packet, usage, _ := device.GeneratePingPacket(device.ID, 0)
device.SpreadPacket(make(map[mtypes.Vertex]bool), usage, packet, MessageTransportOffsetContent)
packet, usage, ttl, _ := device.GeneratePingPacket(device.ID, 0)
device.SpreadPacket(make(map[mtypes.Vertex]bool), usage, ttl, packet, MessageTransportOffsetContent)
}
}
@ -880,11 +871,9 @@ func (device *Device) RoutineRegister(startchan chan struct{}) {
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[0:path.EgHeaderLen], device.EdgeConfig.Interface.MTU)
header.SetDst(mtypes.NodeID_SuperNode)
header.SetTTL(0)
header.SetSrc(device.ID)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
device.Send2Super(path.Register, buf, MessageTransportOffsetContent)
device.Send2Super(path.Register, 0, buf, MessageTransportOffsetContent)
}
}

View File

@ -50,6 +50,7 @@ import (
type QueueOutboundElement struct {
Type path.Usage
TTL uint8
sync.Mutex
buffer *[MaxMessageSize]byte // slice holding the packet data
packet []byte // slice of "buffer" (always!)
@ -265,9 +266,8 @@ func (device *Device) RoutineReadFromTUN() {
packet_len := len(elem.packet) - path.EgHeaderLen
EgBody.SetSrc(device.ID)
EgBody.SetDst(dst_nodeID)
EgBody.SetPacketLength(uint16(packet_len))
EgBody.SetTTL(device.EdgeConfig.DefaultTTL)
elem.Type = path.NormalPacket
elem.TTL = device.EdgeConfig.DefaultTTL
if packet_len <= 12 {
if device.LogLevel.LogNormal {
fmt.Println("Normal: Invalid packet: Ethernet packet too small." + " Len:" + strconv.Itoa(packet_len))
@ -298,7 +298,7 @@ func (device *Device) RoutineReadFromTUN() {
}
}
} else {
device.BoardcastPacket(make(map[mtypes.Vertex]bool, 0), elem.Type, elem.packet, offset)
device.BoardcastPacket(make(map[mtypes.Vertex]bool, 0), elem.Type, elem.TTL, elem.packet, offset)
}
}
@ -403,10 +403,11 @@ func (device *Device) RoutineEncryption(id int) {
// populate header fields
header := elem.buffer[:MessageTransportHeaderSize]
fieldReceiver := header[1:5]
fieldNonce := header[5:13]
fieldReceiver := header[MessageTransportOffsetReceiver:MessageTransportOffsetCounter]
fieldNonce := header[MessageTransportOffsetCounter:MessageTransportHeaderSize]
header[0] = uint8(elem.Type)
header[1] = uint8(elem.TTL)
binary.LittleEndian.PutUint32(fieldReceiver, elem.keypair.remoteIndex)
binary.LittleEndian.PutUint64(fieldNonce, elem.nonce)

View File

@ -12,7 +12,7 @@ import (
"github.com/KusakabeSi/EtherGuard-VPN/tap"
)
const DefaultMTU = 1402
const DefaultMTU = 1404
func (device *Device) RoutineTUNEventReader() {
device.log.Verbosef("Routine: event worker - started")

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg
@ -19,7 +19,7 @@ L2FIBTimeout: 3600
PrivKey: 12CRJpzWOTRQDOdtROtwwWb68B4HHjSbrS1WySAkWYI=
ListenPort: 0
LogLevel:
LogLevel: error
LogLevel: verbose
LogTransit: true
LogNormal: true
LogControl: true

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4002
SendAddr: 127.0.0.1:5002
L2HeaderMode: kbdbg

View File

@ -7,7 +7,7 @@ Interface:
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1402
MTU: 1404
RecvAddr: 127.0.0.1:4100
SendAddr: 127.0.0.1:5100
L2HeaderMode: kbdbg

View File

@ -12,7 +12,7 @@ PeerAliveTimeout: 70
SendPingInterval: 15
DampingResistance: 0.9
LogLevel:
LogLevel: normal
LogLevel: verbose
LogTransit: false
LogNormal: false
LogControl: true

View File

@ -306,7 +306,7 @@ interface:
vppifaceid: 1
vppbridgeid: 4242
macaddrprefix: AA:BB:CC:DD
mtu: 1402
mtu: 1404
recvaddr: 127.0.0.1:4001
sendaddr: 127.0.0.1:5001
l2headermode: kbdbg

View File

@ -1,56 +1,65 @@
interface:
itype: fd
name: tap1
vppifaceid: 1
vppbridgeid: 4242
macaddrprefix: AA:BB:CC:DD
mtu: 1402
recvaddr: 127.0.0.1:4001
sendaddr: 127.0.0.1:5001
l2headermode: kbdbg
nodeid: 1
nodename: Node01
defaultttl: 200
l2fibtimeout: 3600
privkey: 6GyDagZKhbm5WNqMiRHhkf43RlbMJ34IieTlIuvfJ1M=
listenport: 3001
loglevel:
loglevel: normal
logtransit: true
logcontrol: true
lognormal: true
logntp: true
dynamicroute:
sendpinginterval: 16
peeralivetimeout: 30
dupchecktimeout: 40
conntimeout: 30
connnexttry: 5
savenewpeers: true
supernode:
usesupernode: true
pskey: 'iPM8FXfnHVzwjguZHRW9bLNY+h7+B1O2oTJtktptQkI='
connurlv4: 127.0.0.1:3000
pubkeyv4: LJ8KKacUcIoACTGB/9Ed9w0osrJ3WWeelzpL2u4oUic=
connurlv6: ''
pubkeyv6: HCfL6YJtpJEGHTlJ2LgVXIWKB/K95P57LHTJ42ZG8VI=
apiurl: http://127.0.0.1:3000/api
supernodeinfotimeout: 50
p2p:
usep2p: false
sendpeerinterval: 20
graphrecalculatesetting:
staticmode: false
jittertolerance: 20
jittertolerancemultiplier: 1.1
nodereporttimeout: 40
recalculatecooldown: 5
ntpconfig:
usentp: true
maxserveruse: 8
synctimeinterval: 3600
ntptimeout: 3
servers:
Interface:
IType: fd
Name: EgNet001
VPPIFaceID: 1
VPPBridgeID: 4242
MacAddrPrefix: 62:A6:A3:6D
IPv4CIDR: 192.168.76.0/24
IPv6CIDR: fd95:71cb:a3df:e586::/64
IPv6LLPrefix: fe80::a3df:0/112
MTU: 1404
RecvAddr: 127.0.0.1:4001
SendAddr: 127.0.0.1:5001
L2HeaderMode: kbdbg
NodeID: 1
NodeName: EgNet001
PostScript: ""
DefaultTTL: 200
L2FIBTimeout: 3600
PrivKey: 12CRJpzWOTRQDOdtROtwwWb68B4HHjSbrS1WySAkWYI=
ListenPort: 0
LogLevel:
LogLevel: verbose
LogTransit: true
LogNormal: true
LogControl: true
LogInternal: true
LogNTP: true
DynamicRoute:
SendPingInterval: 16
PeerAliveTimeout: 70
TimeoutCheckInterval: 20
ConnNextTry: 5
DupCheckTimeout: 40
AdditionalCost: 10
DampingResistance: 0.9
SaveNewPeers: true
SuperNode:
UseSuperNode: true
PSKey: 2eOq1sJlEs3No80xYOaKJ059ElgRaSveyMu9IyQG3X8=
EndpointV4: 127.0.0.1:3456
PubKeyV4: 10CPQrpXKqXxnjtpdxDwnYqLglnuRnCFsiSAjxMrMTc=
EndpointV6: ""
PubKeyV6: KhpV1fJ+jtNT6S5wKUZJbb0oFlDNMS5qxO0f5Ow/QQU=
EndpointEdgeAPIUrl: http://127.0.0.1:3456/eg_net/eg_api
SkipLocalIP: false
SuperNodeInfoTimeout: 50
P2P:
UseP2P: false
SendPeerInterval: 20
GraphRecalculateSetting:
StaticMode: false
ManualLatency: {}
JitterTolerance: 50
JitterToleranceMultiplier: 1.1
TimeoutCheckInterval: 5
RecalculateCoolDown: 5
NTPConfig:
UseNTP: true
MaxServerUse: 8
SyncTimeInterval: 3600
NTPTimeout: 3
Servers:
- time.google.com
- time1.google.com
- time2.google.com
@ -66,6 +75,11 @@ dynamicroute:
- time.asia.apple.com
- time.euro.apple.com
- time.windows.com
nexthoptable: {}
resetconninterval: 86400
peers: []
- pool.ntp.org
- 0.pool.ntp.org
- 1.pool.ntp.org
- 2.pool.ntp.org
- 3.pool.ntp.org
NextHopTable: {}
ResetConnInterval: 86400
Peers: []

View File

@ -336,16 +336,14 @@ func super_peerdel_notify(toDelete mtypes.Vertex, PubKey string) {
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[:path.EgHeaderLen], device.DefaultMTU)
header.SetSrc(mtypes.NodeID_SuperNode)
header.SetTTL(0)
header.SetPacketLength(uint16(len(body)))
copy(buf[path.EgHeaderLen:], body)
header.SetDst(toDelete)
peer4 := httpobj.http_device4.LookupPeerByStr(PubKey)
httpobj.http_device4.SendPacket(peer4, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device4.SendPacket(peer4, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
peer6 := httpobj.http_device6.LookupPeerByStr(PubKey)
httpobj.http_device6.SendPacket(peer6, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device6.SendPacket(peer6, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
time.Sleep(mtypes.S2TD(0.1))
}
httpobj.http_device4.RemovePeerByID(toDelete)
@ -467,9 +465,7 @@ func PushNhTable(force bool) {
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[:path.EgHeaderLen], device.DefaultMTU)
header.SetDst(mtypes.NodeID_SuperNode)
header.SetPacketLength(uint16(len(body)))
header.SetSrc(mtypes.NodeID_SuperNode)
header.SetTTL(0)
copy(buf[path.EgHeaderLen:], body)
for pkstr, peerstate := range httpobj.http_PeerState {
isAlive := peerstate.LastSeen.Load().(time.Time).Add(mtypes.S2TD(httpobj.http_sconfig.PeerAliveTimeout)).After(time.Now())
@ -478,10 +474,10 @@ func PushNhTable(force bool) {
}
if force || peerstate.NhTableState.Load().(string) != httpobj.http_NhTable_Hash {
if peer := httpobj.http_device4.LookupPeerByStr(pkstr); peer != nil && peer.GetEndpointDstStr() != "" {
httpobj.http_device4.SendPacket(peer, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device4.SendPacket(peer, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
}
if peer := httpobj.http_device6.LookupPeerByStr(pkstr); peer != nil && peer.GetEndpointDstStr() != "" {
httpobj.http_device6.SendPacket(peer, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device6.SendPacket(peer, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
}
}
}
@ -502,9 +498,7 @@ func PushPeerinfo(force bool) {
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[:path.EgHeaderLen], device.DefaultMTU)
header.SetDst(mtypes.NodeID_SuperNode)
header.SetPacketLength(uint16(len(body)))
header.SetSrc(mtypes.NodeID_SuperNode)
header.SetTTL(0)
copy(buf[path.EgHeaderLen:], body)
for pkstr, peerstate := range httpobj.http_PeerState {
isAlive := peerstate.LastSeen.Load().(time.Time).Add(mtypes.S2TD(httpobj.http_sconfig.PeerAliveTimeout)).After(time.Now())
@ -513,10 +507,10 @@ func PushPeerinfo(force bool) {
}
if force || peerstate.PeerInfoState.Load().(string) != httpobj.http_PeerInfo_hash {
if peer := httpobj.http_device4.LookupPeerByStr(pkstr); peer != nil {
httpobj.http_device4.SendPacket(peer, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device4.SendPacket(peer, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
}
if peer := httpobj.http_device6.LookupPeerByStr(pkstr); peer != nil {
httpobj.http_device6.SendPacket(peer, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device6.SendPacket(peer, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
}
}
}
@ -544,16 +538,14 @@ func PushServerParams(force bool) {
buf := make([]byte, path.EgHeaderLen+len(body))
header, _ := path.NewEgHeader(buf[:path.EgHeaderLen], device.DefaultMTU)
header.SetDst(mtypes.NodeID_SuperNode)
header.SetPacketLength(uint16(len(body)))
header.SetSrc(mtypes.NodeID_SuperNode)
header.SetTTL(0)
copy(buf[path.EgHeaderLen:], body)
if peer := httpobj.http_device4.LookupPeerByStr(pkstr); peer != nil {
httpobj.http_device4.SendPacket(peer, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device4.SendPacket(peer, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
}
if peer := httpobj.http_device6.LookupPeerByStr(pkstr); peer != nil {
httpobj.http_device6.SendPacket(peer, path.ServerUpdate, buf, device.MessageTransportOffsetContent)
httpobj.http_device6.SendPacket(peer, path.ServerUpdate, 0, buf, device.MessageTransportOffsetContent)
}
}
}

View File

@ -7,7 +7,7 @@ import (
"github.com/KusakabeSi/EtherGuard-VPN/mtypes"
)
const EgHeaderLen = 7
const EgHeaderLen =4
type EgHeader struct {
buf []byte
@ -112,18 +112,3 @@ func (e EgHeader) GetSrc() mtypes.Vertex {
func (e EgHeader) SetSrc(node_ID mtypes.Vertex) {
binary.BigEndian.PutUint16(e.buf[2:4], uint16(node_ID))
}
func (e EgHeader) GetTTL() uint8 {
return e.buf[4]
}
func (e EgHeader) SetTTL(ttl uint8) {
e.buf[4] = ttl
}
func (e EgHeader) GetPacketLength() (ret uint16) {
ret = binary.BigEndian.Uint16(e.buf[5:7])
return
}
func (e EgHeader) SetPacketLength(length uint16) {
binary.BigEndian.PutUint16(e.buf[5:7], length)
}