// Copyright 2017 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package visitor

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"strconv"
	"sync"
	"time"

	libio "github.com/fatedier/golib/io"
	fmux "github.com/hashicorp/yamux"
	quic "github.com/quic-go/quic-go"
	"golang.org/x/time/rate"

	v1 "github.com/fatedier/frp/pkg/config/v1"
	"github.com/fatedier/frp/pkg/msg"
	"github.com/fatedier/frp/pkg/nathole"
	"github.com/fatedier/frp/pkg/transport"
	netpkg "github.com/fatedier/frp/pkg/util/net"
	"github.com/fatedier/frp/pkg/util/util"
	"github.com/fatedier/frp/pkg/util/xlog"
)

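// ErrNoTunnelSession is returned when no hole-punched tunnel session is available yet.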
var ErrNoTunnelSession = errors.New("no tunnel session")

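// XTCPVisitor accepts local user connections and forwards them to the xtcp server proxy
// over a hole-punched tunnel (QUIC or KCP), optionally falling back to another visitor.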
type XTCPVisitor struct {
	*BaseVisitor
	session       TunnelSession
	startTunnelCh chan struct{}
	retryLimiter  *rate.Limiter
	cancel        context.CancelFunc

	cfg *v1.XTCPVisitorConfig
}

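// Run initializes the tunnel session for the configured protocol, starts the local and
// internal listeners, and launches the background workers.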
func (sv *XTCPVisitor) Run() (err error) {
	sv.ctx, sv.cancel = context.WithCancel(sv.ctx)

	if sv.cfg.Protocol == "kcp" {
		sv.session = NewKCPTunnelSession()
	} else {
		sv.session = NewQUICTunnelSession(sv.clientCfg)
	}

	if sv.cfg.BindPort > 0 {
		sv.l, err = net.Listen("tcp", net.JoinHostPort(sv.cfg.BindAddr, strconv.Itoa(sv.cfg.BindPort)))
		if err != nil {
			return
		}
		go sv.worker()
	}

	go sv.internalConnWorker()
	go sv.processTunnelStartEvents()
	if sv.cfg.KeepTunnelOpen {
		sv.retryLimiter = rate.NewLimiter(rate.Every(time.Hour/time.Duration(sv.cfg.MaxRetriesAnHour)), sv.cfg.MaxRetriesAnHour)
		go sv.keepTunnelOpenWorker()
	}
	return
}

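// Close stops all workers and closes the listeners and the tunnel session.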
func (sv *XTCPVisitor) Close() {
	sv.mu.Lock()
	defer sv.mu.Unlock()
	sv.BaseVisitor.Close()
	if sv.cancel != nil {
		sv.cancel()
	}
	if sv.session != nil {
		sv.session.Close()
	}
}

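// worker accepts user connections from the local TCP listener and handles each one
// in its own goroutine.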
func (sv *XTCPVisitor) worker() {
	xl := xlog.FromContextSafe(sv.ctx)
	for {
		conn, err := sv.l.Accept()
		if err != nil {
			xl.Warnf("xtcp local listener closed")
			return
		}
		go sv.handleConn(conn)
	}
}

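// internalConnWorker accepts connections arriving on the visitor's internal listener
// (e.g. connections transferred from other visitors) and handles them like user connections.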
func (sv *XTCPVisitor) internalConnWorker() {
	xl := xlog.FromContextSafe(sv.ctx)
	for {
		conn, err := sv.internalLn.Accept()
		if err != nil {
			xl.Warnf("xtcp internal listener closed")
			return
		}
		go sv.handleConn(conn)
	}
}

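// processTunnelStartEvents serializes hole-punching attempts: every signal on startTunnelCh
// triggers one makeNatHole call, with at least 10 seconds between consecutive attempts.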
func (sv *XTCPVisitor) processTunnelStartEvents() {
	for {
		select {
		case <-sv.ctx.Done():
			return
		case <-sv.startTunnelCh:
			start := time.Now()
			sv.makeNatHole()
			duration := time.Since(start)
			// avoid restarting hole punching too frequently
			if duration < 10*time.Second {
				time.Sleep(10*time.Second - duration)
			}
		}
	}
}

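// keepTunnelOpenWorker periodically opens a tunnel connection to verify that the tunnel is
// still usable, re-triggering hole punching (rate limited) when it is not.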
func (sv *XTCPVisitor) keepTunnelOpenWorker() {
	xl := xlog.FromContextSafe(sv.ctx)
	ticker := time.NewTicker(time.Duration(sv.cfg.MinRetryInterval) * time.Second)
	defer ticker.Stop()

	sv.startTunnelCh <- struct{}{}
	for {
		select {
		case <-sv.ctx.Done():
			return
		case <-ticker.C:
			xl.Debugf("keepTunnelOpenWorker try to check tunnel...")
			conn, err := sv.getTunnelConn()
			if err != nil {
				xl.Warnf("keepTunnelOpenWorker get tunnel connection error: %v", err)
				_ = sv.retryLimiter.Wait(sv.ctx)
				continue
			}
			xl.Debugf("keepTunnelOpenWorker check success")
			if conn != nil {
				conn.Close()
			}
		}
	}
}

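// handleConn joins a user connection with a tunnel connection to the server. If the tunnel
// cannot be opened and a fallback visitor is configured, the connection is transferred there instead.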
func (sv *XTCPVisitor) handleConn(userConn net.Conn) {
	xl := xlog.FromContextSafe(sv.ctx)
	isConnTransferred := false
	defer func() {
		if !isConnTransferred {
			userConn.Close()
		}
	}()

	xl.Debugf("get a new xtcp user connection")

	// Open a tunnel connection to the server. If there is already a successful hole-punching connection,
	// it will be reused. Otherwise, it will block and wait for a successful hole-punching connection until timeout.
	ctx := context.Background()
	if sv.cfg.FallbackTo != "" {
		timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(sv.cfg.FallbackTimeoutMs)*time.Millisecond)
		defer cancel()
		ctx = timeoutCtx
	}
	tunnelConn, err := sv.openTunnel(ctx)
	if err != nil {
		xl.Errorf("open tunnel error: %v", err)
		// no fallback, just return
		if sv.cfg.FallbackTo == "" {
			return
		}

		xl.Debugf("try to transfer connection to visitor: %s", sv.cfg.FallbackTo)
		if err := sv.helper.TransferConn(sv.cfg.FallbackTo, userConn); err != nil {
			xl.Errorf("transfer connection to visitor %s error: %v", sv.cfg.FallbackTo, err)
			return
		}
		isConnTransferred = true
		return
	}

	var muxConnRWCloser io.ReadWriteCloser = tunnelConn
	if sv.cfg.Transport.UseEncryption {
		muxConnRWCloser, err = libio.WithEncryption(muxConnRWCloser, []byte(sv.cfg.SecretKey))
		if err != nil {
			xl.Errorf("create encryption stream error: %v", err)
			return
		}
	}
	if sv.cfg.Transport.UseCompression {
		var recycleFn func()
		muxConnRWCloser, recycleFn = libio.WithCompressionFromPool(muxConnRWCloser)
		defer recycleFn()
	}

	_, _, errs := libio.Join(userConn, muxConnRWCloser)
	xl.Debugf("join connections closed")
	if len(errs) > 0 {
		xl.Tracef("join connections errors: %v", errs)
	}
}

// openTunnel will open a tunnel connection to the target server.
func (sv *XTCPVisitor) openTunnel(ctx context.Context) (conn net.Conn, err error) {
	xl := xlog.FromContextSafe(sv.ctx)
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()

	timeoutC := time.After(20 * time.Second)
	immediateTrigger := make(chan struct{}, 1)
	defer close(immediateTrigger)
	immediateTrigger <- struct{}{}

	for {
		select {
		case <-sv.ctx.Done():
			return nil, sv.ctx.Err()
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-immediateTrigger:
			conn, err = sv.getTunnelConn()
		case <-ticker.C:
			conn, err = sv.getTunnelConn()
		case <-timeoutC:
			return nil, fmt.Errorf("open tunnel timeout")
		}

		if err != nil {
			if err != ErrNoTunnelSession {
				xl.Warnf("get tunnel connection error: %v", err)
			}
			continue
		}
		return conn, nil
	}
}

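// getTunnelConn opens a new connection on the current tunnel session. On failure it closes
// the session and signals processTunnelStartEvents to punch a new hole.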
func (sv *XTCPVisitor) getTunnelConn() (net.Conn, error) {
	conn, err := sv.session.OpenConn(sv.ctx)
	if err == nil {
		return conn, nil
	}
	sv.session.Close()

	select {
	case sv.startTunnelCh <- struct{}{}:
	default:
	}
	return nil, err
}

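// makeNatHole runs one NAT hole-punching attempt in the following steps: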
// 0. PreCheck
// 1. Prepare
// 2. ExchangeInfo
// 3. MakeNATHole
// 4. Create a tunnel session using an underlying UDP connection.
func (sv *XTCPVisitor) makeNatHole() {
	xl := xlog.FromContextSafe(sv.ctx)
	xl.Tracef("makeNatHole start")
	if err := nathole.PreCheck(sv.ctx, sv.helper.MsgTransporter(), sv.cfg.ServerName, 5*time.Second); err != nil {
		xl.Warnf("nathole precheck error: %v", err)
		return
	}

	xl.Tracef("nathole prepare start")
	prepareResult, err := nathole.Prepare([]string{sv.clientCfg.NatHoleSTUNServer})
	if err != nil {
		xl.Warnf("nathole prepare error: %v", err)
		return
	}
	xl.Infof("nathole prepare success, nat type: %s, behavior: %s, addresses: %v, assistedAddresses: %v",
		prepareResult.NatType, prepareResult.Behavior, prepareResult.Addrs, prepareResult.AssistedAddrs)

	listenConn := prepareResult.ListenConn

	// send NatHoleVisitor to server
	now := time.Now().Unix()
	transactionID := nathole.NewTransactionID()
	natHoleVisitorMsg := &msg.NatHoleVisitor{
		TransactionID: transactionID,
		ProxyName:     sv.cfg.ServerName,
		Protocol:      sv.cfg.Protocol,
		SignKey:       util.GetAuthKey(sv.cfg.SecretKey, now),
		Timestamp:     now,
		MappedAddrs:   prepareResult.Addrs,
		AssistedAddrs: prepareResult.AssistedAddrs,
	}

	xl.Tracef("nathole exchange info start")
	natHoleRespMsg, err := nathole.ExchangeInfo(sv.ctx, sv.helper.MsgTransporter(), transactionID, natHoleVisitorMsg, 5*time.Second)
	if err != nil {
		listenConn.Close()
		xl.Warnf("nathole exchange info error: %v", err)
		return
	}

	xl.Infof("get natHoleRespMsg, sid [%s], protocol [%s], candidate address %v, assisted address %v, detectBehavior: %+v",
		natHoleRespMsg.Sid, natHoleRespMsg.Protocol, natHoleRespMsg.CandidateAddrs,
		natHoleRespMsg.AssistedAddrs, natHoleRespMsg.DetectBehavior)

	newListenConn, raddr, err := nathole.MakeHole(sv.ctx, listenConn, natHoleRespMsg, []byte(sv.cfg.SecretKey))
	if err != nil {
		listenConn.Close()
		xl.Warnf("make hole error: %v", err)
		return
	}
	listenConn = newListenConn
	xl.Infof("nat hole connection established successfully, sid [%s], remoteAddr [%s]", natHoleRespMsg.Sid, raddr)

	if err := sv.session.Init(listenConn, raddr); err != nil {
		listenConn.Close()
		xl.Warnf("init tunnel session error: %v", err)
		return
	}
}

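// TunnelSession abstracts the multiplexed session that is created on top of the
// hole-punched UDP connection.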
type TunnelSession interface {
	Init(listenConn *net.UDPConn, raddr *net.UDPAddr) error
	OpenConn(context.Context) (net.Conn, error)
	Close()
}

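// KCPTunnelSession multiplexes tunnel connections over a KCP connection using yamux.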
type KCPTunnelSession struct {
	session *fmux.Session
	lConn   *net.UDPConn
	mu      sync.RWMutex
}

func NewKCPTunnelSession() TunnelSession {
	return &KCPTunnelSession{}
}

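// Init redials the punched remote address from the same local UDP address, then layers a
// KCP connection and a yamux client session on top of it.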
func (ks *KCPTunnelSession) Init(listenConn *net.UDPConn, raddr *net.UDPAddr) error {
	listenConn.Close()
	laddr, _ := net.ResolveUDPAddr("udp", listenConn.LocalAddr().String())
	lConn, err := net.DialUDP("udp", laddr, raddr)
	if err != nil {
		return fmt.Errorf("dial udp error: %v", err)
	}
	remote, err := netpkg.NewKCPConnFromUDP(lConn, true, raddr.String())
	if err != nil {
		return fmt.Errorf("create kcp connection from udp connection error: %v", err)
	}

	fmuxCfg := fmux.DefaultConfig()
	fmuxCfg.KeepAliveInterval = 10 * time.Second
	fmuxCfg.MaxStreamWindowSize = 6 * 1024 * 1024
	fmuxCfg.LogOutput = io.Discard
	session, err := fmux.Client(remote, fmuxCfg)
	if err != nil {
		remote.Close()
		return fmt.Errorf("initialize client session error: %v", err)
	}
	ks.mu.Lock()
	ks.session = session
	ks.lConn = lConn
	ks.mu.Unlock()
	return nil
}

func (ks *KCPTunnelSession) OpenConn(_ context.Context) (net.Conn, error) {
	ks.mu.RLock()
	defer ks.mu.RUnlock()
	session := ks.session
	if session == nil {
		return nil, ErrNoTunnelSession
	}
	return session.Open()
}

func (ks *KCPTunnelSession) Close() {
	ks.mu.Lock()
	defer ks.mu.Unlock()
	if ks.session != nil {
		_ = ks.session.Close()
		ks.session = nil
	}
	if ks.lConn != nil {
		_ = ks.lConn.Close()
		ks.lConn = nil
	}
}

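// QUICTunnelSession multiplexes tunnel connections as streams of a QUIC connection
// established over the hole-punched UDP socket.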
type QUICTunnelSession struct {
	session    quic.Connection
	listenConn *net.UDPConn
	mu         sync.RWMutex

	clientCfg *v1.ClientCommonConfig
}

func NewQUICTunnelSession(clientCfg *v1.ClientCommonConfig) TunnelSession {
	return &QUICTunnelSession{
		clientCfg: clientCfg,
	}
}

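// Init dials a QUIC connection to the punched remote address over the existing UDP socket,
// using the client's QUIC transport settings.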
func (qs *QUICTunnelSession) Init(listenConn *net.UDPConn, raddr *net.UDPAddr) error {
	tlsConfig, err := transport.NewClientTLSConfig("", "", "", raddr.String())
	if err != nil {
		return fmt.Errorf("create tls config error: %v", err)
	}
	tlsConfig.NextProtos = []string{"frp"}
	quicConn, err := quic.Dial(context.Background(), listenConn, raddr, tlsConfig,
		&quic.Config{
			MaxIdleTimeout:     time.Duration(qs.clientCfg.Transport.QUIC.MaxIdleTimeout) * time.Second,
			MaxIncomingStreams: int64(qs.clientCfg.Transport.QUIC.MaxIncomingStreams),
			KeepAlivePeriod:    time.Duration(qs.clientCfg.Transport.QUIC.KeepalivePeriod) * time.Second,
		})
	if err != nil {
		return fmt.Errorf("dial quic error: %v", err)
	}
	qs.mu.Lock()
	qs.session = quicConn
	qs.listenConn = listenConn
	qs.mu.Unlock()
	return nil
}

func (qs *QUICTunnelSession) OpenConn(ctx context.Context) (net.Conn, error) {
	qs.mu.RLock()
	defer qs.mu.RUnlock()
	session := qs.session
	if session == nil {
		return nil, ErrNoTunnelSession
	}
	stream, err := session.OpenStreamSync(ctx)
	if err != nil {
		return nil, err
	}
	return netpkg.QuicStreamToNetConn(stream, session), nil
}

func (qs *QUICTunnelSession) Close() {
	qs.mu.Lock()
	defer qs.mu.Unlock()
	if qs.session != nil {
		_ = qs.session.CloseWithError(0, "")
		qs.session = nil
	}
	if qs.listenConn != nil {
		_ = qs.listenConn.Close()
		qs.listenConn = nil
	}
}