models/server: fix a possible error when frps accepts too many user connections in a short time

fatedier 2016-05-19 20:50:19 +08:00
parent d569a60eff
commit ee8786a6b3
2 changed files with 53 additions and 62 deletions


@@ -243,7 +243,7 @@ func doLogin(req *msg.ControlReq, c *conn.Conn) (ret int64, info string) {
             return
         }
         // the connection will close after join over
-        s.RecvNewWorkConn(c)
+        s.RegisterNewWorkConn(c)
     } else {
         info = fmt.Sprintf("Unsupport login message type [%d]", req.Type)
         log.Warn("Unsupport login message type [%d]", req.Type)


@@ -15,7 +15,7 @@
 package server

 import (
-    "container/list"
+    "fmt"
     "sync"
     "time"
@@ -42,15 +42,13 @@ type ProxyServer struct {
     listeners    []Listener      // accept new connection from remote users
     ctlMsgChan   chan int64      // every time accept a new user conn, put "1" to the channel
     workConnChan chan *conn.Conn // get new work conns from control goroutine
-    userConnList *list.List      // store user conns
     mutex        sync.Mutex
 }

 func (p *ProxyServer) Init() {
     p.Status = consts.Idle
-    p.workConnChan = make(chan *conn.Conn)
+    p.workConnChan = make(chan *conn.Conn, 100)
     p.ctlMsgChan = make(chan int64)
-    p.userConnList = list.New()
     p.listeners = make([]Listener, 0)
 }
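An aside on the Init change above (not part of the commit itself): replacing make(chan *conn.Conn) with make(chan *conn.Conn, 100) is what turns workConnChan into a pool, because sends into a buffered channel only block once the buffer is full. A minimal sketch of that behavior, using int values in place of *conn.Conn:

    package main

    import "fmt"

    func main() {
        // Buffered with capacity 100, like the new workConnChan in Init:
        // up to 100 items can be parked before any receiver asks for one,
        // so registering work connections during a burst does not block.
        pool := make(chan int, 100)

        for i := 0; i < 5; i++ {
            pool <- i // non-blocking while len(pool) < cap(pool)
        }
        fmt.Println("parked:", len(pool), "of", cap(pool))
        // With an unbuffered make(chan int), the first send above would
        // block until a receiver was ready.
    }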
@@ -96,74 +94,34 @@ func (p *ProxyServer) Start() (err error) {
                 }
                 log.Debug("ProxyName [%s], get one new user conn [%s]", p.Name, c.GetRemoteAddr())

-                // insert into list
-                p.Lock()
                 if p.Status != consts.Working {
                     log.Debug("ProxyName [%s] is not working, new user conn close", p.Name)
                     c.Close()
-                    p.Unlock()
                     return
                 }
-                p.userConnList.PushBack(c)
-                p.Unlock()
-
-                // put msg to control conn
-                p.ctlMsgChan <- 1
-
-                // set timeout
-                time.AfterFunc(time.Duration(UserConnTimeout)*time.Second, func() {
-                    p.Lock()
-                    element := p.userConnList.Front()
-                    p.Unlock()
-                    if element == nil {
-                        return
-                    }
-                    userConn := element.Value.(*conn.Conn)
-                    if userConn == c {
-                        log.Warn("ProxyName [%s], user conn [%s] timeout", p.Name, c.GetRemoteAddr())
-                        userConn.Close()
-                    }
-                })
+
+                // start another goroutine for join two conns from frpc and user
+                go func() {
+                    workConn, err := p.getWorkConn()
+                    if err != nil {
+                        return
+                    }
+                    userConn := c
+                    // msg will transfer to another without modifying
+                    // l means local, r means remote
+                    log.Debug("Join two connections, (l[%s] r[%s]) (l[%s] r[%s])", workConn.GetLocalAddr(), workConn.GetRemoteAddr(),
+                        userConn.GetLocalAddr(), userConn.GetRemoteAddr())
+                    if p.UseEncryption {
+                        go conn.JoinMore(userConn, workConn, p.AuthToken)
+                    } else {
+                        go conn.Join(userConn, workConn)
+                    }
+                }()
             }
         }(listener)
     }

-    // start another goroutine for join two conns from frpc and user
-    go func() {
-        for {
-            workConn, ok := <-p.workConnChan
-            if !ok {
-                return
-            }
-            p.Lock()
-            element := p.userConnList.Front()
-            var userConn *conn.Conn
-            if element != nil {
-                userConn = element.Value.(*conn.Conn)
-                p.userConnList.Remove(element)
-            } else {
-                workConn.Close()
-                p.Unlock()
-                continue
-            }
-            p.Unlock()
-
-            // msg will transfer to another without modifying
-            // l means local, r means remote
-            log.Debug("Join two connections, (l[%s] r[%s]) (l[%s] r[%s])", workConn.GetLocalAddr(), workConn.GetRemoteAddr(),
-                userConn.GetLocalAddr(), userConn.GetRemoteAddr())
-            if p.UseEncryption {
-                go conn.JoinMore(userConn, workConn, p.AuthToken)
-            } else {
-                go conn.Join(userConn, workConn)
-            }
-        }
-    }()
-
     return nil
 }
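conn.Join and conn.JoinMore are not shown in this diff; as a rough sketch of what "join two connections" typically means (an assumption about their behavior, not frp's actual implementation), it is a bidirectional copy between the user connection and the work connection:

    package main

    import (
        "fmt"
        "io"
        "net"
        "sync"
    )

    // join is a hypothetical stand-in for conn.Join: copy bytes in both
    // directions until either side closes, then return.
    func join(userConn, workConn net.Conn) {
        var wg sync.WaitGroup
        pipe := func(dst, src net.Conn) {
            defer wg.Done()
            io.Copy(dst, src) // runs until src is closed or errors
            dst.Close()       // unblock the copy going the other way
        }
        wg.Add(2)
        go pipe(workConn, userConn)
        go pipe(userConn, workConn)
        wg.Wait()
    }

    func main() {
        // Two in-memory "connections" standing in for the user conn and work conn.
        userA, userB := net.Pipe()
        workA, workB := net.Pipe()
        go join(userB, workA)

        go func() {
            userA.Write([]byte("hello"))
            userA.Close()
        }()

        buf := make([]byte, 5)
        io.ReadFull(workB, buf)
        fmt.Println(string(buf)) // bytes written by the user come out of the work conn
    }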
@@ -180,7 +138,6 @@ func (p *ProxyServer) Close() {
         }
         close(p.ctlMsgChan)
         close(p.workConnChan)
-        p.userConnList = list.New()
     }
     p.Unlock()
 }
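For context, closing workConnChan in Close is what makes the ok checks in getWorkConn (added below) trip: a receive from a closed, drained channel returns immediately with the zero value and false. A tiny standalone illustration:

    package main

    import "fmt"

    func main() {
        pool := make(chan int, 100)
        close(pool)

        // A receive from a closed (and empty) channel does not block; it
        // yields the zero value and ok == false, which getWorkConn turns
        // into an error instead of handing out a nil work connection.
        v, ok := <-pool
        fmt.Println(v, ok) // 0 false
    }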
@@ -195,6 +152,40 @@ func (p *ProxyServer) WaitUserConn() (closeFlag bool) {
     return
 }

-func (p *ProxyServer) RecvNewWorkConn(c *conn.Conn) {
+func (p *ProxyServer) RegisterNewWorkConn(c *conn.Conn) {
     p.workConnChan <- c
 }
+
+// when frps get one user connection, we get one work connection from the pool and return it
+// if no workConn available in the pool, send message to frpc to get one or more
+// and wait until it is available
+// return an error if wait timeout
+func (p *ProxyServer) getWorkConn() (workConn *conn.Conn, err error) {
+    var ok bool
+    // get a work connection from the pool
+    select {
+    case workConn, ok = <-p.workConnChan:
+        if !ok {
+            err = fmt.Errorf("ProxyName [%s], no work connections available, control is closing", p.Name)
+            return
+        }
+    default:
+        // no work connections available in the poll, send message to frpc to get one
+        p.ctlMsgChan <- 1
+
+        select {
+        case workConn, ok = <-p.workConnChan:
+            if !ok {
+                err = fmt.Errorf("ProxyName [%s], no work connections available, control is closing", p.Name)
+                return
+            }
+        case <-time.After(time.Duration(UserConnTimeout) * time.Second):
+            log.Warn("ProxyName [%s], timeout trying to get work connection", p.Name)
+            err = fmt.Errorf("ProxyName [%s], timeout trying to get work connection", p.Name)
+            return
+        }
+    }
+    return
+}
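The new getWorkConn is the core of the fix: a non-blocking receive takes an idle work connection from the pool, and only when the pool is empty does frps ask frpc for a new one and wait with a deadline. A minimal, self-contained sketch of the same select pattern, with ints standing in for work connections and illustrative names (pool, request, getFromPool) that are not frp's:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // getFromPool mirrors the shape of getWorkConn: the fast path takes an idle
    // item from the pool; the slow path signals the producer and waits with a timeout.
    func getFromPool(pool chan int, request chan struct{}, timeout time.Duration) (int, error) {
        select {
        case v, ok := <-pool:
            if !ok {
                return 0, errors.New("pool closed")
            }
            return v, nil
        default:
            // Pool is empty: ask the producer for a new item, like sending
            // "1" on ctlMsgChan to make frpc open another work connection.
            request <- struct{}{}
            select {
            case v, ok := <-pool:
                if !ok {
                    return 0, errors.New("pool closed")
                }
                return v, nil
            case <-time.After(timeout):
                return 0, errors.New("timeout waiting for an item")
            }
        }
    }

    func main() {
        pool := make(chan int, 100)
        request := make(chan struct{}, 1)

        // Stand-in producer: each request yields one new item, the way frpc
        // responds to a control message by dialing a new work connection.
        go func() {
            for range request {
                pool <- 42
            }
        }()

        v, err := getFromPool(pool, request, time.Second)
        fmt.Println(v, err) // 42 <nil>
    }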