Merge pull request #372 from fatedier/dev

bump version to v0.12.0
This commit is contained in:
fatedier 2017-06-19 21:36:51 +08:00 committed by GitHub
commit ad3cf9a64a
257 changed files with 23107 additions and 220 deletions

61
Godeps/Godeps.json generated

@ -30,6 +30,16 @@
"Comment": "v1.1-41-g8a45e95",
"Rev": "8a45e95fc75cb77048068a62daed98cc22fdac7c"
},
{
"ImportPath": "github.com/klauspost/cpuid",
"Comment": "v1.0",
"Rev": "09cded8978dc9e80714c4d85b0322337b0a1e5e0"
},
{
"ImportPath": "github.com/klauspost/reedsolomon",
"Comment": "1.3-1-gdde6ad5",
"Rev": "dde6ad55c5e5a6379a4e82dcca32ee407346eb6d"
},
{
"ImportPath": "github.com/pkg/errors",
"Comment": "v0.8.0-5-gc605e28",
@ -54,14 +64,63 @@
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "github.com/xtaci/kcp-go",
"Comment": "v3.17",
"Rev": "df437e2b8ec365a336200f9d9da53441cf72ed47"
},
{
"ImportPath": "github.com/xtaci/smux",
"Comment": "v1.0.5-8-g2de5471",
"Rev": "2de5471dfcbc029f5fe1392b83fe784127c4943e"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/crypto/cast5",
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
"Rev": "1f22c0103821b9390939b6776727195525381532"
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/crypto/salsa20",
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/crypto/salsa20/salsa",
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/crypto/tea",
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/crypto/twofish",
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/crypto/xtea",
"Rev": "e1a4589e7d3ea14a3352255d04b6f1a418845e5e"
},
{
"ImportPath": "golang.org/x/net/bpf",
"Rev": "e4fa1c5465ad6111f206fc92186b8c83d64adbe1"
},
{
"ImportPath": "golang.org/x/net/internal/iana",
"Rev": "e4fa1c5465ad6111f206fc92186b8c83d64adbe1"
},
{
"ImportPath": "golang.org/x/net/internal/socket",
"Rev": "e4fa1c5465ad6111f206fc92186b8c83d64adbe1"
},
{
"ImportPath": "golang.org/x/net/ipv4",
"Rev": "e4fa1c5465ad6111f206fc92186b8c83d64adbe1"
}
]
}


@ -15,7 +15,12 @@ file:
go generate ./assets/...
fmt:
go fmt ./...
go fmt ./assets/...
go fmt ./client/...
go fmt ./cmd/...
go fmt ./models/...
go fmt ./server/...
go fmt ./utils/...
frps:
go build -o bin/frps ./cmd/frps

131
README.md

@ -18,6 +18,8 @@ frp is a fast reverse proxy to help you expose a local server behind a NAT or fi
* [Access your computer in LAN by SSH](#access-your-computer-in-lan-by-ssh)
* [Visit your web service in LAN by custom domains](#visit-your-web-service-in-lan-by-custom-domains)
* [Forward DNS query request](#forward-dns-query-request)
* [Forward unix domain socket](#forward-unix-domain-socket)
* [Connect website through frpc's network](#connect-website-through-frpcs-network)
* [Features](#features)
* [Dashboard](#dashboard)
* [Authentication](#authentication)
@ -26,16 +28,19 @@ frp is a fast reverse proxy to help you expose a local server behind a NAT or fi
* [Privilege Mode](#privilege-mode)
* [Port White List](#port-white-list)
* [TCP Stream Multiplexing](#tcp-stream-multiplexing)
* [Support KCP Protocol](#support-kcp-protocol)
* [Connection Pool](#connection-pool)
* [Rewriting the Host Header](#rewriting-the-host-header)
* [Password protecting your web service](#password-protecting-your-web-service)
* [Custom subdomain names](#custom-subdomain-names)
* [URL routing](#url-routing)
* [Connect frps by HTTP PROXY](#connect-frps-by-http-proxy)
* [Plugin](#plugin)
* [Development Plan](#development-plan)
* [Contributing](#contributing)
* [Donation](#donation)
* [AliPay](#alipay)
* [Wechat Pay](#wechat-pay)
* [Paypal](#paypal)
<!-- vim-markdown-toc -->
@ -143,7 +148,7 @@ However, we can expose a http or https service using frp.
### Forward DNS query request
1. Modify frps.ini, configure a reverse proxy named [dns]:
1. Modify frps.ini:
```ini
# frps.ini
@ -178,6 +183,69 @@ However, we can expose a http or https service using frp.
`dig @x.x.x.x -p 6000 www.google.com`
### Forward unix domain socket
Expose a unix domain socket (for example, the docker daemon socket) over a tcp port.
1. Modify frps.ini:
```ini
# frps.ini
[common]
bind_port = 7000
```
2. Start frps:
`./frps -c ./frps.ini`
3. Modify frpc.ini:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
[unix_domain_socket]
type = tcp
remote_port = 6000
plugin = unix_domain_socket
plugin_unix_path = /var/run/docker.sock
```
4. Start frpc:
`./frpc -c ./frpc.ini`
5. Get the docker version with a curl command:
`curl http://x.x.x.x:6000/version`
### Connect website through frpc's network
Configure frps the same as above.
1. Modify frpc.ini:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
[http_proxy]
type = tcp
remote_port = 6000
plugin = http_proxy
```
2. Start frpc:
`./frpc -c ./frpc.ini`
3. Set the HTTP proxy `x.x.x.x:6000` in your browser and visit websites through frpc's network (a programmatic example follows).
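The same proxy can also be used programmatically. A minimal Go sketch (not part of the README; `x.x.x.x:6000` is the placeholder address from the example above):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Send this client's requests through the http_proxy plugin exposed via frps.
	proxyURL, _ := url.Parse("http://x.x.x.x:6000")
	client := &http.Client{
		Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
	}
	resp, err := client.Get("http://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```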
## Features
### Dashboard
@ -252,6 +320,35 @@ You can disable this feature by modify frps.ini and frpc.ini:
tcp_mux = false
```
### Support KCP Protocol
frp supports the kcp protocol since v0.12.0.
KCP is a fast and reliable protocol that can cut average latency by 30% to 40% and the maximum delay by a factor of three, at the cost of 10% to 20% more bandwidth than TCP.
To use kcp in frp:
1. Enable kcp protocol in frps:
```ini
# frps.ini
[common]
bind_port = 7000
# kcp needs to bind a udp port; it can be the same as 'bind_port'
kcp_bind_port = 7000
```
2. Configure the protocol frpc uses to connect to frps:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
# specify the 'kcp_bind_port' in frps
server_port = 7000
protocol = kcp
```
### Connection Pool
By default, frps asks frpc to create a new connection to the backend service only when a user request arrives. If a proxy's connection pool is enabled, a specified number of connections are pre-established.
@ -358,22 +455,46 @@ Http requests with url prefix `/news` and `/about` will be forwarded to **web02*
frpc can connect to frps through an HTTP proxy if you set the OS environment variable `HTTP_PROXY` or the `http_proxy` parameter in the frpc.ini file.
It only works when the protocol is tcp.
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
http_proxy = http://user:pwd@192.168.1.128:8080
```
### Plugin
By default, frpc only forwards requests to a local tcp or udp port.
Plugins provide richer features. There are built-in plugins such as **unix_domain_socket** and **http_proxy**; see the [example usage](#example-usage).
Specify which plugin to use with the `plugin` parameter. Plugin configuration parameters all start with `plugin_`. `local_ip` and `local_port` are ignored when a plugin is used.
Using plugin **http_proxy**:
```ini
# frpc.ini
[http_proxy]
type = tcp
remote_port = 6000
plugin = http_proxy
plugin_http_user = abc
plugin_http_passwd = abc
```
`plugin_http_user` and `plugin_http_passwd` are configuration parameters used by the `http_proxy` plugin.
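As the plugin code later in this diff shows, unauthenticated requests are answered with status 407 and a `Proxy-Authenticate: Basic` header, so clients use standard HTTP Basic proxy authentication. A small illustrative sketch of the header a client would send, assuming the example credentials `abc:abc`:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	// Build the Proxy-Authorization header checked by the http_proxy plugin.
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	creds := base64.StdEncoding.EncodeToString([]byte("abc:abc"))
	req.Header.Set("Proxy-Authorization", "Basic "+creds)
	fmt.Println(req.Header.Get("Proxy-Authorization")) // Basic YWJjOmFiYw==
}
```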
## Development Plan
* Log http request information in frps.
* Direct reverse proxy, like haproxy.
* Load balancing to different services in frpc.
* frpc can directly serve static files as a webserver.
* Full control mode, dynamically modifying frpc's configuration from the dashboard in frps.
* P2P communication by UDP hole punching to penetrate NAT.
* Client Plugin (http proxy).
* Kubernetes ingress support.
@ -398,6 +519,10 @@ frp QQ group: 606194980
![donation-alipay](/doc/pic/donate-alipay.png)
### Wechat Pay
![donation-wechatpay](/doc/pic/donate-wechatpay.png)
### Paypal
Donate money by [paypal](https://www.paypal.me/fatedier) to my account **fatedier@gmail.com**.


@ -16,6 +16,8 @@ frp is a high performance reverse proxy application for NAT traversal, supporting tcp
* [Access a machine on the company LAN via ssh](#通过-ssh-访问公司内网机器)
* [Visit a web service deployed on the LAN using custom domains](#通过自定义域名访问部署于内网的-web-服务)
* [Forward DNS query requests](#转发-dns-查询请求)
* [Forward unix domain sockets](#转发-unix域套接字)
* [Access the internet through the machine running frpc](#通过-frpc-所在机器访问外网)
* [Features](#功能说明)
* [Dashboard](#dashboard)
* [Authentication](#身份验证)
@ -24,16 +26,19 @@ frp is a high performance reverse proxy application for NAT traversal, supporting tcp
* [Privilege Mode](#特权模式)
* [Port whitelist](#端口白名单)
* [TCP stream multiplexing](#tcp-多路复用)
* [KCP protocol support](#支持-kcp-协议)
* [Connection pool](#连接池)
* [Rewriting the Host Header](#修改-host-header)
* [Password protecting your web service](#通过密码保护你的-web-服务)
* [Custom subdomains](#自定义二级域名)
* [URL routing](#url-路由)
* [Connect to frps through a proxy](#通过代理连接-frps)
* [Plugins](#插件)
* [Development plan](#开发计划)
* [Contributing to frp](#为-frp-做贡献)
* [Donation](#捐助)
* [Donate via AliPay QR code](#支付宝扫码捐赠)
* [Donate via WeChat Pay](#微信支付捐赠)
* [Donate via Paypal](#paypal-捐赠)
<!-- vim-markdown-toc -->
@ -179,6 +184,71 @@ DNS query requests usually use the UDP protocol; frp supports tunneling UDP services on the LAN
`dig @x.x.x.x -p 6000 www.google.com`
### Forward unix domain sockets
Access a unix domain socket on the LAN (for example, to talk to the docker daemon) through a tcp port.
1. Modify frps.ini:
```ini
# frps.ini
[common]
bind_port = 7000
```
2. Start frps:
`./frps -c ./frps.ini`
3. Modify frpc.ini and enable the unix_domain_socket plugin:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
[unix_domain_socket]
type = tcp
remote_port = 6000
plugin = unix_domain_socket
plugin_unix_path = /var/run/docker.sock
```
4. Start frpc:
`./frpc -c ./frpc.ini`
5. Get the docker version with curl:
`curl http://x.x.x.x:6000/version`
### Access the internet through the machine running frpc
frpc has a built-in http proxy plugin that lets other machines access the internet through frpc's network.
Deploy frps the same way as above.
1. Modify frpc.ini and enable the http_proxy plugin:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
[http_proxy]
type = tcp
remote_port = 6000
plugin = http_proxy
```
2. Start frpc:
`./frpc -c ./frpc.ini`
3. Set the browser's HTTP proxy to `x.x.x.x:6000` and access the internet through the frpc machine's network.
## Features
### Dashboard
@ -257,6 +327,35 @@ privilege_allow_ports can be configured to allow a specific port or a range of ports
tcp_mux = false
```
### KCP protocol support
Since v0.12.0 the underlying transport can be switched to the kcp protocol, which noticeably improves transfer efficiency on weak networks at the cost of some extra traffic.
To enable kcp support:
1. Enable kcp in frps.ini and specify a udp port for receiving client requests:
```ini
# frps.ini
[common]
bind_port = 7000
# kcp binds a udp port; it can be the same as bind_port
kcp_bind_port = 7000
```
2. Specify the protocol to use in frpc.ini; currently only tcp and kcp are supported. Other proxy configuration needs no change:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
# set server_port to the kcp_bind_port of frps
server_port = 7000
protocol = kcp
```
3. Use frp as before; note that udp access to the relevant ports must be allowed on the machines involved.
### Connection pool
By default, frps asks frpc to open a connection to the backend service only after a user request arrives. When a connection pool is enabled for a proxy, frp pre-establishes the specified number of connections with the backend service; each incoming user request is paired with a connection taken from the pool, avoiding the wait for establishing a backend connection and for exchanging control messages between frpc and frps.
@ -373,13 +472,38 @@ locations = /news,/about
This feature is enabled by setting the `HTTP_PROXY` system environment variable or the `http_proxy` parameter in frpc's configuration file.
It only takes effect when `protocol = tcp`.
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
http_proxy = http://user:pwd@192.168.1.128:8080
```
### Plugins
By default, frpc only forwards requests to a local tcp or udp port.
Plugin mode provides richer client-side features; the current built-in plugins are **unix_domain_socket** and **http_proxy**. See the [examples](#使用示例) for concrete usage.
Specify the plugin to use with `plugin`; all plugin configuration parameters start with `plugin_`. When a plugin is used, `local_ip` and `local_port` no longer need to be configured.
Example using the **http_proxy** plugin:
```ini
# frpc.ini
[http_proxy]
type = tcp
remote_port = 6000
plugin = http_proxy
plugin_http_user = abc
plugin_http_passwd = abc
```
`plugin_http_user` and `plugin_http_passwd` are the optional configuration parameters of the `http_proxy` plugin.
## Development plan
Features and improvements planned for later releases, in no particular order; suggestions for other features are welcome in [issues](https://github.com/fatedier/frp/issues).
@ -388,9 +512,7 @@ http_proxy = http://user:pwd@192.168.1.128:8080
* frps supports acting as a direct reverse proxy, similar to haproxy.
* frpc supports load balancing across different backend services.
* frpc can directly serve specified static pages as a webserver.
* Full control mode for frpc, allowing online operation of frpc through the dashboard in frps.
* Support UDP hole punching so that machines behind both NATs communicate directly, with traffic no longer relayed through the server.
* Support plugins: connections received by frpc can be handed to a specified plugin, e.g. an http proxy or a simple web server.
* Integrated support for platforms such as k8s.
## Contributing to frp
@ -416,6 +538,10 @@ frp discussion group: 606194980 (QQ group number)
![donate-alipay](/doc/pic/donate-alipay.png)
### Donate via WeChat Pay
![donate-wechatpay](/doc/pic/donate-wechatpay.png)
### Donate via Paypal
Overseas users are encouraged to donate via [Paypal](https://www.paypal.me/fatedier) to my account **fatedier@gmail.com**.


@ -150,7 +150,7 @@ func (ctl *Control) NewWorkConn() {
workConn = net.WrapConn(stream)
} else {
workConn, err = net.ConnectTcpServerByHttpProxy(config.ClientCommonCfg.HttpProxy,
workConn, err = net.ConnectServerByHttpProxy(config.ClientCommonCfg.HttpProxy, config.ClientCommonCfg.Protocol,
fmt.Sprintf("%s:%d", config.ClientCommonCfg.ServerAddr, config.ClientCommonCfg.ServerPort))
if err != nil {
ctl.Warn("start new work connection error: %v", err)
@ -199,7 +199,7 @@ func (ctl *Control) login() (err error) {
ctl.session.Close()
}
conn, err := net.ConnectTcpServerByHttpProxy(config.ClientCommonCfg.HttpProxy,
conn, err := net.ConnectServerByHttpProxy(config.ClientCommonCfg.HttpProxy, config.ClientCommonCfg.Protocol,
fmt.Sprintf("%s:%d", config.ClientCommonCfg.ServerAddr, config.ClientCommonCfg.ServerPort))
if err != nil {
return err
@ -358,6 +358,9 @@ func (ctl *Control) manager() {
pxy := NewProxy(ctl, cfg)
if err := pxy.Run(); err != nil {
ctl.Warn("[%s] proxy start running error: %v", m.ProxyName, err)
ctl.sendCh <- &msg.CloseProxy{
ProxyName: m.ProxyName,
}
continue
}
ctl.proxies[m.ProxyName] = pxy


@ -24,9 +24,9 @@ import (
"github.com/fatedier/frp/models/config"
"github.com/fatedier/frp/models/msg"
"github.com/fatedier/frp/models/plugin"
"github.com/fatedier/frp/models/proto/tcp"
"github.com/fatedier/frp/models/proto/udp"
"github.com/fatedier/frp/utils/errors"
frpIo "github.com/fatedier/frp/utils/io"
"github.com/fatedier/frp/utils/log"
frpNet "github.com/fatedier/frp/utils/net"
)
@ -277,14 +277,14 @@ func HandleTcpWorkConnection(localInfo *config.LocalSvrConf, proxyPlugin plugin.
)
remote = workConn
if baseInfo.UseEncryption {
remote, err = tcp.WithEncryption(remote, []byte(config.ClientCommonCfg.PrivilegeToken))
remote, err = frpIo.WithEncryption(remote, []byte(config.ClientCommonCfg.PrivilegeToken))
if err != nil {
workConn.Error("create encryption stream error: %v", err)
return
}
}
if baseInfo.UseCompression {
remote = tcp.WithCompression(remote)
remote = frpIo.WithCompression(remote)
}
if proxyPlugin != nil {
@ -294,7 +294,7 @@ func HandleTcpWorkConnection(localInfo *config.LocalSvrConf, proxyPlugin plugin.
workConn.Debug("handle by plugin finished")
return
} else {
localConn, err := frpNet.ConnectTcpServer(fmt.Sprintf("%s:%d", localInfo.LocalIp, localInfo.LocalPort))
localConn, err := frpNet.ConnectServer("tcp", fmt.Sprintf("%s:%d", localInfo.LocalIp, localInfo.LocalPort))
if err != nil {
workConn.Error("connect to local service [%s:%d] error: %v", localInfo.LocalIp, localInfo.LocalPort, err)
return
@ -302,7 +302,7 @@ func HandleTcpWorkConnection(localInfo *config.LocalSvrConf, proxyPlugin plugin.
workConn.Debug("join connections, localConn(l[%s] r[%s]) workConn(l[%s] r[%s])", localConn.LocalAddr().String(),
localConn.RemoteAddr().String(), workConn.LocalAddr().String(), workConn.RemoteAddr().String())
tcp.Join(localConn, remote)
frpIo.Join(localConn, remote)
workConn.Debug("join connections closed")
}
}


@ -6,6 +6,7 @@ server_addr = 0.0.0.0
server_port = 7000
# if you want to connect frps by http proxy, you can set http_proxy here or in global environment variables
# it only works when protocol is tcp
# http_proxy = http://user:pwd@192.168.1.128:8080
# console or real logFile path like ./frpc.log
@ -32,6 +33,10 @@ user = your_name
# default is true
login_fail_exit = true
# communication protocol used to connect to server
# now it supports tcp and kcp, default is tcp
protocol = tcp
# proxy names you want to start divided by ','
# default is empty, means all proxies
# start = ssh,dns


@ -5,6 +5,10 @@
bind_addr = 0.0.0.0
bind_port = 7000
# udp port used for kcp protocol; it can be the same as 'bind_port'
# if not set, kcp is disabled in frps
kcp_bind_port = 7000
# if you want to support virtual host, you must set the http port for listening (optional)
vhost_http_port = 80
vhost_https_port = 443

Binary file not shown (new image, 27 KiB).


@ -41,6 +41,7 @@ type ClientCommonConf struct {
User string
LoginFailExit bool
Start map[string]struct{}
Protocol string
HeartBeatInterval int64
HeartBeatTimeout int64
}
@ -61,6 +62,7 @@ func GetDeaultClientCommonConf() *ClientCommonConf {
User: "",
LoginFailExit: true,
Start: make(map[string]struct{}),
Protocol: "tcp",
HeartBeatInterval: 30,
HeartBeatTimeout: 90,
}
@ -154,6 +156,15 @@ func LoadClientCommonConf(conf ini.File) (cfg *ClientCommonConf, err error) {
cfg.LoginFailExit = true
}
tmpStr, ok = conf.Get("common", "protocol")
if ok {
// Now it only supports tcp and kcp.
if tmpStr != "kcp" {
tmpStr = "tcp"
}
cfg.Protocol = tmpStr
}
tmpStr, ok = conf.Get("common", "heartbeat_timeout")
if ok {
v, err = strconv.ParseInt(tmpStr, 10, 64)


@ -27,9 +27,10 @@ var ServerCommonCfg *ServerCommonConf
// common config
type ServerCommonConf struct {
ConfigFile string
BindAddr string
BindPort int64
ConfigFile string
BindAddr string
BindPort int64
KcpBindPort int64
// If VhostHttpPort equals 0, don't listen a public port for http protocol.
VhostHttpPort int64
@ -64,6 +65,7 @@ func GetDefaultServerCommonConf() *ServerCommonConf {
ConfigFile: "./frps.ini",
BindAddr: "0.0.0.0",
BindPort: 7000,
KcpBindPort: 0,
VhostHttpPort: 0,
VhostHttpsPort: 0,
DashboardPort: 0,
@ -107,6 +109,14 @@ func LoadServerCommonConf(conf ini.File) (cfg *ServerCommonConf, err error) {
}
}
tmpStr, ok = conf.Get("common", "kcp_bind_port")
if ok {
v, err = strconv.ParseInt(tmpStr, 10, 64)
if err == nil && v > 0 {
cfg.KcpBindPort = v
}
}
tmpStr, ok = conf.Get("common", "vhost_http_port")
if ok {
cfg.VhostHttpPort, err = strconv.ParseInt(tmpStr, 10, 64)


@ -24,6 +24,7 @@ const (
TypeLoginResp = '1'
TypeNewProxy = 'p'
TypeNewProxyResp = '2'
TypeCloseProxy = 'c'
TypeNewWorkConn = 'w'
TypeReqWorkConn = 'r'
TypeStartWorkConn = 's'
@ -45,6 +46,7 @@ func init() {
TypeMap[TypeLoginResp] = reflect.TypeOf(LoginResp{})
TypeMap[TypeNewProxy] = reflect.TypeOf(NewProxy{})
TypeMap[TypeNewProxyResp] = reflect.TypeOf(NewProxyResp{})
TypeMap[TypeCloseProxy] = reflect.TypeOf(CloseProxy{})
TypeMap[TypeNewWorkConn] = reflect.TypeOf(NewWorkConn{})
TypeMap[TypeReqWorkConn] = reflect.TypeOf(ReqWorkConn{})
TypeMap[TypeStartWorkConn] = reflect.TypeOf(StartWorkConn{})
@ -105,6 +107,10 @@ type NewProxyResp struct {
Error string `json:"error"`
}
type CloseProxy struct {
ProxyName string `json:"proxy_name"`
}
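For reference, a quick sketch of what the new CloseProxy message body looks like once encoded with the `json` tags above (the byte-level framing around it is handled elsewhere in the msg package and is not shown here):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type CloseProxy struct {
	ProxyName string `json:"proxy_name"`
}

func main() {
	body, _ := json.Marshal(CloseProxy{ProxyName: "ssh"})
	fmt.Println(string(body)) // {"proxy_name":"ssh"}
}
```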
type NewWorkConn struct {
RunId string `json:"run_id"`
}


@ -15,6 +15,7 @@
package plugin
import (
"bufio"
"encoding/base64"
"fmt"
"io"
@ -23,8 +24,8 @@ import (
"strings"
"sync"
"github.com/fatedier/frp/models/proto/tcp"
"github.com/fatedier/frp/utils/errors"
frpIo "github.com/fatedier/frp/utils/io"
frpNet "github.com/fatedier/frp/utils/net"
)
@ -106,14 +107,26 @@ func (hp *HttpProxy) Name() string {
}
func (hp *HttpProxy) Handle(conn io.ReadWriteCloser) {
var wrapConn net.Conn
if realConn, ok := conn.(net.Conn); ok {
var wrapConn frpNet.Conn
if realConn, ok := conn.(frpNet.Conn); ok {
wrapConn = realConn
} else {
wrapConn = frpNet.WrapReadWriteCloserToConn(conn)
}
hp.l.PutConn(wrapConn)
sc, rd := frpNet.NewShareConn(wrapConn)
request, err := http.ReadRequest(bufio.NewReader(rd))
if err != nil {
wrapConn.Close()
return
}
if request.Method == http.MethodConnect {
hp.handleConnectReq(request, frpIo.WrapReadWriteCloser(rd, wrapConn, nil))
return
}
hp.l.PutConn(sc)
return
}
@ -124,13 +137,15 @@ func (hp *HttpProxy) Close() error {
}
func (hp *HttpProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if ok := hp.Auth(rw, req); !ok {
if ok := hp.Auth(req); !ok {
rw.Header().Set("Proxy-Authenticate", "Basic")
rw.WriteHeader(http.StatusProxyAuthRequired)
return
}
if req.Method == "CONNECT" {
if req.Method == http.MethodConnect {
// deprecated
// Connect request is handled in Handle function.
hp.ConnectHandler(rw, req)
} else {
hp.HttpHandler(rw, req)
@ -156,6 +171,9 @@ func (hp *HttpProxy) HttpHandler(rw http.ResponseWriter, req *http.Request) {
}
}
// deprecated
// Hijack needs to SetReadDeadline on the Conn of the request, but if we use stream compression here,
// we may always get i/o timeout error.
func (hp *HttpProxy) ConnectHandler(rw http.ResponseWriter, req *http.Request) {
hj, ok := rw.(http.Hijacker)
if !ok {
@ -175,12 +193,12 @@ func (hp *HttpProxy) ConnectHandler(rw http.ResponseWriter, req *http.Request) {
client.Close()
return
}
client.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
client.Write([]byte("HTTP/1.1 200 OK\r\n\r\n"))
go tcp.Join(remote, client)
go frpIo.Join(remote, client)
}
func (hp *HttpProxy) Auth(rw http.ResponseWriter, req *http.Request) bool {
func (hp *HttpProxy) Auth(req *http.Request) bool {
if hp.AuthUser == "" && hp.AuthPasswd == "" {
return true
}
@ -206,6 +224,30 @@ func (hp *HttpProxy) Auth(rw http.ResponseWriter, req *http.Request) bool {
return true
}
func (hp *HttpProxy) handleConnectReq(req *http.Request, rwc io.ReadWriteCloser) {
defer rwc.Close()
if ok := hp.Auth(req); !ok {
res := getBadResponse()
res.Write(rwc)
return
}
remote, err := net.Dial("tcp", req.URL.Host)
if err != nil {
res := &http.Response{
StatusCode: 400,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
}
res.Write(rwc)
return
}
rwc.Write([]byte("HTTP/1.1 200 OK\r\n\r\n"))
frpIo.Join(remote, rwc)
}
func copyHeaders(dst, src http.Header) {
for key, values := range src {
for _, value := range values {
@ -225,3 +267,17 @@ func removeProxyHeaders(req *http.Request) {
req.Header.Del("Transfer-Encoding")
req.Header.Del("Upgrade")
}
func getBadResponse() *http.Response {
header := make(map[string][]string)
header["Proxy-Authenticate"] = []string{"Basic"}
res := &http.Response{
Status: "407 Not authorized",
StatusCode: 407,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: header,
}
return res
}


@ -19,7 +19,7 @@ import (
"io"
"net"
"github.com/fatedier/frp/models/proto/tcp"
frpIo "github.com/fatedier/frp/utils/io"
)
const PluginUnixDomainSocket = "unix_domain_socket"
@ -57,7 +57,7 @@ func (uds *UnixDomainSocketPlugin) Handle(conn io.ReadWriteCloser) {
return
}
tcp.Join(localConn, conn)
frpIo.Join(localConn, conn)
}
func (uds *UnixDomainSocketPlugin) Name() string {


@ -1,67 +0,0 @@
// Copyright 2017 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tcp
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
)
func TestJoin(t *testing.T) {
assert := assert.New(t)
var (
n int
err error
)
text1 := "A document that gives tips for writing clear, idiomatic Go code. A must read for any new Go programmer. It augments the tour and the language specification, both of which should be read first."
text2 := "A document that specifies the conditions under which reads of a variable in one goroutine can be guaranteed to observe values produced by writes to the same variable in a different goroutine."
// Forward bytes directly.
pr, pw := io.Pipe()
pr2, pw2 := io.Pipe()
pr3, pw3 := io.Pipe()
pr4, pw4 := io.Pipe()
conn1 := WrapReadWriteCloser(pr, pw2)
conn2 := WrapReadWriteCloser(pr2, pw)
conn3 := WrapReadWriteCloser(pr3, pw4)
conn4 := WrapReadWriteCloser(pr4, pw3)
go func() {
Join(conn2, conn3)
}()
buf1 := make([]byte, 1024)
buf2 := make([]byte, 1024)
conn1.Write([]byte(text1))
conn4.Write([]byte(text2))
n, err = conn4.Read(buf1)
assert.NoError(err)
assert.Equal(text1, string(buf1[:n]))
n, err = conn1.Read(buf2)
assert.NoError(err)
assert.Equal(text2, string(buf2[:n]))
conn1.Close()
conn2.Close()
conn3.Close()
conn4.Close()
}


@ -50,7 +50,7 @@ type Control struct {
workConnCh chan net.Conn
// proxies in one client
proxies []Proxy
proxies map[string]Proxy
// pool count
poolCount int
@ -82,7 +82,7 @@ func NewControl(svr *Service, ctlConn net.Conn, loginMsg *msg.Login) *Control {
sendCh: make(chan msg.Message, 10),
readCh: make(chan msg.Message, 10),
workConnCh: make(chan net.Conn, loginMsg.PoolCount+10),
proxies: make([]Proxy, 0),
proxies: make(map[string]Proxy),
poolCount: loginMsg.PoolCount,
lastPing: time.Now(),
runId: loginMsg.RunId,
@ -265,6 +265,8 @@ func (ctl *Control) stoper() {
workConn.Close()
}
ctl.mu.Lock()
defer ctl.mu.Unlock()
for _, pxy := range ctl.proxies {
pxy.Close()
ctl.svr.DelProxy(pxy.GetName())
@ -317,6 +319,9 @@ func (ctl *Control) manager() {
StatsNewProxy(m.ProxyName, m.ProxyType)
}
ctl.sendCh <- resp
case *msg.CloseProxy:
ctl.CloseProxy(m)
ctl.conn.Info("close proxy [%s] success", m.ProxyName)
case *msg.Ping:
ctl.lastPing = time.Now()
ctl.conn.Debug("receive heartbeat")
@ -355,6 +360,24 @@ func (ctl *Control) RegisterProxy(pxyMsg *msg.NewProxy) (err error) {
if err != nil {
return err
}
ctl.proxies = append(ctl.proxies, pxy)
ctl.mu.Lock()
ctl.proxies[pxy.GetName()] = pxy
ctl.mu.Unlock()
return nil
}
func (ctl *Control) CloseProxy(closeMsg *msg.CloseProxy) (err error) {
ctl.mu.Lock()
defer ctl.mu.Unlock()
pxy, ok := ctl.proxies[closeMsg.ProxyName]
if !ok {
return
}
pxy.Close()
ctl.svr.DelProxy(pxy.GetName())
StatsCloseProxy(pxy.GetName(), pxy.GetConf().GetBaseInfo().ProxyType)
return
}


@ -24,9 +24,9 @@ import (
"github.com/fatedier/frp/models/config"
"github.com/fatedier/frp/models/msg"
"github.com/fatedier/frp/models/proto/tcp"
"github.com/fatedier/frp/models/proto/udp"
"github.com/fatedier/frp/utils/errors"
frpIo "github.com/fatedier/frp/utils/io"
"github.com/fatedier/frp/utils/log"
frpNet "github.com/fatedier/frp/utils/net"
"github.com/fatedier/frp/utils/vhost"
@ -461,20 +461,20 @@ func HandleUserTcpConnection(pxy Proxy, userConn frpNet.Conn) {
var local io.ReadWriteCloser = workConn
cfg := pxy.GetConf().GetBaseInfo()
if cfg.UseEncryption {
local, err = tcp.WithEncryption(local, []byte(config.ServerCommonCfg.PrivilegeToken))
local, err = frpIo.WithEncryption(local, []byte(config.ServerCommonCfg.PrivilegeToken))
if err != nil {
pxy.Error("create encryption stream error: %v", err)
return
}
}
if cfg.UseCompression {
local = tcp.WithCompression(local)
local = frpIo.WithCompression(local)
}
pxy.Debug("join connections, workConn(l[%s] r[%s]) userConn(l[%s] r[%s])", workConn.LocalAddr().String(),
workConn.RemoteAddr().String(), userConn.LocalAddr().String(), userConn.RemoteAddr().String())
StatsOpenConnection(pxy.GetName())
inCount, outCount := tcp.Join(local, userConn)
inCount, outCount := frpIo.Join(local, userConn)
StatsCloseConnection(pxy.GetName())
StatsAddTrafficIn(pxy.GetName(), inCount)
StatsAddTrafficOut(pxy.GetName(), outCount)


@ -41,6 +41,9 @@ type Service struct {
// Accept connections from client.
listener frpNet.Listener
// Accept connections using kcp.
kcpListener frpNet.Listener
// For http proxies, route requests to different clients by hostname and other information.
VhostHttpMuxer *vhost.HttpMuxer
@ -73,9 +76,20 @@ func NewService() (svr *Service, err error) {
err = fmt.Errorf("Create server listener error, %v", err)
return
}
log.Info("frps tcp listen on %s:%d", config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.BindPort)
// Listen for accepting connections from client using kcp protocol.
if config.ServerCommonCfg.KcpBindPort > 0 {
svr.kcpListener, err = frpNet.ListenKcp(config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.KcpBindPort)
if err != nil {
err = fmt.Errorf("Listen on kcp address udp [%s:%d] error: %v", config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.KcpBindPort, err)
return
}
log.Info("frps kcp listen on udp %s:%d", config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.BindPort)
}
// Create http vhost muxer.
if config.ServerCommonCfg.VhostHttpPort != 0 {
if config.ServerCommonCfg.VhostHttpPort > 0 {
var l frpNet.Listener
l, err = frpNet.ListenTcp(config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.VhostHttpPort)
if err != nil {
@ -87,10 +101,11 @@ func NewService() (svr *Service, err error) {
err = fmt.Errorf("Create vhost httpMuxer error, %v", err)
return
}
log.Info("http service listen on %s:%d", config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.VhostHttpPort)
}
// Create https vhost muxer.
if config.ServerCommonCfg.VhostHttpsPort != 0 {
if config.ServerCommonCfg.VhostHttpsPort > 0 {
var l frpNet.Listener
l, err = frpNet.ListenTcp(config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.VhostHttpsPort)
if err != nil {
@ -102,10 +117,11 @@ func NewService() (svr *Service, err error) {
err = fmt.Errorf("Create vhost httpsMuxer error, %v", err)
return
}
log.Info("https service listen on %s:%d", config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.VhostHttpsPort)
}
// Create dashboard web server.
if config.ServerCommonCfg.DashboardPort != 0 {
if config.ServerCommonCfg.DashboardPort > 0 {
err = RunDashboardServer(config.ServerCommonCfg.BindAddr, config.ServerCommonCfg.DashboardPort)
if err != nil {
err = fmt.Errorf("Create dashboard web server error, %v", err)
@ -117,9 +133,17 @@ func NewService() (svr *Service, err error) {
}
func (svr *Service) Run() {
if config.ServerCommonCfg.KcpBindPort > 0 {
go svr.HandleListener(svr.kcpListener)
}
svr.HandleListener(svr.listener)
}
func (svr *Service) HandleListener(l frpNet.Listener) {
// Listen for incoming connections from client.
for {
c, err := svr.listener.Accept()
c, err := l.Accept()
if err != nil {
log.Warn("Listener for incoming connections from client closed")
return
@ -131,7 +155,7 @@ func (svr *Service) Run() {
var rawMsg msg.Message
conn.SetReadDeadline(time.Now().Add(connReadTimeout))
if rawMsg, err = msg.ReadMsg(conn); err != nil {
log.Warn("Failed to read message: %v", err)
log.Trace("Failed to read message: %v", err)
conn.Close()
return
}


@ -12,38 +12,74 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package tcp
package io
import (
"io"
"github.com/golang/snappy"
"sync"
"github.com/fatedier/frp/utils/crypto"
"github.com/fatedier/frp/utils/pool"
)
// Join two io.ReadWriteCloser and do some operations.
func Join(c1 io.ReadWriteCloser, c2 io.ReadWriteCloser) (inCount int64, outCount int64) {
var wait sync.WaitGroup
pipe := func(to io.ReadWriteCloser, from io.ReadWriteCloser, count *int64) {
defer to.Close()
defer from.Close()
defer wait.Done()
buf := pool.GetBuf(16 * 1024)
defer pool.PutBuf(buf)
*count, _ = io.CopyBuffer(to, from, buf)
}
wait.Add(2)
go pipe(c1, c2, &inCount)
go pipe(c2, c1, &outCount)
wait.Wait()
return
}
func WithEncryption(rwc io.ReadWriteCloser, key []byte) (io.ReadWriteCloser, error) {
w, err := crypto.NewWriter(rwc, key)
if err != nil {
return nil, err
}
return WrapReadWriteCloser(crypto.NewReader(rwc, key), w), nil
return WrapReadWriteCloser(crypto.NewReader(rwc, key), w, func() error {
return rwc.Close()
}), nil
}
func WithCompression(rwc io.ReadWriteCloser) io.ReadWriteCloser {
return WrapReadWriteCloser(snappy.NewReader(rwc), snappy.NewWriter(rwc))
}
func WrapReadWriteCloser(r io.Reader, w io.Writer) io.ReadWriteCloser {
return &ReadWriteCloser{
r: r,
w: w,
}
sr := pool.GetSnappyReader(rwc)
sw := pool.GetSnappyWriter(rwc)
return WrapReadWriteCloser(sr, sw, func() error {
err := rwc.Close()
pool.PutSnappyReader(sr)
pool.PutSnappyWriter(sw)
return err
})
}
type ReadWriteCloser struct {
r io.Reader
w io.Writer
r io.Reader
w io.Writer
closeFn func() error
closed bool
mu sync.Mutex
}
// closeFn will be called only once
func WrapReadWriteCloser(r io.Reader, w io.Writer, closeFn func() error) io.ReadWriteCloser {
return &ReadWriteCloser{
r: r,
w: w,
closeFn: closeFn,
closed: false,
}
}
func (rwc *ReadWriteCloser) Read(p []byte) (n int, err error) {
@ -55,6 +91,14 @@ func (rwc *ReadWriteCloser) Write(p []byte) (n int, err error) {
}
func (rwc *ReadWriteCloser) Close() (errRet error) {
rwc.mu.Lock()
if rwc.closed {
rwc.mu.Unlock()
return
}
rwc.closed = true
rwc.mu.Unlock()
var err error
if rc, ok := rwc.r.(io.Closer); ok {
err = rc.Close()
@ -69,5 +113,12 @@ func (rwc *ReadWriteCloser) Close() (errRet error) {
errRet = err
}
}
if rwc.closeFn != nil {
err = rwc.closeFn()
if err != nil {
errRet = err
}
}
return
}


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package tcp
package io
import (
"io"
@ -21,6 +21,51 @@ import (
"github.com/stretchr/testify/assert"
)
func TestJoin(t *testing.T) {
assert := assert.New(t)
var (
n int
err error
)
text1 := "A document that gives tips for writing clear, idiomatic Go code. A must read for any new Go programmer. It augments the tour and the language specification, both of which should be read first."
text2 := "A document that specifies the conditions under which reads of a variable in one goroutine can be guaranteed to observe values produced by writes to the same variable in a different goroutine."
// Forward bytes directly.
pr, pw := io.Pipe()
pr2, pw2 := io.Pipe()
pr3, pw3 := io.Pipe()
pr4, pw4 := io.Pipe()
conn1 := WrapReadWriteCloser(pr, pw2, nil)
conn2 := WrapReadWriteCloser(pr2, pw, nil)
conn3 := WrapReadWriteCloser(pr3, pw4, nil)
conn4 := WrapReadWriteCloser(pr4, pw3, nil)
go func() {
Join(conn2, conn3)
}()
buf1 := make([]byte, 1024)
buf2 := make([]byte, 1024)
conn1.Write([]byte(text1))
conn4.Write([]byte(text2))
n, err = conn4.Read(buf1)
assert.NoError(err)
assert.Equal(text1, string(buf1[:n]))
n, err = conn1.Read(buf2)
assert.NoError(err)
assert.Equal(text2, string(buf2[:n]))
conn1.Close()
conn2.Close()
conn3.Close()
conn4.Close()
}
func TestWithCompression(t *testing.T) {
assert := assert.New(t)
@ -28,8 +73,8 @@ func TestWithCompression(t *testing.T) {
pr, pw := io.Pipe()
pr2, pw2 := io.Pipe()
conn1 := WrapReadWriteCloser(pr, pw2)
conn2 := WrapReadWriteCloser(pr2, pw)
conn1 := WrapReadWriteCloser(pr, pw2, nil)
conn2 := WrapReadWriteCloser(pr2, pw, nil)
compressionStream1 := WithCompression(conn1)
compressionStream2 := WithCompression(conn2)
@ -71,12 +116,12 @@ func TestWithEncryption(t *testing.T) {
pr5, pw5 := io.Pipe()
pr6, pw6 := io.Pipe()
conn1 := WrapReadWriteCloser(pr, pw2)
conn2 := WrapReadWriteCloser(pr2, pw)
conn3 := WrapReadWriteCloser(pr3, pw4)
conn4 := WrapReadWriteCloser(pr4, pw3)
conn5 := WrapReadWriteCloser(pr5, pw6)
conn6 := WrapReadWriteCloser(pr6, pw5)
conn1 := WrapReadWriteCloser(pr, pw2, nil)
conn2 := WrapReadWriteCloser(pr2, pw, nil)
conn3 := WrapReadWriteCloser(pr3, pw4, nil)
conn4 := WrapReadWriteCloser(pr4, pw3, nil)
conn5 := WrapReadWriteCloser(pr5, pw6, nil)
conn6 := WrapReadWriteCloser(pr6, pw5, nil)
encryptStream1, err := WithEncryption(conn3, []byte(key))
assert.NoError(err)


@ -15,11 +15,17 @@
package net
import (
"bytes"
"errors"
"fmt"
"io"
"net"
"sync"
"time"
"github.com/fatedier/frp/utils/log"
kcp "github.com/xtaci/kcp-go"
)
// Conn is the interface of connections used in frp.
@ -45,6 +51,13 @@ type WrapReadWriteCloserConn struct {
log.Logger
}
func WrapReadWriteCloserToConn(rwc io.ReadWriteCloser) Conn {
return &WrapReadWriteCloserConn{
ReadWriteCloser: rwc,
Logger: log.NewPrefixLogger(""),
}
}
func (conn *WrapReadWriteCloserConn) LocalAddr() net.Addr {
return (*net.TCPAddr)(nil)
}
@ -54,26 +67,92 @@ func (conn *WrapReadWriteCloserConn) RemoteAddr() net.Addr {
}
func (conn *WrapReadWriteCloserConn) SetDeadline(t time.Time) error {
return nil
return &net.OpError{Op: "set", Net: "wrap", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
func (conn *WrapReadWriteCloserConn) SetReadDeadline(t time.Time) error {
return nil
return &net.OpError{Op: "set", Net: "wrap", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
func (conn *WrapReadWriteCloserConn) SetWriteDeadline(t time.Time) error {
return nil
return &net.OpError{Op: "set", Net: "wrap", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
func WrapReadWriteCloserToConn(rwc io.ReadWriteCloser) Conn {
return &WrapReadWriteCloserConn{
ReadWriteCloser: rwc,
Logger: log.NewPrefixLogger(""),
func ConnectServer(protocol string, addr string) (c Conn, err error) {
switch protocol {
case "tcp":
return ConnectTcpServer(addr)
case "kcp":
kcpConn, errRet := kcp.DialWithOptions(addr, nil, 10, 3)
if errRet != nil {
err = errRet
return
}
kcpConn.SetStreamMode(true)
kcpConn.SetWriteDelay(true)
kcpConn.SetNoDelay(1, 20, 2, 1)
kcpConn.SetWindowSize(128, 512)
kcpConn.SetMtu(1350)
kcpConn.SetACKNoDelay(false)
kcpConn.SetReadBuffer(4194304)
kcpConn.SetWriteBuffer(4194304)
c = WrapConn(kcpConn)
return
default:
return nil, fmt.Errorf("unsupport protocol: %s", protocol)
}
}
type Listener interface {
Accept() (Conn, error)
Close() error
log.Logger
func ConnectServerByHttpProxy(httpProxy string, protocol string, addr string) (c Conn, err error) {
switch protocol {
case "tcp":
return ConnectTcpServerByHttpProxy(httpProxy, addr)
case "kcp":
// http proxy is not supported for kcp
return ConnectServer(protocol, addr)
default:
return nil, fmt.Errorf("unsupport protocol: %s", protocol)
}
}
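A minimal usage sketch of the two dial helpers, mirroring the calls added in client/control.go earlier in this diff (the dialServer helper itself is hypothetical; the config fields are the ones read from frpc.ini):

```go
package example

import (
	"fmt"

	"github.com/fatedier/frp/models/config"
	frpNet "github.com/fatedier/frp/utils/net"
)

// dialServer connects to frps over tcp or kcp; an http proxy, when configured,
// is only honored for tcp (ConnectServerByHttpProxy falls back for kcp).
func dialServer(cfg *config.ClientCommonConf) (frpNet.Conn, error) {
	addr := fmt.Sprintf("%s:%d", cfg.ServerAddr, cfg.ServerPort)
	if cfg.HttpProxy != "" {
		return frpNet.ConnectServerByHttpProxy(cfg.HttpProxy, cfg.Protocol, addr)
	}
	return frpNet.ConnectServer(cfg.Protocol, addr)
}
```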
type SharedConn struct {
Conn
sync.Mutex
buf *bytes.Buffer
}
// bytes read from the returned io.Reader are preserved in the SharedConn
func NewShareConn(conn Conn) (*SharedConn, io.Reader) {
sc := &SharedConn{
Conn: conn,
buf: bytes.NewBuffer(make([]byte, 0, 1024)),
}
return sc, io.TeeReader(conn, sc.buf)
}
func (sc *SharedConn) Read(p []byte) (n int, err error) {
sc.Lock()
if sc.buf == nil {
sc.Unlock()
return sc.Conn.Read(p)
}
sc.Unlock()
n, err = sc.buf.Read(p)
if err == io.EOF {
sc.Lock()
sc.buf = nil
sc.Unlock()
var n2 int
n2, err = sc.Conn.Read(p[n:])
n += n2
}
return
}
func (sc *SharedConn) WriteBuff(buffer []byte) (err error) {
sc.buf.Reset()
_, err = sc.buf.Write(buffer)
return err
}
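A short sketch of the peek-then-replay pattern SharedConn enables; this mirrors what GetHttpRequestInfo in utils/vhost does further down in this diff. The helper name peekHost is made up for illustration:

```go
package example

import (
	"bufio"
	"net/http"

	frpNet "github.com/fatedier/frp/utils/net"
)

// peekHost reads the HTTP request from c once to learn the requested Host.
// Every byte consumed through the tee reader stays buffered in the SharedConn,
// so the next reader of the returned conn sees the request from its first byte.
func peekHost(c frpNet.Conn) (frpNet.Conn, string, error) {
	sc, rd := frpNet.NewShareConn(c)
	req, err := http.ReadRequest(bufio.NewReader(rd))
	if err != nil {
		return sc, "", err
	}
	return sc, req.Host, nil
}
```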

87
utils/net/kcp.go Normal file

@ -0,0 +1,87 @@
// Copyright 2017 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net
import (
"fmt"
"net"
"github.com/fatedier/frp/utils/log"
kcp "github.com/xtaci/kcp-go"
)
type KcpListener struct {
net.Addr
listener net.Listener
accept chan Conn
closeFlag bool
log.Logger
}
func ListenKcp(bindAddr string, bindPort int64) (l *KcpListener, err error) {
listener, err := kcp.ListenWithOptions(fmt.Sprintf("%s:%d", bindAddr, bindPort), nil, 10, 3)
if err != nil {
return l, err
}
listener.SetReadBuffer(4194304)
listener.SetWriteBuffer(4194304)
l = &KcpListener{
Addr: listener.Addr(),
listener: listener,
accept: make(chan Conn),
closeFlag: false,
Logger: log.NewPrefixLogger(""),
}
go func() {
for {
conn, err := listener.AcceptKCP()
if err != nil {
if l.closeFlag {
close(l.accept)
return
}
continue
}
conn.SetStreamMode(true)
conn.SetWriteDelay(true)
conn.SetNoDelay(1, 20, 2, 1)
conn.SetMtu(1350)
conn.SetWindowSize(1024, 1024)
conn.SetACKNoDelay(false)
l.accept <- WrapConn(conn)
}
}()
return l, err
}
func (l *KcpListener) Accept() (Conn, error) {
conn, ok := <-l.accept
if !ok {
return conn, fmt.Errorf("channel for kcp listener closed")
}
return conn, nil
}
func (l *KcpListener) Close() error {
if !l.closeFlag {
l.closeFlag = true
l.listener.Close()
}
return nil
}
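A minimal sketch of driving this listener, similar to how server/service.go uses it elsewhere in this diff; acceptLoop and handleConn are hypothetical names:

```go
package example

import (
	"log"

	frpNet "github.com/fatedier/frp/utils/net"
)

// acceptLoop accepts kcp connections on the given udp port and hands each one
// to handleConn until the listener is closed.
func acceptLoop(bindAddr string, kcpPort int64, handleConn func(frpNet.Conn)) {
	l, err := frpNet.ListenKcp(bindAddr, kcpPort)
	if err != nil {
		log.Printf("listen kcp on %s:%d error: %v", bindAddr, kcpPort, err)
		return
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			// Accept fails only after the listener has been closed.
			return
		}
		go handleConn(conn)
	}
}
```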


@ -1,4 +1,4 @@
// Copyright 2016 fatedier, fatedier@gmail.com
// Copyright 2017 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,27 +12,35 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package tcp
package net
import (
"io"
"sync"
"net"
"github.com/fatedier/frp/utils/log"
)
// Join two io.ReadWriteCloser and do some operations.
func Join(c1 io.ReadWriteCloser, c2 io.ReadWriteCloser) (inCount int64, outCount int64) {
var wait sync.WaitGroup
pipe := func(to io.ReadWriteCloser, from io.ReadWriteCloser, count *int64) {
defer to.Close()
defer from.Close()
defer wait.Done()
*count, _ = io.Copy(to, from)
}
wait.Add(2)
go pipe(c1, c2, &inCount)
go pipe(c2, c1, &outCount)
wait.Wait()
return
type Listener interface {
Accept() (Conn, error)
Close() error
log.Logger
}
type LogListener struct {
l net.Listener
net.Listener
log.Logger
}
func WrapLogListener(l net.Listener) Listener {
return &LogListener{
l: l,
Listener: l,
Logger: log.NewPrefixLogger(""),
}
}
func (logL *LogListener) Accept() (Conn, error) {
c, err := logL.l.Accept()
return WrapConn(c), err
}


@ -17,15 +17,18 @@ package pool
import "sync"
var (
bufPool5k sync.Pool
bufPool2k sync.Pool
bufPool1k sync.Pool
bufPool sync.Pool
bufPool16k sync.Pool
bufPool5k sync.Pool
bufPool2k sync.Pool
bufPool1k sync.Pool
bufPool sync.Pool
)
func GetBuf(size int) []byte {
var x interface{}
if size >= 5*1024 {
if size >= 16*1024 {
x = bufPool16k.Get()
} else if size >= 5*1024 {
x = bufPool5k.Get()
} else if size >= 2*1024 {
x = bufPool2k.Get()
@ -46,7 +49,9 @@ func GetBuf(size int) []byte {
func PutBuf(buf []byte) {
size := cap(buf)
if size >= 5*1024 {
if size >= 16*1024 {
bufPool16k.Put(buf)
} else if size >= 5*1024 {
bufPool5k.Put(buf)
} else if size >= 2*1024 {
bufPool2k.Put(buf)

57
utils/pool/snappy.go Normal file

@ -0,0 +1,57 @@
// Copyright 2017 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pool
import (
"io"
"sync"
"github.com/golang/snappy"
)
var (
snappyReaderPool sync.Pool
snappyWriterPool sync.Pool
)
func GetSnappyReader(r io.Reader) *snappy.Reader {
var x interface{}
x = snappyReaderPool.Get()
if x == nil {
return snappy.NewReader(r)
}
sr := x.(*snappy.Reader)
sr.Reset(r)
return sr
}
func PutSnappyReader(sr *snappy.Reader) {
snappyReaderPool.Put(sr)
}
func GetSnappyWriter(w io.Writer) *snappy.Writer {
var x interface{}
x = snappyWriterPool.Get()
if x == nil {
return snappy.NewWriter(w)
}
sw := x.(*snappy.Writer)
sw.Reset(w)
return sw
}
func PutSnappyWriter(sw *snappy.Writer) {
snappyWriterPool.Put(sw)
}


@ -19,7 +19,7 @@ import (
"strings"
)
var version string = "0.11.0"
var version string = "0.12.0"
func Full() string {
return version


@ -35,7 +35,7 @@ type HttpMuxer struct {
func GetHttpRequestInfo(c frpNet.Conn) (_ frpNet.Conn, _ map[string]string, err error) {
reqInfoMap := make(map[string]string, 0)
sc, rd := newShareConn(c)
sc, rd := frpNet.NewShareConn(c)
request, err := http.ReadRequest(bufio.NewReader(rd))
if err != nil {
@ -62,7 +62,7 @@ func NewHttpMuxer(listener frpNet.Listener, timeout time.Duration) (*HttpMuxer,
}
func HttpHostNameRewrite(c frpNet.Conn, rewriteHost string) (_ frpNet.Conn, err error) {
sc, rd := newShareConn(c)
sc, rd := frpNet.NewShareConn(c)
var buff []byte
if buff, err = hostNameRewrite(rd, rewriteHost); err != nil {
return sc, err


@ -179,7 +179,7 @@ func readHandshake(rd io.Reader) (host string, err error) {
func GetHttpsHostname(c frpNet.Conn) (sc frpNet.Conn, _ map[string]string, err error) {
reqInfoMap := make(map[string]string, 0)
sc, rd := newShareConn(c)
sc, rd := frpNet.NewShareConn(c)
host, err := readHandshake(rd)
if err != nil {
return sc, reqInfoMap, err

62
utils/vhost/resource.go Normal file

@ -0,0 +1,62 @@
// Copyright 2017 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vhost
import (
"io/ioutil"
"net/http"
"strings"
"github.com/fatedier/frp/utils/version"
)
const (
NotFound = `<!DOCTYPE html>
<html>
<head>
<title>Not Found</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>The page you visited was not found.</h1>
<p>Sorry, the page you are looking for is currently unavailable.<br/>
Please try again later.</p>
<p>The server is powered by <a href="https://github.com/fatedier/frp">frp</a>.</p>
<p><em>Faithfully yours, frp.</em></p>
</body>
</html>
`
)
func notFoundResponse() *http.Response {
header := make(http.Header)
header.Set("server", "frp/"+version.Full())
res := &http.Response{
Status: "Not Found",
StatusCode: 400,
Proto: "HTTP/1.0",
ProtoMajor: 1,
ProtoMinor: 0,
Header: header,
Body: ioutil.NopCloser(strings.NewReader(NotFound)),
}
return res
}


@ -13,9 +13,7 @@
package vhost
import (
"bytes"
"fmt"
"io"
"strings"
"sync"
"time"
@ -128,7 +126,7 @@ func (v *VhostMuxer) handle(c frpNet.Conn) {
sConn, reqInfoMap, err := v.vhostFunc(c)
if err != nil {
log.Error("get hostname from http/https request error: %v", err)
log.Warn("get hostname from http/https request error: %v", err)
c.Close()
return
}
@ -137,17 +135,19 @@ func (v *VhostMuxer) handle(c frpNet.Conn) {
path := strings.ToLower(reqInfoMap["Path"])
l, ok := v.getListener(name, path)
if !ok {
res := notFoundResponse()
res.Write(c)
log.Debug("http request for host [%s] path [%s] not found", name, path)
c.Close()
return
}
// if authFunc is exist and userName/password is set
// verify user access
// then verify user access
if l.mux.authFunc != nil && l.userName != "" && l.passWord != "" {
bAccess, err := l.mux.authFunc(c, l.userName, l.passWord, reqInfoMap["Authorization"])
if bAccess == false || err != nil {
l.Debug("check Authorization failed")
l.Debug("check http Authorization failed")
res := noAuthResponse()
res.Write(c)
c.Close()
@ -209,45 +209,3 @@ func (l *Listener) Close() error {
func (l *Listener) Name() string {
return l.name
}
type sharedConn struct {
frpNet.Conn
sync.Mutex
buff *bytes.Buffer
}
// the bytes you read in io.Reader, will be reserved in sharedConn
func newShareConn(conn frpNet.Conn) (*sharedConn, io.Reader) {
sc := &sharedConn{
Conn: conn,
buff: bytes.NewBuffer(make([]byte, 0, 1024)),
}
return sc, io.TeeReader(conn, sc.buff)
}
func (sc *sharedConn) Read(p []byte) (n int, err error) {
sc.Lock()
if sc.buff == nil {
sc.Unlock()
return sc.Conn.Read(p)
}
sc.Unlock()
n, err = sc.buff.Read(p)
if err == io.EOF {
sc.Lock()
sc.buff = nil
sc.Unlock()
var n2 int
n2, err = sc.Conn.Read(p[n:])
n += n2
}
return
}
func (sc *sharedConn) WriteBuff(buffer []byte) (err error) {
sc.buff.Reset()
_, err = sc.buff.Write(buffer)
return err
}

24
vendor/github.com/klauspost/cpuid/.gitignore generated vendored Normal file

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

8
vendor/github.com/klauspost/cpuid/.travis.yml generated vendored Normal file

@ -0,0 +1,8 @@
language: go
go:
- 1.3
- 1.4
- 1.5
- 1.6
- tip

22
vendor/github.com/klauspost/cpuid/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

145
vendor/github.com/klauspost/cpuid/README.md generated vendored Normal file

@ -0,0 +1,145 @@
# cpuid
Package cpuid provides information about the CPU running the current program.
CPU features are detected on startup, and kept for fast access through the life of the application.
Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
You can access the CPU information by accessing the shared CPU variable of the cpuid library.
Package home: https://github.com/klauspost/cpuid
[![GoDoc][1]][2] [![Build Status][3]][4]
[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
[2]: https://godoc.org/github.com/klauspost/cpuid
[3]: https://travis-ci.org/klauspost/cpuid.svg
[4]: https://travis-ci.org/klauspost/cpuid
# features
## CPU Instructions
* **CMOV** (i686 CMOV)
* **NX** (NX (No-Execute) bit)
* **AMD3DNOW** (AMD 3DNOW)
* **AMD3DNOWEXT** (AMD 3DNowExt)
* **MMX** (standard MMX)
* **MMXEXT** (SSE integer functions or AMD MMX ext)
* **SSE** (SSE functions)
* **SSE2** (P4 SSE functions)
* **SSE3** (Prescott SSE3 functions)
* **SSSE3** (Conroe SSSE3 functions)
* **SSE4** (Penryn SSE4.1 functions)
* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
* **SSE42** (Nehalem SSE4.2 functions)
* **AVX** (AVX functions)
* **AVX2** (AVX2 functions)
* **FMA3** (Intel FMA 3)
* **FMA4** (Bulldozer FMA4 functions)
* **XOP** (Bulldozer XOP functions)
* **F16C** (Half-precision floating-point conversion)
* **BMI1** (Bit Manipulation Instruction Set 1)
* **BMI2** (Bit Manipulation Instruction Set 2)
* **TBM** (AMD Trailing Bit Manipulation)
* **LZCNT** (LZCNT instruction)
* **POPCNT** (POPCNT instruction)
* **AESNI** (Advanced Encryption Standard New Instructions)
* **CLMUL** (Carry-less Multiplication)
* **HTT** (Hyperthreading (enabled))
* **HLE** (Hardware Lock Elision)
* **RTM** (Restricted Transactional Memory)
* **RDRAND** (RDRAND instruction is available)
* **RDSEED** (RDSEED instruction is available)
* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
* **SHA** (Intel SHA Extensions)
* **AVX512F** (AVX-512 Foundation)
* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
* **AVX512PF** (AVX-512 Prefetch Instructions)
* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
* **AVX512CD** (AVX-512 Conflict Detection Instructions)
* **AVX512BW** (AVX-512 Byte and Word Instructions)
* **AVX512VL** (AVX-512 Vector Length Extensions)
* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
* **MPX** (Intel MPX (Memory Protection Extensions))
* **ERMS** (Enhanced REP MOVSB/STOSB)
* **RDTSCP** (RDTSCP Instruction)
* **CX16** (CMPXCHG16B Instruction)
* **SGX** (Software Guard Extensions, with activation details)
## Performance
* **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
* **SSE2SLOW** (SSE2 is supported, but usually not faster)
* **SSE3SLOW** (SSE3 is supported, but usually not faster)
* **ATOM** (Atom processor, some SSSE3 instructions are slower)
* **Cache line** (Probable size of a cache line).
* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
## Cpu Vendor/VM
* **Intel**
* **AMD**
* **VIA**
* **Transmeta**
* **NSC**
* **KVM** (Kernel-based Virtual Machine)
* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
* **VMware**
* **XenHVM**
# installing
```go get github.com/klauspost/cpuid```
# example
```Go
package main
import (
"fmt"
"github.com/klauspost/cpuid"
)
func main() {
// Print basic CPU information:
fmt.Println("Name:", cpuid.CPU.BrandName)
fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
fmt.Println("Features:", cpuid.CPU.Features)
fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes")
fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
// Test if we have a specific feature:
if cpuid.CPU.SSE() {
fmt.Println("We have Streaming SIMD Extensions")
}
}
```
Sample output:
```
>go run main.go
Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
PhysicalCores: 2
ThreadsPerCore: 2
LogicalCores: 4
Family 6 Model: 42
Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
Cacheline bytes: 64
We have Streaming SIMD Extensions
```
# private package
In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
For this purpose all exports are removed, and functions and constants are lowercased.
This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
# license
This code is published under an MIT license. See LICENSE file for more information.

1022
vendor/github.com/klauspost/cpuid/cpuid.go generated vendored Normal file

File diff suppressed because it is too large.

42
vendor/github.com/klauspost/cpuid/cpuid_386.s generated vendored Normal file

@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORL CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+4(FP)
MOVL BX, ebx+8(FP)
MOVL CX, ecx+12(FP)
MOVL DX, edx+16(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+4(FP)
MOVL DX, edx+8(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET

42
vendor/github.com/klauspost/cpuid/cpuid_amd64.s generated vendored Normal file

@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
//+build amd64,!gccgo
// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
XORQ CX, CX
MOVL op+0(FP), AX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
MOVL op+0(FP), AX
MOVL op2+4(FP), CX
CPUID
MOVL AX, eax+8(FP)
MOVL BX, ebx+12(FP)
MOVL CX, ecx+16(FP)
MOVL DX, edx+20(FP)
RET
// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
MOVL index+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
MOVL AX, eax+8(FP)
MOVL DX, edx+12(FP)
RET
// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
MOVL AX, eax+0(FP)
MOVL BX, ebx+4(FP)
MOVL CX, ecx+8(FP)
MOVL DX, edx+12(FP)
RET

17
vendor/github.com/klauspost/cpuid/detect_intel.go generated vendored Normal file
View File

@ -0,0 +1,17 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build 386,!gccgo amd64,!gccgo
package cpuid
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
func initCPU() {
cpuid = asmCpuid
cpuidex = asmCpuidex
xgetbv = asmXgetbv
rdtscpAsm = asmRdtscpAsm
}

23
vendor/github.com/klauspost/cpuid/detect_ref.go generated vendored Normal file

@ -0,0 +1,23 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build !amd64,!386 gccgo
package cpuid
func initCPU() {
cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
xgetbv = func(index uint32) (eax, edx uint32) {
return 0, 0
}
rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
return 0, 0, 0, 0
}
}

3
vendor/github.com/klauspost/cpuid/generate.go generated vendored Normal file

@ -0,0 +1,3 @@
package cpuid
//go:generate go run private-gen.go

476
vendor/github.com/klauspost/cpuid/private-gen.go generated vendored Normal file

@ -0,0 +1,476 @@
// +build ignore
package main
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"io"
"io/ioutil"
"log"
"os"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var inFiles = []string{"cpuid.go", "cpuid_test.go"}
var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
var fileSet = token.NewFileSet()
var reWrites = []rewrite{
initRewrite("CPUInfo -> cpuInfo"),
initRewrite("Vendor -> vendor"),
initRewrite("Flags -> flags"),
initRewrite("Detect -> detect"),
initRewrite("CPU -> cpu"),
}
var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
// cpuid_test.go
"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
}
var excludePrefixes = []string{"test", "benchmark"}
func main() {
Package := "private"
parserMode := parser.ParseComments
exported := make(map[string]rewrite)
for _, file := range inFiles {
in, err := os.Open(file)
if err != nil {
log.Fatalf("opening input: %s", err)
}
src, err := ioutil.ReadAll(in)
if err != nil {
log.Fatalf("reading input: %s", err)
}
astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
if err != nil {
log.Fatalf("parsing input: %s", err)
}
for _, rw := range reWrites {
astfile = rw(astfile)
}
// Inspect the AST and print all identifiers and literals.
var startDecl token.Pos
var endDecl token.Pos
ast.Inspect(astfile, func(n ast.Node) bool {
var s string
switch x := n.(type) {
case *ast.Ident:
if x.IsExported() {
t := strings.ToLower(x.Name)
for _, pre := range excludePrefixes {
if strings.HasPrefix(t, pre) {
return true
}
}
if !excludeNames[t] {
//if x.Pos() > startDecl && x.Pos() < endDecl {
exported[x.Name] = initRewrite(x.Name + " -> " + t)
}
}
case *ast.GenDecl:
if x.Tok == token.CONST && x.Lparen > 0 {
startDecl = x.Lparen
endDecl = x.Rparen
// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
}
}
if s != "" {
fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
}
return true
})
for _, rw := range exported {
astfile = rw(astfile)
}
var buf bytes.Buffer
printer.Fprint(&buf, fileSet, astfile)
// Remove package documentation and insert information
s := buf.String()
ind := strings.Index(buf.String(), "\npackage cpuid")
s = s[ind:]
s = "// Generated, DO NOT EDIT,\n" +
"// but copy it to your own project and rename the package.\n" +
"// See more at http://github.com/klauspost/cpuid\n" +
s
outputName := Package + string(os.PathSeparator) + file
err = ioutil.WriteFile(outputName, []byte(s), 0644)
if err != nil {
log.Fatalf("writing output: %s", err)
}
log.Println("Generated", outputName)
}
for _, file := range copyFiles {
dst := ""
if strings.HasPrefix(file, "cpuid") {
dst = Package + string(os.PathSeparator) + file
} else {
dst = Package + string(os.PathSeparator) + "cpuid_" + file
}
err := copyFile(file, dst)
if err != nil {
log.Fatalf("copying file: %s", err)
}
log.Println("Copied", dst)
}
}
// CopyFile copies a file from src to dst. If src and dst files exist, and are
// the same, then return success. Copy the file contents from src to dst.
func copyFile(src, dst string) (err error) {
sfi, err := os.Stat(src)
if err != nil {
return
}
if !sfi.Mode().IsRegular() {
// cannot copy non-regular files (e.g., directories,
// symlinks, devices, etc.)
return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
}
dfi, err := os.Stat(dst)
if err != nil {
if !os.IsNotExist(err) {
return
}
} else {
if !(dfi.Mode().IsRegular()) {
return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
}
if os.SameFile(sfi, dfi) {
return
}
}
err = copyFileContents(src, dst)
return
}
// copyFileContents copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all its contents will be replaced by the contents
// of the source file.
func copyFileContents(src, dst string) (err error) {
in, err := os.Open(src)
if err != nil {
return
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return
}
defer func() {
cerr := out.Close()
if err == nil {
err = cerr
}
}()
if _, err = io.Copy(out, in); err != nil {
return
}
err = out.Sync()
return
}
type rewrite func(*ast.File) *ast.File
// Mostly copied from gofmt
func initRewrite(rewriteRule string) rewrite {
f := strings.Split(rewriteRule, "->")
if len(f) != 2 {
fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
os.Exit(2)
}
pattern := parseExpr(f[0], "pattern")
replace := parseExpr(f[1], "replacement")
return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
}
// parseExpr parses s as an expression.
// It might make sense to expand this to allow statement patterns,
// but there are problems with preserving formatting and also
// with what a wildcard for a statement looks like.
func parseExpr(s, what string) ast.Expr {
x, err := parser.ParseExpr(s)
if err != nil {
fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
os.Exit(2)
}
return x
}
// Keep this function for debugging.
/*
func dump(msg string, val reflect.Value) {
fmt.Printf("%s:\n", msg)
ast.Print(fileSet, val.Interface())
fmt.Println()
}
*/
// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
cmap := ast.NewCommentMap(fileSet, p, p.Comments)
m := make(map[string]reflect.Value)
pat := reflect.ValueOf(pattern)
repl := reflect.ValueOf(replace)
var rewriteVal func(val reflect.Value) reflect.Value
rewriteVal = func(val reflect.Value) reflect.Value {
// don't bother if val is invalid to start with
if !val.IsValid() {
return reflect.Value{}
}
for k := range m {
delete(m, k)
}
val = apply(rewriteVal, val)
if match(m, pat, val) {
val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
}
return val
}
r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
r.Comments = cmap.Filter(r).Comments() // recreate comments list
return r
}
// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
func set(x, y reflect.Value) {
// don't bother if x cannot be set or y is invalid
if !x.CanSet() || !y.IsValid() {
return
}
defer func() {
if x := recover(); x != nil {
if s, ok := x.(string); ok &&
(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
// x cannot be set to y - ignore this rewrite
return
}
panic(x)
}
}()
x.Set(y)
}
// Values/types for special cases.
var (
objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
scopePtrNil = reflect.ValueOf((*ast.Scope)(nil))
identType = reflect.TypeOf((*ast.Ident)(nil))
objectPtrType = reflect.TypeOf((*ast.Object)(nil))
positionType = reflect.TypeOf(token.NoPos)
callExprType = reflect.TypeOf((*ast.CallExpr)(nil))
scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
)
// apply replaces each AST field x in val with f(x), returning val.
// To avoid extra conversions, f operates on the reflect.Value form.
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
if !val.IsValid() {
return reflect.Value{}
}
// *ast.Objects introduce cycles and are likely incorrect after
// rewrite; don't follow them but replace with nil instead
if val.Type() == objectPtrType {
return objectPtrNil
}
// similarly for scopes: they are likely incorrect after a rewrite;
// replace them with nil
if val.Type() == scopePtrType {
return scopePtrNil
}
switch v := reflect.Indirect(val); v.Kind() {
case reflect.Slice:
for i := 0; i < v.Len(); i++ {
e := v.Index(i)
set(e, f(e))
}
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
e := v.Field(i)
set(e, f(e))
}
case reflect.Interface:
e := v.Elem()
set(v, f(e))
}
return val
}
func isWildcard(s string) bool {
rune, size := utf8.DecodeRuneInString(s)
return size == len(s) && unicode.IsLower(rune)
}
// match returns true if pattern matches val,
// recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val.
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
// Wildcard matches any expression. If it appears multiple
// times in the pattern, it must match the same expression
// each time.
if m != nil && pattern.IsValid() && pattern.Type() == identType {
name := pattern.Interface().(*ast.Ident).Name
if isWildcard(name) && val.IsValid() {
// wildcards only match valid (non-nil) expressions.
if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
if old, ok := m[name]; ok {
return match(nil, old, val)
}
m[name] = val
return true
}
}
}
// Otherwise, pattern and val must match recursively.
if !pattern.IsValid() || !val.IsValid() {
return !pattern.IsValid() && !val.IsValid()
}
if pattern.Type() != val.Type() {
return false
}
// Special cases.
switch pattern.Type() {
case identType:
// For identifiers, only the names need to match
// (and none of the other *ast.Object information).
// This is a common case, handle it all here instead
// of recursing down any further via reflection.
p := pattern.Interface().(*ast.Ident)
v := val.Interface().(*ast.Ident)
return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
case objectPtrType, positionType:
// object pointers and token positions always match
return true
case callExprType:
// For calls, the Ellipsis fields (token.Position) must
// match since that is how f(x) and f(x...) are different.
// Check them here but fall through for the remaining fields.
p := pattern.Interface().(*ast.CallExpr)
v := val.Interface().(*ast.CallExpr)
if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
return false
}
}
p := reflect.Indirect(pattern)
v := reflect.Indirect(val)
if !p.IsValid() || !v.IsValid() {
return !p.IsValid() && !v.IsValid()
}
switch p.Kind() {
case reflect.Slice:
if p.Len() != v.Len() {
return false
}
for i := 0; i < p.Len(); i++ {
if !match(m, p.Index(i), v.Index(i)) {
return false
}
}
return true
case reflect.Struct:
for i := 0; i < p.NumField(); i++ {
if !match(m, p.Field(i), v.Field(i)) {
return false
}
}
return true
case reflect.Interface:
return match(m, p.Elem(), v.Elem())
}
// Handle token integers, etc.
return p.Interface() == v.Interface()
}
// subst returns a copy of pattern with values from m substituted in place
// of wildcards and pos used as the position of tokens from the pattern.
// if m == nil, subst returns a copy of pattern and doesn't change the line
// number information.
func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
if !pattern.IsValid() {
return reflect.Value{}
}
// Wildcard gets replaced with map value.
if m != nil && pattern.Type() == identType {
name := pattern.Interface().(*ast.Ident).Name
if isWildcard(name) {
if old, ok := m[name]; ok {
return subst(nil, old, reflect.Value{})
}
}
}
if pos.IsValid() && pattern.Type() == positionType {
// use new position only if old position was valid in the first place
if old := pattern.Interface().(token.Pos); !old.IsValid() {
return pattern
}
return pos
}
// Otherwise copy.
switch p := pattern; p.Kind() {
case reflect.Slice:
v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
for i := 0; i < p.Len(); i++ {
v.Index(i).Set(subst(m, p.Index(i), pos))
}
return v
case reflect.Struct:
v := reflect.New(p.Type()).Elem()
for i := 0; i < p.NumField(); i++ {
v.Field(i).Set(subst(m, p.Field(i), pos))
}
return v
case reflect.Ptr:
v := reflect.New(p.Type()).Elem()
if elem := p.Elem(); elem.IsValid() {
v.Set(subst(m, elem, pos).Addr())
}
return v
case reflect.Interface:
v := reflect.New(p.Type()).Elem()
if elem := p.Elem(); elem.IsValid() {
v.Set(subst(m, elem, pos))
}
return v
}
return pattern
}

24
vendor/github.com/klauspost/reedsolomon/.gitignore generated vendored Normal file

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

33
vendor/github.com/klauspost/reedsolomon/.travis.yml generated vendored Normal file

@ -0,0 +1,33 @@
language: go
sudo: false
os:
- linux
- osx
go:
- 1.5
- 1.6
- 1.7
- 1.8
- master
install:
- go get ./...
script:
- go vet ./...
- go test -v -cpu=1,2,4 .
- go test -v -cpu=1,2,4 -short -race .
- go test -tags=noasm -v -cpu=1,2,4 -short -race .
- go build examples/simple-decoder.go
- go build examples/simple-encoder.go
- go build examples/stream-decoder.go
- go build examples/stream-encoder.go
- diff <(gofmt -d .) <("")
matrix:
allow_failures:
- go: 'master'
fast_finish: true

23
vendor/github.com/klauspost/reedsolomon/LICENSE generated vendored Normal file

@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2015 Klaus Post
Copyright (c) 2015 Backblaze
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

216
vendor/github.com/klauspost/reedsolomon/README.md generated vendored Normal file

@ -0,0 +1,216 @@
# Reed-Solomon
[![GoDoc][1]][2] [![Build Status][3]][4]
[1]: https://godoc.org/github.com/klauspost/reedsolomon?status.svg
[2]: https://godoc.org/github.com/klauspost/reedsolomon
[3]: https://travis-ci.org/klauspost/reedsolomon.svg?branch=master
[4]: https://travis-ci.org/klauspost/reedsolomon
Reed-Solomon Erasure Coding in Go, with speeds exceeding 1GB/s/cpu core implemented in pure Go.
This is a golang port of the [JavaReedSolomon](https://github.com/Backblaze/JavaReedSolomon) library released by [Backblaze](http://backblaze.com), with some additional optimizations.
For an introduction on erasure coding, see the post on the [Backblaze blog](https://www.backblaze.com/blog/reed-solomon/).
Package home: https://github.com/klauspost/reedsolomon
Godoc: https://godoc.org/github.com/klauspost/reedsolomon
# Installation
To get the package use the standard:
```bash
go get github.com/klauspost/reedsolomon
```
# Usage
This section assumes you know the basics of Reed-Solomon encoding. A good start is this [Backblaze blog post](https://www.backblaze.com/blog/reed-solomon/).
This package performs the calculation of the parity sets. The usage is therefore relatively simple.
First of all, you need to choose your distribution of data and parity shards. A 'good' distribution is very subjective, and will depend a lot on your usage scenario. A good starting point is above 5 and below 257 data shards (the maximum supported number); the number of parity shards should be 2 or above, and below the number of data shards.
To create an encoder with 10 data shards (where your data goes) and 3 parity shards (calculated):
```Go
enc, err := reedsolomon.New(10, 3)
```
This encoder will work for all parity sets with this distribution of data and parity shards. The error will only be set if you specify 0 or negative values in any of the parameters, or if you specify more than 256 data shards.
The data you send and receive is a simple slice of byte slices; `[][]byte`. In the example above, the top slice must have a length of 13.
```Go
data := make([][]byte, 13)
```
You should then fill the first 10 slices with *equally sized* data, and create parity shards that will be populated with parity data. In this case we create the data in memory, but you could for instance also use [mmap](https://github.com/edsrzf/mmap-go) to map files.
```Go
// Create all shards, size them at 50000 each
for i := range data {
data[i] = make([]byte, 50000)
}
// Fill some data into the data shards
for i, in := range data[:10] {
for j := range in {
in[j] = byte((i+j)&0xff)
}
}
```
To populate the parity shards, you simply call `Encode()` with your data.
```Go
err = enc.Encode(data)
```
The only case where you should get an error is if the data shards aren't of equal size. The last 3 shards now contain parity data. You can verify this by calling `Verify()`:
```Go
ok, err = enc.Verify(data)
```
The final (and important) part is to be able to reconstruct missing shards. For this to work, you need to know which parts of your data are missing. The encoder *does not know which parts are invalid*, so if data corruption is a likely scenario, you need to implement a hash check for each shard. If a byte has changed in your set, and you don't know which it is, there is no way to reconstruct the data set.
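The package itself does not provide such a hash check. The sketch below is one possible way to do it (not part of this library), using SHA-256 from the standard library; the helper names `hashShards` and `dropCorrupted` are made up for this example.
```Go
// Assumes imports: "bytes", "crypto/sha256".

// hashShards records a checksum for every shard, e.g. right after Encode().
func hashShards(shards [][]byte) [][]byte {
    sums := make([][]byte, len(shards))
    for i, shard := range shards {
        sum := sha256.Sum256(shard)
        sums[i] = sum[:]
    }
    return sums
}

// dropCorrupted sets any shard whose checksum no longer matches to nil,
// which is how a shard is marked as missing for Reconstruct().
func dropCorrupted(shards, sums [][]byte) {
    for i, shard := range shards {
        if shard == nil {
            continue
        }
        sum := sha256.Sum256(shard)
        if !bytes.Equal(sum[:], sums[i]) {
            shards[i] = nil
        }
    }
}
```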
To indicate missing data, you set the shard to nil before calling `Reconstruct()`:
```Go
// Delete two data shards
data[3] = nil
data[7] = nil
// Reconstruct the missing shards
err := enc.Reconstruct(data)
```
The missing data and parity shards will be recreated. If more than 3 shards are missing, the reconstruction will fail.
So to sum up reconstruction:
* The number of data/parity shards must match the numbers used for encoding.
* The order of shards must be the same as used when encoding.
* You may only supply data you know is valid.
* Invalid shards should be set to nil.
For complete examples of an encoder and decoder see the [examples folder](https://github.com/klauspost/reedsolomon/tree/master/examples).
# Splitting/Joining Data
You might have a large slice of data. To help you split this, there are some helper functions that can split and join a single byte slice.
```Go
bigfile, _ := ioutil.Readfile("myfile.data")
// Split the file
split, err := enc.Split(bigfile)
```
This will split the file into the number of data shards set when creating the encoder and create empty parity shards.
An important thing to note is that you have to *keep track of the exact input size*. If the size of the input isn't divisible by the number of data shards, extra zeros will be inserted in the last shard.
To join a data set, use the `Join()` function, which will join the shards and write it to the `io.Writer` you supply:
```Go
// Join a data set and write it to io.Discard.
err = enc.Join(io.Discard, data, len(bigfile))
```
# Streaming/Merging
It might seem like a limitation that all data should be in memory, but an important property is that *as long as the number of data/parity shards is the same, you can merge/split data sets*, and they will remain valid as a separate set.
```Go
// Split the data set of 50000 elements into two of 25000
splitA := make([][]byte, 13)
splitB := make([][]byte, 13)
// Merge into a 100000 element set
merged := make([][]byte, 13)
for i := range data {
splitA[i] = data[i][:25000]
splitB[i] = data[i][25000:]
// Concatenate it to itself
merged[i] = append(make([]byte, 0, len(data[i])*2), data[i]...)
merged[i] = append(merged[i], data[i]...)
}
// Each part should still verify as ok.
ok, err := enc.Verify(splitA)
if ok && err == nil {
log.Println("splitA ok")
}
ok, err = enc.Verify(splitB)
if ok && err == nil {
log.Println("splitB ok")
}
ok, err = enc.Verify(merged)
if ok && err == nil {
log.Println("merge ok")
}
```
This means that if you have a data set that may not fit into memory, you can split processing into smaller blocks. For the best throughput, don't use too small blocks.
This also means that you can divide big input up into smaller blocks, and do reconstruction on parts of your data. This doesn't give the same flexibility of a higher number of data shards, but it will be much more performant.
# Streaming API
Support has been added for a streaming API, which lets you perform the same operations, but on streams. To use the stream API, use the [`NewStream`](https://godoc.org/github.com/klauspost/reedsolomon#NewStream) function to create the encoding/decoding interfaces. You can use [`NewStreamC`](https://godoc.org/github.com/klauspost/reedsolomon#NewStreamC) to ready an interface that reads/writes concurrently from the streams.
Input is delivered as `[]io.Reader`, output as `[]io.Writer`, and functionality corresponds to the in-memory API. Each stream must supply the same amount of data, similar to how each slice must be the same size with the in-memory API.
If an error occurs in relation to a stream, a [`StreamReadError`](https://godoc.org/github.com/klauspost/reedsolomon#StreamReadError) or [`StreamWriteError`](https://godoc.org/github.com/klauspost/reedsolomon#StreamWriteError) will help you determine which stream was the offender.
There is no buffering or timeouts/retry specified. If you want to add that, you need to add it to the Reader/Writer.
For complete examples of a streaming encoder and decoder see the [examples folder](https://github.com/klauspost/reedsolomon/tree/master/examples).
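As a minimal sketch, assuming `NewStream` and `StreamEncoder.Encode` accept the data shards as `[]io.Reader` and the parity shards as `[]io.Writer` exactly as described above, encoding 10 data streams into 3 parity streams could look like this; here the shards are just in-memory buffers rather than files or sockets.
```Go
// Assumes imports: "bytes", "io", "log", "github.com/klauspost/reedsolomon".
stream, err := reedsolomon.NewStream(10, 3)
if err != nil {
    log.Fatal(err)
}

// Each data shard must deliver the same number of bytes.
data := make([]io.Reader, 10)
for i := range data {
    data[i] = bytes.NewReader(make([]byte, 50000))
}

// Parity shards are written to the supplied io.Writers.
parity := make([]io.Writer, 3)
for i := range parity {
    parity[i] = &bytes.Buffer{}
}

if err := stream.Encode(data, parity); err != nil {
    log.Fatal(err)
}
```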
# Advanced Options
You can modify internal options which affect how jobs are split between and processed by goroutines.
To create options, use the WithXXX functions. You can supply options to `New`, `NewStream` and `NewStreamC`. If no Options are supplied, default options are used.
Example of how to supply options:
```Go
enc, err := reedsolomon.New(10, 3, reedsolomon.WithMaxGoroutines(25))
```
# Performance
Performance depends mainly on the number of parity shards. In rough terms, doubling the number of parity shards will double the encoding time.
Here are the throughput numbers with some different selections of data and parity shards. For reference each shard is 1MB random data, and 2 CPU cores are used for encoding.
| Data | Parity | Parity % | MB/s | SSSE3 MB/s | SSSE3 Speed | Rel. Speed |
|------|--------|--------|--------|-------------|-------------|------------|
| 5 | 2 | 40% | 576,11 | 2599,2 | 451% | 100,00% |
| 10 | 2 | 20% | 587,73 | 3100,28 | 528% | 102,02% |
| 10 | 4 | 40% | 298,38 | 2470,97 | 828% | 51,79% |
| 50 | 20 | 40% | 59,81 | 713,28 | 1193% | 10,38% |
If `runtime.GOMAXPROCS()` is set to a value higher than 1, the encoder will use multiple goroutines to perform the calculations in `Verify`, `Encode` and `Reconstruct`.
Example of performance scaling on Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz - 4 physical cores, 8 logical cores. The example uses 10 blocks with 16MB data each and 4 parity blocks.
| Threads | MB/s | Speed |
|---------|---------|-------|
| 1 | 1355,11 | 100% |
| 2 | 2339,78 | 172% |
| 4 | 3179,33 | 235% |
| 8 | 4346,18 | 321% |
# asm2plan9s
[asm2plan9s](https://github.com/fwessels/asm2plan9s) is used for assembling the AVX2 instructions into their BYTE/WORD/LONG equivalents.
# Links
* [Backblaze Open Sources Reed-Solomon Erasure Coding Source Code](https://www.backblaze.com/blog/reed-solomon/).
* [JavaReedSolomon](https://github.com/Backblaze/JavaReedSolomon). Compatible java library by Backblaze.
* [reedsolomon-c](https://github.com/jannson/reedsolomon-c). C version, compatible with output from this package.
* [Reed-Solomon Erasure Coding in Haskell](https://github.com/NicolasT/reedsolomon). Haskell port of the package with similar performance.
* [go-erasure](https://github.com/somethingnew2-0/go-erasure). A similar library using cgo, slower in my tests.
* [rsraid](https://github.com/goayame/rsraid). A similar library written in Go. Slower, but supports more shards.
* [Screaming Fast Galois Field Arithmetic](http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf). Basis for SSE3 optimizations.
# License
This code, like the original [JavaReedSolomon](https://github.com/Backblaze/JavaReedSolomon), is published under an MIT license. See LICENSE file for more information.

20
vendor/github.com/klauspost/reedsolomon/appveyor.yml generated vendored Normal file

@ -0,0 +1,20 @@
os: Visual Studio 2015
platform: x64
clone_folder: c:\gopath\src\github.com\klauspost\reedsolomon
# environment variables
environment:
GOPATH: c:\gopath
install:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -d ./...
build_script:
- go test -v -cpu=2 ./...
- go test -cpu=1,2,4 -short -race ./...

134
vendor/github.com/klauspost/reedsolomon/galois.go generated vendored Normal file

File diff suppressed because one or more lines are too long


@ -0,0 +1,73 @@
//+build !noasm
//+build !appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
package reedsolomon
//go:noescape
func galMulSSSE3(low, high, in, out []byte)
//go:noescape
func galMulSSSE3Xor(low, high, in, out []byte)
//go:noescape
func galMulAVX2Xor(low, high, in, out []byte)
//go:noescape
func galMulAVX2(low, high, in, out []byte)
// This is what the assembler routines do in blocks of 16 bytes:
/*
func galMulSSSE3(low, high, in, out []byte) {
for n, input := range in {
l := input & 0xf
h := input >> 4
out[n] = low[l] ^ high[h]
}
}
func galMulSSSE3Xor(low, high, in, out []byte) {
for n, input := range in {
l := input & 0xf
h := input >> 4
out[n] ^= low[l] ^ high[h]
}
}
*/
func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
var done int
if avx2 {
galMulAVX2(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 5) << 5
} else if ssse3 {
galMulSSSE3(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 4) << 4
}
remain := len(in) - done
if remain > 0 {
mt := mulTable[c]
for i := done; i < len(in); i++ {
out[i] = mt[in[i]]
}
}
}
func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
var done int
if avx2 {
galMulAVX2Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 5) << 5
} else if ssse3 {
galMulSSSE3Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
done = (len(in) >> 4) << 4
}
remain := len(in) - done
if remain > 0 {
mt := mulTable[c]
for i := done; i < len(in); i++ {
out[i] ^= mt[in[i]]
}
}
}

164
vendor/github.com/klauspost/reedsolomon/galois_amd64.s generated vendored Normal file

@ -0,0 +1,164 @@
//+build !noasm !appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
// Based on http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf
// and http://jerasure.org/jerasure/gf-complete/tree/master
// func galMulSSSE3Xor(low, high, in, out []byte)
TEXT ·galMulSSSE3Xor(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ $15, BX // BX: low mask
MOVQ BX, X8
PXOR X5, X5
MOVQ in+48(FP), SI // R11: &in
MOVQ in_len+56(FP), R9 // R9: len(in)
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
SHRQ $4, R9 // len(in) / 16
CMPQ R9, $0
JEQ done_xor
loopback_xor:
MOVOU (SI), X0 // in[x]
MOVOU (DX), X4 // out[x]
MOVOU X0, X1 // in[x]
MOVOU X6, X2 // low copy
MOVOU X7, X3 // high copy
PSRLQ $4, X1 // X1: high input
PAND X8, X0 // X0: low input
PAND X8, X1 // X0: high input
PSHUFB X0, X2 // X2: mul low part
PSHUFB X1, X3 // X3: mul high part
PXOR X2, X3 // X3: Result
PXOR X4, X3 // X3: Result xor existing out
MOVOU X3, (DX) // Store
ADDQ $16, SI // in+=16
ADDQ $16, DX // out+=16
SUBQ $1, R9
JNZ loopback_xor
done_xor:
RET
// func galMulSSSE3(low, high, in, out []byte)
TEXT ·galMulSSSE3(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ $15, BX // BX: low mask
MOVQ BX, X8
PXOR X5, X5
MOVQ in+48(FP), SI // R11: &in
MOVQ in_len+56(FP), R9 // R9: len(in)
MOVQ out+72(FP), DX // DX: &out
PSHUFB X5, X8 // X8: lomask (unpacked)
SHRQ $4, R9 // len(in) / 16
CMPQ R9, $0
JEQ done
loopback:
MOVOU (SI), X0 // in[x]
MOVOU X0, X1 // in[x]
MOVOU X6, X2 // low copy
MOVOU X7, X3 // high copy
PSRLQ $4, X1 // X1: high input
PAND X8, X0 // X0: low input
PAND X8, X1 // X0: high input
PSHUFB X0, X2 // X2: mul low part
PSHUFB X1, X3 // X3: mul high part
PXOR X2, X3 // X3: Result
MOVOU X3, (DX) // Store
ADDQ $16, SI // in+=16
ADDQ $16, DX // out+=16
SUBQ $1, R9
JNZ loopback
done:
RET
// func galMulAVX2Xor(low, high, in, out []byte)
TEXT ·galMulAVX2Xor(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVQ $15, BX // BX: low mask
MOVQ BX, X5
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ in_len+56(FP), R9 // R9: len(in)
LONG $0x384de3c4; WORD $0x01f6 // VINSERTI128 YMM6, YMM6, XMM6, 1 ; low
LONG $0x3845e3c4; WORD $0x01ff // VINSERTI128 YMM7, YMM7, XMM7, 1 ; high
LONG $0x787d62c4; BYTE $0xc5 // VPBROADCASTB YMM8, XMM5 ; X8: lomask (unpacked)
SHRQ $5, R9 // len(in) /32
MOVQ out+72(FP), DX // DX: &out
MOVQ in+48(FP), SI // R11: &in
TESTQ R9, R9
JZ done_xor_avx2
loopback_xor_avx2:
LONG $0x066ffec5 // VMOVDQU YMM0, [rsi]
LONG $0x226ffec5 // VMOVDQU YMM4, [rdx]
LONG $0xd073f5c5; BYTE $0x04 // VPSRLQ YMM1, YMM0, 4 ; X1: high input
LONG $0xdb7dc1c4; BYTE $0xc0 // VPAND YMM0, YMM0, YMM8 ; X0: low input
LONG $0xdb75c1c4; BYTE $0xc8 // VPAND YMM1, YMM1, YMM8 ; X1: high input
LONG $0x004de2c4; BYTE $0xd0 // VPSHUFB YMM2, YMM6, YMM0 ; X2: mul low part
LONG $0x0045e2c4; BYTE $0xd9 // VPSHUFB YMM3, YMM7, YMM1 ; X2: mul high part
LONG $0xdbefedc5 // VPXOR YMM3, YMM2, YMM3 ; X3: Result
LONG $0xe4efe5c5 // VPXOR YMM4, YMM3, YMM4 ; X4: Result
LONG $0x227ffec5 // VMOVDQU [rdx], YMM4
ADDQ $32, SI // in+=32
ADDQ $32, DX // out+=32
SUBQ $1, R9
JNZ loopback_xor_avx2
done_xor_avx2:
// VZEROUPPER
BYTE $0xc5; BYTE $0xf8; BYTE $0x77
RET
// func galMulAVX2(low, high, in, out []byte)
TEXT ·galMulAVX2(SB), 7, $0
MOVQ low+0(FP), SI // SI: &low
MOVQ high+24(FP), DX // DX: &high
MOVQ $15, BX // BX: low mask
MOVQ BX, X5
MOVOU (SI), X6 // X6 low
MOVOU (DX), X7 // X7: high
MOVQ in_len+56(FP), R9 // R9: len(in)
LONG $0x384de3c4; WORD $0x01f6 // VINSERTI128 YMM6, YMM6, XMM6, 1 ; low
LONG $0x3845e3c4; WORD $0x01ff // VINSERTI128 YMM7, YMM7, XMM7, 1 ; high
LONG $0x787d62c4; BYTE $0xc5 // VPBROADCASTB YMM8, XMM5 ; X8: lomask (unpacked)
SHRQ $5, R9 // len(in) /32
MOVQ out+72(FP), DX // DX: &out
MOVQ in+48(FP), SI // R11: &in
TESTQ R9, R9
JZ done_avx2
loopback_avx2:
LONG $0x066ffec5 // VMOVDQU YMM0, [rsi]
LONG $0xd073f5c5; BYTE $0x04 // VPSRLQ YMM1, YMM0, 4 ; X1: high input
LONG $0xdb7dc1c4; BYTE $0xc0 // VPAND YMM0, YMM0, YMM8 ; X0: low input
LONG $0xdb75c1c4; BYTE $0xc8 // VPAND YMM1, YMM1, YMM8 ; X1: high input
LONG $0x004de2c4; BYTE $0xd0 // VPSHUFB YMM2, YMM6, YMM0 ; X2: mul low part
LONG $0x0045e2c4; BYTE $0xd9 // VPSHUFB YMM3, YMM7, YMM1 ; X2: mul high part
LONG $0xe3efedc5 // VPXOR YMM4, YMM2, YMM3 ; X4: Result
LONG $0x227ffec5 // VMOVDQU [rdx], YMM4
ADDQ $32, SI // in+=32
ADDQ $32, DX // out+=32
SUBQ $1, R9
JNZ loopback_avx2
done_avx2:
BYTE $0xc5; BYTE $0xf8; BYTE $0x77 // VZEROUPPER
RET


@ -0,0 +1,19 @@
//+build !amd64 noasm appengine
// Copyright 2015, Klaus Post, see LICENSE for details.
package reedsolomon
func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
mt := mulTable[c]
for n, input := range in {
out[n] = mt[input]
}
}
func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
mt := mulTable[c]
for n, input := range in {
out[n] ^= mt[input]
}
}

132
vendor/github.com/klauspost/reedsolomon/gentables.go generated vendored Normal file

@ -0,0 +1,132 @@
//+build ignore
package main
import (
"fmt"
)
var logTable = [fieldSize]int16{
-1, 0, 1, 25, 2, 50, 26, 198,
3, 223, 51, 238, 27, 104, 199, 75,
4, 100, 224, 14, 52, 141, 239, 129,
28, 193, 105, 248, 200, 8, 76, 113,
5, 138, 101, 47, 225, 36, 15, 33,
53, 147, 142, 218, 240, 18, 130, 69,
29, 181, 194, 125, 106, 39, 249, 185,
201, 154, 9, 120, 77, 228, 114, 166,
6, 191, 139, 98, 102, 221, 48, 253,
226, 152, 37, 179, 16, 145, 34, 136,
54, 208, 148, 206, 143, 150, 219, 189,
241, 210, 19, 92, 131, 56, 70, 64,
30, 66, 182, 163, 195, 72, 126, 110,
107, 58, 40, 84, 250, 133, 186, 61,
202, 94, 155, 159, 10, 21, 121, 43,
78, 212, 229, 172, 115, 243, 167, 87,
7, 112, 192, 247, 140, 128, 99, 13,
103, 74, 222, 237, 49, 197, 254, 24,
227, 165, 153, 119, 38, 184, 180, 124,
17, 68, 146, 217, 35, 32, 137, 46,
55, 63, 209, 91, 149, 188, 207, 205,
144, 135, 151, 178, 220, 252, 190, 97,
242, 86, 211, 171, 20, 42, 93, 158,
132, 60, 57, 83, 71, 109, 65, 162,
31, 45, 67, 216, 183, 123, 164, 118,
196, 23, 73, 236, 127, 12, 111, 246,
108, 161, 59, 82, 41, 157, 85, 170,
251, 96, 134, 177, 187, 204, 62, 90,
203, 89, 95, 176, 156, 169, 160, 81,
11, 245, 22, 235, 122, 117, 44, 215,
79, 174, 213, 233, 230, 231, 173, 232,
116, 214, 244, 234, 168, 80, 88, 175,
}
const (
// The number of elements in the field.
fieldSize = 256
// The polynomial used to generate the logarithm table.
//
// There are a number of polynomials that work to generate
// a Galois field of 256 elements. The choice is arbitrary,
// and we just use the first one.
//
// The possibilities are: 29, 43, 45, 77, 95, 99, 101, 105,
// 113, 135, 141, 169, 195, 207, 231, and 245.
generatingPolynomial = 29
)
func main() {
t := generateExpTable()
fmt.Printf("var expTable = %#v\n", t)
//t2 := generateMulTableSplit(t)
//fmt.Printf("var mulTable = %#v\n", t2)
low, high := generateMulTableHalf(t)
fmt.Printf("var mulTableLow = %#v\n", low)
fmt.Printf("var mulTableHigh = %#v\n", high)
}
/**
* Generates the inverse log table.
*/
func generateExpTable() []byte {
result := make([]byte, fieldSize*2-2)
for i := 1; i < fieldSize; i++ {
log := logTable[i]
result[log] = byte(i)
result[log+fieldSize-1] = byte(i)
}
return result
}
func generateMulTable(expTable []byte) []byte {
result := make([]byte, 256*256)
for v := range result {
a := byte(v & 0xff)
b := byte(v >> 8)
if a == 0 || b == 0 {
result[v] = 0
continue
}
logA := int(logTable[a])
logB := int(logTable[b])
result[v] = expTable[logA+logB]
}
return result
}
func generateMulTableSplit(expTable []byte) [256][256]byte {
var result [256][256]byte
for a := range result {
for b := range result[a] {
if a == 0 || b == 0 {
result[a][b] = 0
continue
}
logA := int(logTable[a])
logB := int(logTable[b])
result[a][b] = expTable[logA+logB]
}
}
return result
}
func generateMulTableHalf(expTable []byte) (low [256][16]byte, high [256][16]byte) {
for a := range low {
for b := range low {
result := 0
if !(a == 0 || b == 0) {
logA := int(logTable[a])
logB := int(logTable[b])
result = int(expTable[logA+logB])
}
if (b & 0xf) == b {
low[a][b] = byte(result)
}
if (b & 0xf0) == b {
high[a][b>>4] = byte(result)
}
}
}
return
}


@ -0,0 +1,160 @@
/**
* A thread-safe tree which caches inverted matrices.
*
* Copyright 2016, Peter Collins
*/
package reedsolomon
import (
"errors"
"sync"
)
// The tree uses a Reader-Writer mutex to make it thread-safe
// when accessing cached matrices and inserting new ones.
type inversionTree struct {
mutex *sync.RWMutex
root inversionNode
}
type inversionNode struct {
matrix matrix
children []*inversionNode
}
// newInversionTree initializes a tree for storing inverted matrices.
// Note that the root node is the identity matrix as it implies
// there were no errors with the original data.
func newInversionTree(dataShards, parityShards int) inversionTree {
identity, _ := identityMatrix(dataShards)
root := inversionNode{
matrix: identity,
children: make([]*inversionNode, dataShards+parityShards),
}
return inversionTree{
mutex: &sync.RWMutex{},
root: root,
}
}
// GetInvertedMatrix returns the cached inverted matrix or nil if it
// is not found in the tree keyed on the indices of invalid rows.
func (t inversionTree) GetInvertedMatrix(invalidIndices []int) matrix {
// Lock the tree for reading before accessing the tree.
t.mutex.RLock()
defer t.mutex.RUnlock()
// If no invalid indices were given we should return the root
// identity matrix.
if len(invalidIndices) == 0 {
return t.root.matrix
}
// Recursively search for the inverted matrix in the tree, passing in
// 0 as the parent index as we start at the root of the tree.
return t.root.getInvertedMatrix(invalidIndices, 0)
}
// errAlreadySet is returned if the root node matrix is overwritten
var errAlreadySet = errors.New("the root node identity matrix is already set")
// InsertInvertedMatrix inserts a new inverted matrix into the tree
// keyed by the indices of invalid rows. The total number of shards
// is required for creating the proper length lists of child nodes for
// each node.
func (t inversionTree) InsertInvertedMatrix(invalidIndices []int, matrix matrix, shards int) error {
// If no invalid indices were given then we are done because the
// root node is already set with the identity matrix.
if len(invalidIndices) == 0 {
return errAlreadySet
}
if !matrix.IsSquare() {
return errNotSquare
}
// Lock the tree for writing and reading before accessing the tree.
t.mutex.Lock()
defer t.mutex.Unlock()
// Recursively create nodes for the inverted matrix in the tree until
// we reach the node to insert the matrix to. We start by passing in
// 0 as the parent index as we start at the root of the tree.
t.root.insertInvertedMatrix(invalidIndices, matrix, shards, 0)
return nil
}
func (n inversionNode) getInvertedMatrix(invalidIndices []int, parent int) matrix {
// Get the child node to search next from the list of children. The
// list of children starts relative to the parent index passed in
// because the indices of invalid rows is sorted (by default). As we
// search recursively, the first invalid index gets popped off the list,
// so when searching through the list of children, use that first invalid
// index to find the child node.
firstIndex := invalidIndices[0]
node := n.children[firstIndex-parent]
// If the child node doesn't exist in the list yet, fail fast by
// returning, so we can construct and insert the proper inverted matrix.
if node == nil {
return nil
}
// If there's more than one invalid index left in the list we should
// keep searching recursively.
if len(invalidIndices) > 1 {
// Search recursively on the child node by passing in the invalid indices
// with the first index popped off the front. Also the parent index to
// pass down is the first index plus one.
return node.getInvertedMatrix(invalidIndices[1:], firstIndex+1)
}
// If there aren't any more invalid indices to search, we've found our
// node. Return it, however keep in mind that the matrix could still be
// nil because intermediary nodes in the tree are created sometimes with
// their inversion matrices uninitialized.
return node.matrix
}
func (n inversionNode) insertInvertedMatrix(invalidIndices []int, matrix matrix, shards, parent int) {
// As above, get the child node to search next from the list of children.
// The list of children starts relative to the parent index passed in
// because the indices of invalid rows is sorted (by default). As we
// search recursively, the first invalid index gets popped off the list,
// so when searching through the list of children, use that first invalid
// index to find the child node.
firstIndex := invalidIndices[0]
node := n.children[firstIndex-parent]
// If the child node doesn't exist in the list yet, create a new
// node because we have the writer lock and add it to the list
// of children.
if node == nil {
// Make the length of the list of children equal to the number
// of shards minus the first invalid index because the list of
// invalid indices is sorted, so only this length of errors
// are possible in the tree.
node = &inversionNode{
children: make([]*inversionNode, shards-firstIndex),
}
// Insert the new node into the tree at the first index relative
// to the parent index that was given in this recursive call.
n.children[firstIndex-parent] = node
}
// If there's more than one invalid index left in the list we should
// keep searching recursively in order to find the node to add our
// matrix.
if len(invalidIndices) > 1 {
// As above, search recursively on the child node by passing in
// the invalid indices with the first index popped off the front.
// Also the total number of shards and parent index are passed down
// which is equal to the first index plus one.
node.insertInvertedMatrix(invalidIndices[1:], matrix, shards, firstIndex+1)
} else {
// If there aren't any more invalid indices to search, we've found our
// node. Cache the inverted matrix in this node.
node.matrix = matrix
}
}

279
vendor/github.com/klauspost/reedsolomon/matrix.go generated vendored Normal file

@ -0,0 +1,279 @@
/**
* Matrix Algebra over an 8-bit Galois Field
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc.
*/
package reedsolomon
import (
"errors"
"fmt"
"strconv"
"strings"
)
// byte[row][col]
type matrix [][]byte
// newMatrix returns a matrix of zeros.
func newMatrix(rows, cols int) (matrix, error) {
if rows <= 0 {
return nil, errInvalidRowSize
}
if cols <= 0 {
return nil, errInvalidColSize
}
m := matrix(make([][]byte, rows))
for i := range m {
m[i] = make([]byte, cols)
}
return m, nil
}
// NewMatrixData initializes a matrix with the given row-major data.
// Note that data is not copied from input.
func newMatrixData(data [][]byte) (matrix, error) {
m := matrix(data)
err := m.Check()
if err != nil {
return nil, err
}
return m, nil
}
// IdentityMatrix returns an identity matrix of the given size.
func identityMatrix(size int) (matrix, error) {
m, err := newMatrix(size, size)
if err != nil {
return nil, err
}
for i := range m {
m[i][i] = 1
}
return m, nil
}
// errInvalidRowSize will be returned if attempting to create a matrix with negative or zero row number.
var errInvalidRowSize = errors.New("invalid row size")
// errInvalidColSize will be returned if attempting to create a matrix with negative or zero column number.
var errInvalidColSize = errors.New("invalid column size")
// errColSizeMismatch is returned if the size of matrix columns mismatch.
var errColSizeMismatch = errors.New("column size is not the same for all rows")
func (m matrix) Check() error {
rows := len(m)
if rows <= 0 {
return errInvalidRowSize
}
cols := len(m[0])
if cols <= 0 {
return errInvalidColSize
}
for _, col := range m {
if len(col) != cols {
return errColSizeMismatch
}
}
return nil
}
// String returns a human-readable string of the matrix contents.
//
// Example: [[1, 2], [3, 4]]
func (m matrix) String() string {
rowOut := make([]string, 0, len(m))
for _, row := range m {
colOut := make([]string, 0, len(row))
for _, col := range row {
colOut = append(colOut, strconv.Itoa(int(col)))
}
rowOut = append(rowOut, "["+strings.Join(colOut, ", ")+"]")
}
return "[" + strings.Join(rowOut, ", ") + "]"
}
// Multiply multiplies this matrix (the one on the left) by another
// matrix (the one on the right) and returns a new matrix with the result.
func (m matrix) Multiply(right matrix) (matrix, error) {
if len(m[0]) != len(right) {
return nil, fmt.Errorf("columns on left (%d) is different than rows on right (%d)", len(m[0]), len(right))
}
result, _ := newMatrix(len(m), len(right[0]))
for r, row := range result {
for c := range row {
var value byte
for i := range m[0] {
value ^= galMultiply(m[r][i], right[i][c])
}
result[r][c] = value
}
}
return result, nil
}
// Augment returns the concatenation of this matrix and the matrix on the right.
func (m matrix) Augment(right matrix) (matrix, error) {
if len(m) != len(right) {
return nil, errMatrixSize
}
result, _ := newMatrix(len(m), len(m[0])+len(right[0]))
for r, row := range m {
for c := range row {
result[r][c] = m[r][c]
}
cols := len(m[0])
for c := range right[0] {
result[r][cols+c] = right[r][c]
}
}
return result, nil
}
// errMatrixSize is returned if matrix dimensions don't match.
var errMatrixSize = errors.New("matrix sizes does not match")
func (m matrix) SameSize(n matrix) error {
if len(m) != len(n) {
return errMatrixSize
}
for i := range m {
if len(m[i]) != len(n[i]) {
return errMatrixSize
}
}
return nil
}
// Returns a part of this matrix. Data is copied.
func (m matrix) SubMatrix(rmin, cmin, rmax, cmax int) (matrix, error) {
result, err := newMatrix(rmax-rmin, cmax-cmin)
if err != nil {
return nil, err
}
// OPTME: If used heavily, use copy function to copy slice
for r := rmin; r < rmax; r++ {
for c := cmin; c < cmax; c++ {
result[r-rmin][c-cmin] = m[r][c]
}
}
return result, nil
}
// SwapRows Exchanges two rows in the matrix.
func (m matrix) SwapRows(r1, r2 int) error {
if r1 < 0 || len(m) <= r1 || r2 < 0 || len(m) <= r2 {
return errInvalidRowSize
}
m[r2], m[r1] = m[r1], m[r2]
return nil
}
// IsSquare will return true if the matrix is square
// and false otherwise.
func (m matrix) IsSquare() bool {
return len(m) == len(m[0])
}
// errSingular is returned if the matrix is singular and cannot be inversed
var errSingular = errors.New("matrix is singular")
// errNotSquare is returned if attempting to inverse a non-square matrix.
var errNotSquare = errors.New("only square matrices can be inverted")
// Invert returns the inverse of this matrix.
// Returns errSingular when the matrix is singular and doesn't have an inverse.
// The matrix must be square, otherwise errNotSquare is returned.
func (m matrix) Invert() (matrix, error) {
if !m.IsSquare() {
return nil, errNotSquare
}
size := len(m)
work, _ := identityMatrix(size)
work, _ = m.Augment(work)
err := work.gaussianElimination()
if err != nil {
return nil, err
}
return work.SubMatrix(0, size, size, size*2)
}
func (m matrix) gaussianElimination() error {
rows := len(m)
columns := len(m[0])
// Clear out the part below the main diagonal and scale the main
// diagonal to be 1.
for r := 0; r < rows; r++ {
// If the element on the diagonal is 0, find a row below
// that has a non-zero and swap them.
if m[r][r] == 0 {
for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
if m[rowBelow][r] != 0 {
m.SwapRows(r, rowBelow)
break
}
}
}
// If we couldn't find one, the matrix is singular.
if m[r][r] == 0 {
return errSingular
}
// Scale to 1.
if m[r][r] != 1 {
scale := galDivide(1, m[r][r])
for c := 0; c < columns; c++ {
m[r][c] = galMultiply(m[r][c], scale)
}
}
// Make everything below the 1 be a 0 by subtracting
// a multiple of it. (Subtraction and addition are
// both exclusive or in the Galois field.)
for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
if m[rowBelow][r] != 0 {
scale := m[rowBelow][r]
for c := 0; c < columns; c++ {
m[rowBelow][c] ^= galMultiply(scale, m[r][c])
}
}
}
}
// Now clear the part above the main diagonal.
for d := 0; d < rows; d++ {
for rowAbove := 0; rowAbove < d; rowAbove++ {
if m[rowAbove][d] != 0 {
scale := m[rowAbove][d]
for c := 0; c < columns; c++ {
m[rowAbove][c] ^= galMultiply(scale, m[d][c])
}
}
}
}
return nil
}
// Create a Vandermonde matrix, which is guaranteed to have the
// property that any subset of rows that forms a square matrix
// is invertible.
func vandermonde(rows, cols int) (matrix, error) {
result, err := newMatrix(rows, cols)
if err != nil {
return nil, err
}
for r, row := range result {
for c := range row {
result[r][c] = galExp(byte(r), c)
}
}
return result, nil
}

67
vendor/github.com/klauspost/reedsolomon/options.go generated vendored Normal file

@ -0,0 +1,67 @@
package reedsolomon
import (
"runtime"
"github.com/klauspost/cpuid"
)
// Option allows to override processing parameters.
type Option func(*options)
type options struct {
maxGoroutines int
minSplitSize int
useAVX2, useSSSE3 bool
}
var defaultOptions = options{
maxGoroutines: 50,
minSplitSize: 512,
}
func init() {
if runtime.GOMAXPROCS(0) <= 1 {
defaultOptions.maxGoroutines = 1
}
// Detect CPU capabilities.
defaultOptions.useSSSE3 = cpuid.CPU.SSSE3()
defaultOptions.useAVX2 = cpuid.CPU.AVX2()
}
// WithMaxGoroutines sets the maximum number of goroutines used for encoding & decoding.
// Jobs will be split into this many parts, unless each goroutine would have to process
// less than minSplitSize bytes (set with WithMinSplitSize).
// For the best speed, keep this well above the GOMAXPROCS number for more fine grained
// scheduling.
// If n <= 0, it is ignored.
func WithMaxGoroutines(n int) Option {
return func(o *options) {
if n > 0 {
o.maxGoroutines = n
}
}
}
// WithMinSplitSize sets the minimum encoding size in bytes per goroutine.
// See WithMaxGoroutines on how jobs are split.
// If n <= 0, it is ignored.
func WithMinSplitSize(n int) Option {
return func(o *options) {
if n > 0 {
o.minSplitSize = n
}
}
}
func withSSE3(enabled bool) Option {
return func(o *options) {
o.useSSSE3 = enabled
}
}
func withAVX2(enabled bool) Option {
return func(o *options) {
o.useAVX2 = enabled
}
}

596
vendor/github.com/klauspost/reedsolomon/reedsolomon.go generated vendored Normal file

@ -0,0 +1,596 @@
/**
* Reed-Solomon Coding over 8-bit values.
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc.
*/
// Package reedsolomon enables Erasure Coding in Go
//
// For usage and examples, see https://github.com/klauspost/reedsolomon
//
package reedsolomon
import (
"bytes"
"errors"
"io"
"sync"
)
// Encoder is an interface to encode Reed-Solomon parity sets for your data.
type Encoder interface {
// Encodes parity for a set of data shards.
// Input is 'shards' containing data shards followed by parity shards.
// The number of shards must match the number given to New().
// Each shard is a byte array, and they must all be the same size.
// The parity shards will always be overwritten and the data shards
// will remain the same, so it is safe for you to read from the
// data shards while this is running.
Encode(shards [][]byte) error
// Verify returns true if the parity shards contain correct data.
// The data is the same format as Encode. No data is modified, so
// you are allowed to read from data while this is running.
Verify(shards [][]byte) (bool, error)
// Reconstruct will recreate the missing shards if possible.
//
// Given a list of shards, some of which contain data, fills in the
// ones that don't have data.
//
// The length of the array must be equal to the total number of shards.
// You indicate that a shard is missing by setting it to nil.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
Reconstruct(shards [][]byte) error
// Split a data slice into the number of shards given to the encoder,
// and create empty parity shards.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// There must be at least 1 byte otherwise ErrShortData will be
// returned.
//
// The data will not be copied, except for the last shard, so you
// should not modify the data of the input slice afterwards.
Split(data []byte) ([][]byte, error)
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
// You must supply the exact output size you want.
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
Join(dst io.Writer, shards [][]byte, outSize int) error
}
// reedSolomon contains a matrix for a specific
// distribution of datashards and parity shards.
// Construct it using New()
type reedSolomon struct {
DataShards int // Number of data shards, should not be modified.
ParityShards int // Number of parity shards, should not be modified.
Shards int // Total number of shards. Calculated, and should not be modified.
m matrix
tree inversionTree
parity [][]byte
o options
}
// ErrInvShardNum will be returned by New, if you attempt to create
// an Encoder where either data or parity shards is zero or less.
var ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
// ErrMaxShardNum will be returned by New, if you attempt to create
// an Encoder where data and parity shards cannot be bigger than
// Galois field GF(2^8) - 1.
var ErrMaxShardNum = errors.New("cannot create Encoder with 255 or more data+parity shards")
// New creates a new encoder and initializes it to
// the number of data shards and parity shards that
// you want to use. You can reuse this encoder.
// Note that the maximum number of data shards is 256.
// If no options are supplied, default options are used.
func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
r := reedSolomon{
DataShards: dataShards,
ParityShards: parityShards,
Shards: dataShards + parityShards,
o: defaultOptions,
}
for _, opt := range opts {
opt(&r.o)
}
if dataShards <= 0 || parityShards <= 0 {
return nil, ErrInvShardNum
}
if dataShards+parityShards > 255 {
return nil, ErrMaxShardNum
}
// Start with a Vandermonde matrix. This matrix would work,
// in theory, but doesn't have the property that the data
// shards are unchanged after encoding.
vm, err := vandermonde(r.Shards, dataShards)
if err != nil {
return nil, err
}
// Multiply by the inverse of the top square of the matrix.
// This will make the top square be the identity matrix, but
// preserve the property that any square subset of rows is
// invertible.
top, _ := vm.SubMatrix(0, 0, dataShards, dataShards)
top, _ = top.Invert()
r.m, _ = vm.Multiply(top)
// Inverted matrices are cached in a tree keyed by the indices
// of the invalid rows of the data to reconstruct.
// The inversion root node will have the identity matrix as
// its inversion matrix because it implies there are no errors
// with the original data.
r.tree = newInversionTree(dataShards, parityShards)
r.parity = make([][]byte, parityShards)
for i := range r.parity {
r.parity[i] = r.m[dataShards+i]
}
return &r, err
}
// ErrTooFewShards is returned if too few shards were given to
// Encode/Verify/Reconstruct. It will also be returned from Reconstruct
// if there were too few shards to reconstruct the missing data.
var ErrTooFewShards = errors.New("too few shards given")
// Encodes parity for a set of data shards.
// An array 'shards' containing data shards followed by parity shards.
// The number of shards must match the number given to New.
// Each shard is a byte array, and they must all be the same size.
// The parity shards will always be overwritten and the data shards
// will remain the same.
func (r reedSolomon) Encode(shards [][]byte) error {
if len(shards) != r.Shards {
return ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return err
}
// Get the slice of output buffers.
output := shards[r.DataShards:]
// Do the coding.
r.codeSomeShards(r.parity, shards[0:r.DataShards], output, r.ParityShards, len(shards[0]))
return nil
}
// Verify returns true if the parity shards contain the right data.
// The data is the same format as Encode. No data is modified.
func (r reedSolomon) Verify(shards [][]byte) (bool, error) {
if len(shards) != r.Shards {
return false, ErrTooFewShards
}
err := checkShards(shards, false)
if err != nil {
return false, err
}
// Slice of buffers being checked.
toCheck := shards[r.DataShards:]
// Do the checking.
return r.checkSomeShards(r.parity, shards[0:r.DataShards], toCheck, r.ParityShards, len(shards[0])), nil
}
// Multiplies a subset of rows from a coding matrix by a full set of
// input shards to produce some output shards.
// 'matrixRows' is the rows from the matrix to use.
// 'inputs' is an array of byte arrays, each of which is one input shard.
// The number of inputs used is determined by the length of each matrix row.
// outputs Byte arrays where the computed shards are stored.
// The number of outputs computed, and the
// number of matrix rows used, is determined by
// outputCount, which is the number of outputs to compute.
func (r reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
r.codeSomeShardsP(matrixRows, inputs, outputs, outputCount, byteCount)
return
}
for c := 0; c < r.DataShards; c++ {
in := inputs[c]
for iRow := 0; iRow < outputCount; iRow++ {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
} else {
galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
}
}
}
}
// Perform the same as codeSomeShards, but split the workload into
// several goroutines.
func (r reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
var wg sync.WaitGroup
do := byteCount / r.o.maxGoroutines
if do < r.o.minSplitSize {
do = r.o.minSplitSize
}
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, stop int) {
for c := 0; c < r.DataShards; c++ {
in := inputs[c]
for iRow := 0; iRow < outputCount; iRow++ {
if c == 0 {
galMulSlice(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
} else {
galMulSliceXor(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
}
}
}
wg.Done()
}(start, start+do)
start += do
}
wg.Wait()
}
// checkSomeShards is mostly the same as codeSomeShards,
// except this will check values and return
// as soon as a difference is found.
func (r reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
return r.checkSomeShardsP(matrixRows, inputs, toCheck, outputCount, byteCount)
}
outputs := make([][]byte, len(toCheck))
for i := range outputs {
outputs[i] = make([]byte, byteCount)
}
for c := 0; c < r.DataShards; c++ {
in := inputs[c]
for iRow := 0; iRow < outputCount; iRow++ {
galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
}
}
for i, calc := range outputs {
if !bytes.Equal(calc, toCheck[i]) {
return false
}
}
return true
}
func (r reedSolomon) checkSomeShardsP(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
same := true
var mu sync.RWMutex // For above
var wg sync.WaitGroup
do := byteCount / r.o.maxGoroutines
if do < r.o.minSplitSize {
do = r.o.minSplitSize
}
start := 0
for start < byteCount {
if start+do > byteCount {
do = byteCount - start
}
wg.Add(1)
go func(start, do int) {
defer wg.Done()
outputs := make([][]byte, len(toCheck))
for i := range outputs {
outputs[i] = make([]byte, do)
}
for c := 0; c < r.DataShards; c++ {
mu.RLock()
if !same {
mu.RUnlock()
return
}
mu.RUnlock()
in := inputs[c][start : start+do]
for iRow := 0; iRow < outputCount; iRow++ {
galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
}
}
for i, calc := range outputs {
if !bytes.Equal(calc, toCheck[i][start:start+do]) {
mu.Lock()
same = false
mu.Unlock()
return
}
}
}(start, do)
start += do
}
wg.Wait()
return same
}
// ErrShardNoData will be returned if there are no shards,
// or if the length of all shards is zero.
var ErrShardNoData = errors.New("no shard data")
// ErrShardSize is returned if shard length isn't the same for all
// shards.
var ErrShardSize = errors.New("shard sizes do not match")
// checkShards will check if shards are the same size
// or 0, if allowed. An error is returned if this fails.
// An error is also returned if all shards are size 0.
func checkShards(shards [][]byte, nilok bool) error {
size := shardSize(shards)
if size == 0 {
return ErrShardNoData
}
for _, shard := range shards {
if len(shard) != size {
if len(shard) != 0 || !nilok {
return ErrShardSize
}
}
}
return nil
}
// shardSize return the size of a single shard.
// The first non-zero size is returned,
// or 0 if all shards are size 0.
func shardSize(shards [][]byte) int {
for _, shard := range shards {
if len(shard) != 0 {
return len(shard)
}
}
return 0
}
// Reconstruct will recreate the missing shards, if possible.
//
// Given a list of shards, some of which contain data, fills in the
// ones that don't have data.
//
// The length of the array must be equal to Shards.
// You indicate that a shard is missing by setting it to nil.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
func (r reedSolomon) Reconstruct(shards [][]byte) error {
if len(shards) != r.Shards {
return ErrTooFewShards
}
// Check arguments.
err := checkShards(shards, true)
if err != nil {
return err
}
shardSize := shardSize(shards)
// Quick check: are all of the shards present? If so, there's
// nothing to do.
numberPresent := 0
for i := 0; i < r.Shards; i++ {
if len(shards[i]) != 0 {
numberPresent++
}
}
if numberPresent == r.Shards {
// Cool. All of the shards have data. We don't
// need to do anything.
return nil
}
// More complete sanity check
if numberPresent < r.DataShards {
return ErrTooFewShards
}
// Pull out an array holding just the shards that
// correspond to the rows of the submatrix. These shards
// will be the input to the decoding process that re-creates
// the missing data shards.
//
// Also, create an array of indices of the valid rows we do have
// and the invalid rows we don't have up until we have enough valid rows.
subShards := make([][]byte, r.DataShards)
validIndices := make([]int, r.DataShards)
invalidIndices := make([]int, 0)
subMatrixRow := 0
for matrixRow := 0; matrixRow < r.Shards && subMatrixRow < r.DataShards; matrixRow++ {
if len(shards[matrixRow]) != 0 {
subShards[subMatrixRow] = shards[matrixRow]
validIndices[subMatrixRow] = matrixRow
subMatrixRow++
} else {
invalidIndices = append(invalidIndices, matrixRow)
}
}
// Attempt to get the cached inverted matrix out of the tree
// based on the indices of the invalid rows.
dataDecodeMatrix := r.tree.GetInvertedMatrix(invalidIndices)
// If the inverted matrix isn't cached in the tree yet we must
// construct it ourselves and insert it into the tree for the
// future. In this way the inversion tree is lazily loaded.
if dataDecodeMatrix == nil {
// Pull out the rows of the matrix that correspond to the
// shards that we have and build a square matrix. This
// matrix could be used to generate the shards that we have
// from the original data.
subMatrix, _ := newMatrix(r.DataShards, r.DataShards)
for subMatrixRow, validIndex := range validIndices {
for c := 0; c < r.DataShards; c++ {
subMatrix[subMatrixRow][c] = r.m[validIndex][c]
}
}
// Invert the matrix, so we can go from the encoded shards
// back to the original data. Then pull out the row that
// generates the shard that we want to decode. Note that
// since this matrix maps back to the original data, it can
// be used to create a data shard, but not a parity shard.
dataDecodeMatrix, err = subMatrix.Invert()
if err != nil {
return err
}
// Cache the inverted matrix in the tree for future use keyed on the
// indices of the invalid rows.
err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.Shards)
if err != nil {
return err
}
}
// Re-create any data shards that were missing.
//
// The input to the coding is all of the shards we actually
// have, and the output is the missing data shards. The computation
// is done using the special decode matrix we just built.
outputs := make([][]byte, r.ParityShards)
matrixRows := make([][]byte, r.ParityShards)
outputCount := 0
for iShard := 0; iShard < r.DataShards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = make([]byte, shardSize)
outputs[outputCount] = shards[iShard]
matrixRows[outputCount] = dataDecodeMatrix[iShard]
outputCount++
}
}
r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], outputCount, shardSize)
// Now that we have all of the data shards intact, we can
// compute any of the parity that is missing.
//
// The input to the coding is ALL of the data shards, including
// any that we just calculated. The output is whichever of the
// parity shards were missing.
outputCount = 0
for iShard := r.DataShards; iShard < r.Shards; iShard++ {
if len(shards[iShard]) == 0 {
shards[iShard] = make([]byte, shardSize)
outputs[outputCount] = shards[iShard]
matrixRows[outputCount] = r.parity[iShard-r.DataShards]
outputCount++
}
}
r.codeSomeShards(matrixRows, shards[:r.DataShards], outputs[:outputCount], outputCount, shardSize)
return nil
}
// ErrShortData will be returned by Split(), if there isn't enough data
// to fill the number of shards.
var ErrShortData = errors.New("not enough data to fill the number of requested shards")
// Split a data slice into the number of shards given to the encoder,
// and create empty parity shards.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// There must be at least 1 byte otherwise ErrShortData will be
// returned.
//
// The data will not be copied, except for the last shard, so you
// should not modify the data of the input slice afterwards.
func (r reedSolomon) Split(data []byte) ([][]byte, error) {
if len(data) == 0 {
return nil, ErrShortData
}
// Calculate number of bytes per shard.
perShard := (len(data) + r.DataShards - 1) / r.DataShards
// Pad data to r.Shards*perShard.
padding := make([]byte, (r.Shards*perShard)-len(data))
data = append(data, padding...)
// Split into equal-length shards.
dst := make([][]byte, r.Shards)
for i := range dst {
dst[i] = data[:perShard]
data = data[perShard:]
}
return dst, nil
}
// ErrReconstructRequired is returned if too few data shards are intact and a
// reconstruction is required before you can successfully join the shards.
var ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
// You must supply the exact output size you want.
//
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
// If one or more required data shards are nil, ErrReconstructRequired will be returned.
func (r reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error {
// Do we have enough shards?
if len(shards) < r.DataShards {
return ErrTooFewShards
}
shards = shards[:r.DataShards]
// Do we have enough data?
size := 0
for _, shard := range shards {
if shard == nil {
return ErrReconstructRequired
}
size += len(shard)
// Do we have enough data already?
if size >= outSize {
break
}
}
if size < outSize {
return ErrShortData
}
// Copy data to dst
write := outSize
for _, shard := range shards {
if write < len(shard) {
_, err := dst.Write(shard[:write])
return err
}
n, err := dst.Write(shard)
if err != nil {
return err
}
write -= n
}
return nil
}
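A minimal usage sketch of the in-memory Encoder API above (assuming the vendored import path github.com/klauspost/reedsolomon; the 4+2 shard layout and the payload are arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 4 data shards + 2 parity shards; any 4 of the 6 shards can rebuild the data.
	enc, err := reedsolomon.New(4, 2)
	if err != nil {
		log.Fatal(err)
	}
	data := []byte("payload protected by Reed-Solomon parity")
	shards, err := enc.Split(data) // pads and slices into 6 equally sized shards
	if err != nil {
		log.Fatal(err)
	}
	if err = enc.Encode(shards); err != nil { // fills in the 2 parity shards
		log.Fatal(err)
	}
	shards[1], shards[4] = nil, nil // simulate one lost data shard and one lost parity shard
	if err = enc.Reconstruct(shards); err != nil {
		log.Fatal(err)
	}
	ok, _ := enc.Verify(shards) // parity matches again after reconstruction
	var buf bytes.Buffer
	if err = enc.Join(&buf, shards, len(data)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok, buf.String())
}
```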

575
vendor/github.com/klauspost/reedsolomon/streaming.go generated vendored Normal file
View File

@ -0,0 +1,575 @@
/**
* Reed-Solomon Coding over 8-bit values.
*
* Copyright 2015, Klaus Post
* Copyright 2015, Backblaze, Inc.
*/
package reedsolomon
import (
"bytes"
"errors"
"fmt"
"io"
"sync"
)
// StreamEncoder is an interface to encode Reed-Solomon parity sets for your data.
// It provides a fully streaming interface, and processes data in blocks of up to 4MB.
//
// For small shard sizes, 10MB and below, it is recommended to use the in-memory interface,
// since the streaming interface has a start up overhead.
//
// For all operations, readers and writers should not assume any order/size of
// individual reads/writes.
//
// For usage examples, see "stream-encoder.go" and "streamdecoder.go" in the examples
// folder.
type StreamEncoder interface {
// Encodes parity shards for a set of data shards.
//
// Input is 'shards' containing readers for data shards followed by parity shards
// io.Writer.
//
// The number of shards must match the number given to NewStream().
//
// Each reader must supply the same number of bytes.
//
// The parity shards will be written to the writer.
// The number of bytes written will match the input size.
//
// If a data stream returns an error, a StreamReadError type error
// will be returned. If a parity writer returns an error, a
// StreamWriteError will be returned.
Encode(data []io.Reader, parity []io.Writer) error
// Verify returns true if the parity shards contain correct data.
//
// The number of shards must match the number total data+parity shards
// given to NewStream().
//
// Each reader must supply the same number of bytes.
// If a shard stream returns an error, a StreamReadError type error
// will be returned.
Verify(shards []io.Reader) (bool, error)
// Reconstruct will recreate the missing shards if possible.
//
// Given a list of valid shards (to read) and invalid shards (to write)
//
// You indicate that a shard is missing by setting it to nil in the 'valid'
// slice and at the same time setting a non-nil writer in "fill".
// An index cannot contain both non-nil 'valid' and 'fill' entry.
// If both are provided 'ErrReconstructMismatch' is returned.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
Reconstruct(valid []io.Reader, fill []io.Writer) error
// Split an input stream into the number of shards given to the encoder.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// You must supply the total size of your input.
// 'ErrShortData' will be returned if it is unable to retrieve the
// number of bytes indicated.
Split(data io.Reader, dst []io.Writer, size int64) (err error)
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
//
// You must supply the exact output size you want.
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
Join(dst io.Writer, shards []io.Reader, outSize int64) error
}
// StreamReadError is returned when a read error is encountered
// that relates to a supplied stream.
// This will allow you to find out which reader has failed.
type StreamReadError struct {
Err error // The error
Stream int // The stream number on which the error occurred
}
// Error returns the error as a string
func (s StreamReadError) Error() string {
return fmt.Sprintf("error reading stream %d: %s", s.Stream, s.Err)
}
// String returns the error as a string
func (s StreamReadError) String() string {
return s.Error()
}
// StreamWriteError is returned when a write error is encountered
// that relates to a supplied stream. This will allow you to
// find out which reader has failed.
type StreamWriteError struct {
Err error // The error
Stream int // The stream number on which the error occurred
}
// Error returns the error as a string
func (s StreamWriteError) Error() string {
return fmt.Sprintf("error writing stream %d: %s", s.Stream, s.Err)
}
// String returns the error as a string
func (s StreamWriteError) String() string {
return s.Error()
}
// rsStream contains a matrix for a specific
// distribution of datashards and parity shards.
// Construct if using NewStream()
type rsStream struct {
r *reedSolomon
bs int // Block size
// Shard reader
readShards func(dst [][]byte, in []io.Reader) error
// Shard writer
writeShards func(out []io.Writer, in [][]byte) error
creads bool
cwrites bool
}
// NewStream creates a new encoder and initializes it to
// the number of data shards and parity shards that
// you want to use. You can reuse this encoder.
// Note that the maximum total number of data and parity shards is 255.
func NewStream(dataShards, parityShards int, o ...Option) (StreamEncoder, error) {
enc, err := New(dataShards, parityShards, o...)
if err != nil {
return nil, err
}
rs := enc.(*reedSolomon)
r := rsStream{r: rs, bs: 4 << 20}
r.readShards = readShards
r.writeShards = writeShards
return &r, err
}
// NewStreamC creates a new encoder and initializes it to
// the number of data shards and parity shards given.
//
// This functions as 'NewStream', but allows you to enable CONCURRENT reads and writes.
func NewStreamC(dataShards, parityShards int, conReads, conWrites bool, o ...Option) (StreamEncoder, error) {
enc, err := New(dataShards, parityShards, o...)
if err != nil {
return nil, err
}
rs := enc.(*reedSolomon)
r := rsStream{r: rs, bs: 4 << 20}
r.readShards = readShards
r.writeShards = writeShards
if conReads {
r.readShards = cReadShards
}
if conWrites {
r.writeShards = cWriteShards
}
return &r, err
}
func createSlice(n, length int) [][]byte {
out := make([][]byte, n)
for i := range out {
out[i] = make([]byte, length)
}
return out
}
// Encodes parity shards for a set of data shards.
//
// Input is 'shards' containing readers for data shards followed by parity shards
// io.Writer.
//
// The number of shards must match the number given to NewStream().
//
// Each reader must supply the same number of bytes.
//
// The parity shards will be written to the writer.
// The number of bytes written will match the input size.
//
// If a data stream returns an error, a StreamReadError type error
// will be returned. If a parity writer returns an error, a
// StreamWriteError will be returned.
func (r rsStream) Encode(data []io.Reader, parity []io.Writer) error {
if len(data) != r.r.DataShards {
return ErrTooFewShards
}
if len(parity) != r.r.ParityShards {
return ErrTooFewShards
}
all := createSlice(r.r.Shards, r.bs)
in := all[:r.r.DataShards]
out := all[r.r.DataShards:]
read := 0
for {
err := r.readShards(in, data)
switch err {
case nil:
case io.EOF:
if read == 0 {
return ErrShardNoData
}
return nil
default:
return err
}
out = trimShards(out, shardSize(in))
read += shardSize(in)
err = r.r.Encode(all)
if err != nil {
return err
}
err = r.writeShards(parity, out)
if err != nil {
return err
}
}
}
// Trim the shards so they are all the same size
func trimShards(in [][]byte, size int) [][]byte {
for i := range in {
if in[i] != nil {
in[i] = in[i][0:size]
}
if len(in[i]) < size {
in[i] = nil
}
}
return in
}
func readShards(dst [][]byte, in []io.Reader) error {
if len(in) != len(dst) {
panic("internal error: in and dst size does not match")
}
size := -1
for i := range in {
if in[i] == nil {
dst[i] = nil
continue
}
n, err := io.ReadFull(in[i], dst[i])
// The error is EOF only if no bytes were read.
// If an EOF happens after reading some but not all the bytes,
// ReadFull returns ErrUnexpectedEOF.
switch err {
case io.ErrUnexpectedEOF, io.EOF:
if size < 0 {
size = n
} else if n != size {
// Shard sizes must match.
return ErrShardSize
}
dst[i] = dst[i][0:n]
case nil:
continue
default:
return StreamReadError{Err: err, Stream: i}
}
}
if size == 0 {
return io.EOF
}
return nil
}
func writeShards(out []io.Writer, in [][]byte) error {
if len(out) != len(in) {
panic("internal error: in and out size does not match")
}
for i := range in {
if out[i] == nil {
continue
}
n, err := out[i].Write(in[i])
if err != nil {
return StreamWriteError{Err: err, Stream: i}
}
//
if n != len(in[i]) {
return StreamWriteError{Err: io.ErrShortWrite, Stream: i}
}
}
return nil
}
type readResult struct {
n int
size int
err error
}
// cReadShards reads shards concurrently
func cReadShards(dst [][]byte, in []io.Reader) error {
if len(in) != len(dst) {
panic("internal error: in and dst size does not match")
}
var wg sync.WaitGroup
wg.Add(len(in))
res := make(chan readResult, len(in))
for i := range in {
if in[i] == nil {
dst[i] = nil
wg.Done()
continue
}
go func(i int) {
defer wg.Done()
n, err := io.ReadFull(in[i], dst[i])
// The error is EOF only if no bytes were read.
// If an EOF happens after reading some but not all the bytes,
// ReadFull returns ErrUnexpectedEOF.
res <- readResult{size: n, err: err, n: i}
}(i)
}
wg.Wait()
close(res)
size := -1
for r := range res {
switch r.err {
case io.ErrUnexpectedEOF, io.EOF:
if size < 0 {
size = r.size
} else if r.size != size {
// Shard sizes must match.
return ErrShardSize
}
dst[r.n] = dst[r.n][0:r.size]
case nil:
default:
return StreamReadError{Err: r.err, Stream: r.n}
}
}
if size == 0 {
return io.EOF
}
return nil
}
// cWriteShards writes shards concurrently
func cWriteShards(out []io.Writer, in [][]byte) error {
if len(out) != len(in) {
panic("internal error: in and out size does not match")
}
var errs = make(chan error, len(out))
var wg sync.WaitGroup
wg.Add(len(out))
for i := range in {
go func(i int) {
defer wg.Done()
if out[i] == nil {
errs <- nil
return
}
n, err := out[i].Write(in[i])
if err != nil {
errs <- StreamWriteError{Err: err, Stream: i}
return
}
if n != len(in[i]) {
errs <- StreamWriteError{Err: io.ErrShortWrite, Stream: i}
}
}(i)
}
wg.Wait()
close(errs)
for err := range errs {
if err != nil {
return err
}
}
return nil
}
// Verify returns true if the parity shards contain correct data.
//
// The number of shards must match the number total data+parity shards
// given to NewStream().
//
// Each reader must supply the same number of bytes.
// If a shard stream returns an error, a StreamReadError type error
// will be returned.
func (r rsStream) Verify(shards []io.Reader) (bool, error) {
if len(shards) != r.r.Shards {
return false, ErrTooFewShards
}
read := 0
all := createSlice(r.r.Shards, r.bs)
for {
err := r.readShards(all, shards)
if err == io.EOF {
if read == 0 {
return false, ErrShardNoData
}
return true, nil
}
if err != nil {
return false, err
}
read += shardSize(all)
ok, err := r.r.Verify(all)
if !ok || err != nil {
return ok, err
}
}
}
// ErrReconstructMismatch is returned by the StreamEncoder, if you supply
// "valid" and "fill" streams on the same index.
// In that case it is impossible to tell whether you consider the shard valid
// or would like to have it reconstructed.
var ErrReconstructMismatch = errors.New("valid shards and fill shards are mutually exclusive")
// Reconstruct will recreate the missing shards if possible.
//
// Given a list of valid shards (to read) and invalid shards (to write)
//
// You indicate that a shard is missing by setting it to nil in the 'valid'
// slice and at the same time setting a non-nil writer in "fill".
// An index cannot contain both non-nil 'valid' and 'fill' entry.
//
// If there are too few shards to reconstruct the missing
// ones, ErrTooFewShards will be returned.
//
// The reconstructed shard set is complete, but integrity is not verified.
// Use the Verify function to check if data set is ok.
func (r rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error {
if len(valid) != r.r.Shards {
return ErrTooFewShards
}
if len(fill) != r.r.Shards {
return ErrTooFewShards
}
all := createSlice(r.r.Shards, r.bs)
for i := range valid {
if valid[i] != nil && fill[i] != nil {
return ErrReconstructMismatch
}
}
read := 0
for {
err := r.readShards(all, valid)
if err == io.EOF {
if read == 0 {
return ErrShardNoData
}
return nil
}
if err != nil {
return err
}
read += shardSize(all)
all = trimShards(all, shardSize(all))
err = r.r.Reconstruct(all)
if err != nil {
return err
}
err = r.writeShards(fill, all)
if err != nil {
return err
}
}
}
// Join the shards and write the data segment to dst.
//
// Only the data shards are considered.
//
// You must supply the exact output size you want.
// If there are too few shards given, ErrTooFewShards will be returned.
// If the total data size is less than outSize, ErrShortData will be returned.
func (r rsStream) Join(dst io.Writer, shards []io.Reader, outSize int64) error {
// Do we have enough shards?
if len(shards) < r.r.DataShards {
return ErrTooFewShards
}
// Trim off parity shards if any
shards = shards[:r.r.DataShards]
for i := range shards {
if shards[i] == nil {
return StreamReadError{Err: ErrShardNoData, Stream: i}
}
}
// Join all shards
src := io.MultiReader(shards...)
// Copy data to dst
n, err := io.CopyN(dst, src, outSize)
if err == io.EOF {
return ErrShortData
}
if err != nil {
return err
}
if n != outSize {
return ErrShortData
}
return nil
}
// Split an input stream into the number of shards given to the encoder.
//
// The data will be split into equally sized shards.
// If the data size isn't divisible by the number of shards,
// the last shard will contain extra zeros.
//
// You must supply the total size of your input.
// 'ErrShortData' will be returned if it is unable to retrieve the
// number of bytes indicated.
func (r rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
if size == 0 {
return ErrShortData
}
if len(dst) != r.r.DataShards {
return ErrInvShardNum
}
for i := range dst {
if dst[i] == nil {
return StreamWriteError{Err: ErrShardNoData, Stream: i}
}
}
// Calculate number of bytes per shard.
perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards)
// Pad data to r.Shards*perShard.
padding := make([]byte, (int64(r.r.Shards)*perShard)-size)
data = io.MultiReader(data, bytes.NewBuffer(padding))
// Split into equal-length shards and copy.
for i := range dst {
n, err := io.CopyN(dst[i], data, perShard)
if err != io.EOF && err != nil {
return err
}
if n != perShard {
return ErrShortData
}
}
return nil
}
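The same flow with the streaming API above, as a sketch that keeps all shards in in-memory buffers (real deployments would point the readers and writers at files or sockets):

```go
package main

import (
	"bytes"
	"io"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.NewStream(4, 2) // 4 data + 2 parity streams
	if err != nil {
		log.Fatal(err)
	}
	payload := []byte("streamed payload protected by parity")

	// Split the input stream into 4 data shards held in memory buffers.
	dataBufs := make([]*bytes.Buffer, 4)
	dataWriters := make([]io.Writer, 4)
	for i := range dataBufs {
		dataBufs[i] = new(bytes.Buffer)
		dataWriters[i] = dataBufs[i]
	}
	if err = enc.Split(bytes.NewReader(payload), dataWriters, int64(len(payload))); err != nil {
		log.Fatal(err)
	}

	// Encode: readers over the data shards in, writers for the parity shards out.
	dataReaders := make([]io.Reader, 4)
	for i := range dataBufs {
		dataReaders[i] = bytes.NewReader(dataBufs[i].Bytes())
	}
	parityBufs := make([]*bytes.Buffer, 2)
	parityWriters := make([]io.Writer, 2)
	for i := range parityBufs {
		parityBufs[i] = new(bytes.Buffer)
		parityWriters[i] = parityBufs[i]
	}
	if err = enc.Encode(dataReaders, parityWriters); err != nil {
		log.Fatal(err)
	}
	log.Printf("parity shard sizes: %d and %d", parityBufs[0].Len(), parityBufs[1].Len())
}
```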

View File

@ -181,7 +181,7 @@ func indentMessageLines(message string, longestLabelLen int) string {
// no need to align first line because it starts at the correct location (after the label)
if i != 0 {
// append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen +1) + "\t")
}
outBuf.WriteString(scanner.Text())
}
@ -229,7 +229,7 @@ func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
}
type labeledContent struct {
label string
label string
content string
}

24
vendor/github.com/xtaci/kcp-go/.gitignore generated vendored Normal file
View File

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

15
vendor/github.com/xtaci/kcp-go/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,15 @@
language: go
go:
- 1.8
before_install:
- go get -t -v ./...
install:
- go get github.com/xtaci/kcp-go
script:
- go test -coverprofile=coverage.txt -covermode=atomic -bench .
after_success:
- bash <(curl -s https://codecov.io/bash)

22
vendor/github.com/xtaci/kcp-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Daniel Fu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

171
vendor/github.com/xtaci/kcp-go/README.md generated vendored Normal file
View File

@ -0,0 +1,171 @@
<img src="kcp-go.png" alt="kcp-go" height="50px" />
[![GoDoc][1]][2] [![Powered][9]][10] [![MIT licensed][11]][12] [![Build Status][3]][4] [![Go Report Card][5]][6] [![Coverage Statusd][7]][8]
[1]: https://godoc.org/github.com/xtaci/kcp-go?status.svg
[2]: https://godoc.org/github.com/xtaci/kcp-go
[3]: https://travis-ci.org/xtaci/kcp-go.svg?branch=master
[4]: https://travis-ci.org/xtaci/kcp-go
[5]: https://goreportcard.com/badge/github.com/xtaci/kcp-go
[6]: https://goreportcard.com/report/github.com/xtaci/kcp-go
[7]: https://codecov.io/gh/xtaci/kcp-go/branch/master/graph/badge.svg
[8]: https://codecov.io/gh/xtaci/kcp-go
[9]: https://img.shields.io/badge/KCP-Powered-blue.svg
[10]: https://github.com/skywind3000/kcp
[11]: https://img.shields.io/badge/license-MIT-blue.svg
[12]: LICENSE
## Introduction
**kcp-go** is a **Production-Grade Reliable-UDP** library for [golang](https://golang.org/).
It provides **fast, ordered and error-checked** delivery of streams over **UDP** packets and has been well tested with the open-source project [kcptun](https://github.com/xtaci/kcptun). Millions of devices (from low-end MIPS routers to high-end servers) are running **kcp-go** at present, in applications such as **online games, live broadcasting, file synchronization and network acceleration**.
[Latest Release](https://github.com/xtaci/kcp-go/releases)
## Features
1. Optimized for **Realtime Multiplayer Games, Audio/Video Streaming**.
1. Compatible with [skywind3000's](https://github.com/skywind3000) C version with language specific optimizations.
1. **Cache friendly** and **Memory optimized** design, offers extremely **High Performance** core.
1. Compatible with [net.Conn](https://golang.org/pkg/net/#Conn) and [net.Listener](https://golang.org/pkg/net/#Listener), easy to use.
1. [FEC(Forward Error Correction)](https://en.wikipedia.org/wiki/Forward_error_correction) Support with [Reed-Solomon Codes](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction)
1. Packet level encryption support with [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard), [TEA](https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm), [3DES](https://en.wikipedia.org/wiki/Triple_DES), [Blowfish](https://en.wikipedia.org/wiki/Blowfish_(cipher)), [Cast5](https://en.wikipedia.org/wiki/CAST-128), [Salsa20]( https://en.wikipedia.org/wiki/Salsa20), etc. in [CFB](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Feedback_.28CFB.29) mode.
1. **O(1) goroutines** created for the entire server application, minimized goroutine context switch.
## Conventions
Control messages like **SYN/FIN/RST** in TCP **are not defined** in KCP; you need a **keepalive/heartbeat mechanism** at the application level. A real-world example is to run a **multiplexing** protocol over the session, such as [smux](https://github.com/xtaci/smux) (with an embedded keepalive mechanism); see [kcptun](https://github.com/xtaci/kcptun) for an example.
## Documentation
For complete documentation, see the associated [Godoc](https://godoc.org/github.com/xtaci/kcp-go).
## Specification
<img src="frame.png" alt="Frame Format" height="109px" />
```
+-----------------+
| SESSION |
+-----------------+
| KCP(ARQ) |
+-----------------+
| FEC(OPTIONAL) |
+-----------------+
| CRYPTO(OPTIONAL)|
+-----------------+
| UDP(PACKET) |
+-----------------+
| IP |
+-----------------+
| LINK |
+-----------------+
| PHY |
+-----------------+
(LAYER MODEL OF KCP-GO)
```
## Usage
Client: [full demo](https://github.com/xtaci/kcptun/blob/master/client/main.go)
```go
kcpconn, err := kcp.DialWithOptions("192.168.0.1:10000", nil, 10, 3)
```
Server: [full demo](https://github.com/xtaci/kcptun/blob/master/server/main.go)
```go
lis, err := kcp.ListenWithOptions(":10000", nil, 10, 3)
```
## Performance
```
Model Name: MacBook Pro
Model Identifier: MacBookPro12,1
Processor Name: Intel Core i5
Processor Speed: 2.7 GHz
Number of Processors: 1
Total Number of Cores: 2
L2 Cache (per Core): 256 KB
L3 Cache: 3 MB
Memory: 8 GB
```
```
$ go test -v -run=^$ -bench .
beginning tests, encryption:salsa20, fec:10/3
BenchmarkAES128-4 200000 8256 ns/op 363.33 MB/s 0 B/op 0 allocs/op
BenchmarkAES192-4 200000 9153 ns/op 327.74 MB/s 0 B/op 0 allocs/op
BenchmarkAES256-4 200000 10079 ns/op 297.64 MB/s 0 B/op 0 allocs/op
BenchmarkTEA-4 100000 18643 ns/op 160.91 MB/s 0 B/op 0 allocs/op
BenchmarkXOR-4 5000000 316 ns/op 9486.46 MB/s 0 B/op 0 allocs/op
BenchmarkBlowfish-4 50000 35643 ns/op 84.17 MB/s 0 B/op 0 allocs/op
BenchmarkNone-4 30000000 56.2 ns/op 53371.83 MB/s 0 B/op 0 allocs/op
BenchmarkCast5-4 30000 44744 ns/op 67.05 MB/s 0 B/op 0 allocs/op
Benchmark3DES-4 2000 639839 ns/op 4.69 MB/s 2 B/op 0 allocs/op
BenchmarkTwofish-4 30000 43368 ns/op 69.17 MB/s 0 B/op 0 allocs/op
BenchmarkXTEA-4 30000 57673 ns/op 52.02 MB/s 0 B/op 0 allocs/op
BenchmarkSalsa20-4 300000 3917 ns/op 765.80 MB/s 0 B/op 0 allocs/op
BenchmarkFlush-4 10000000 226 ns/op 0 B/op 0 allocs/op
BenchmarkEchoSpeed4K-4 5000 300030 ns/op 13.65 MB/s 5672 B/op 177 allocs/op
BenchmarkEchoSpeed64K-4 500 3202335 ns/op 20.47 MB/s 73295 B/op 2198 allocs/op
BenchmarkEchoSpeed512K-4 50 24926924 ns/op 21.03 MB/s 659339 B/op 17602 allocs/op
BenchmarkEchoSpeed1M-4 20 64857821 ns/op 16.17 MB/s 1772437 B/op 42869 allocs/op
BenchmarkSinkSpeed4K-4 30000 50230 ns/op 81.54 MB/s 2058 B/op 48 allocs/op
BenchmarkSinkSpeed64K-4 2000 648718 ns/op 101.02 MB/s 31165 B/op 687 allocs/op
BenchmarkSinkSpeed256K-4 300 4635905 ns/op 113.09 MB/s 286229 B/op 5516 allocs/op
BenchmarkSinkSpeed1M-4 200 9566933 ns/op 109.60 MB/s 463771 B/op 10701 allocs/op
PASS
ok _/Users/xtaci/.godeps/src/github.com/xtaci/kcp-go 39.689s
```
## Design Considerations
1. slice vs. container/list
`kcp.flush()` loops through the send queue for retransmission checking every 20ms (the update interval).
I've written a benchmark comparing a sequential loop through a *slice* and a *container/list* here:
https://github.com/xtaci/notes/blob/master/golang/benchmark2/cachemiss_test.go
```
BenchmarkLoopSlice-4 2000000000 0.39 ns/op
BenchmarkLoopList-4 100000000 54.6 ns/op
```
The list structure introduces **heavy cache misses** compared to a slice, which has better **locality**: with 5000 connections, a 32-packet window and a 20ms interval, each `kcp.flush()` costs 6us/0.03% (cpu) using a slice, versus 8.7ms/43.5% (cpu) using a list.
2. Timing accuracy vs. syscall clock_gettime
Timing is **critical** to the **RTT estimator**: inaccurate timing introduces false retransmissions in KCP. But calling `time.Now()` costs 42 cycles (10.5ns on a 4GHz CPU, 15.6ns on my MacBook Pro 2.7GHz); the benchmark for `time.Now()`:
https://github.com/xtaci/notes/blob/master/golang/benchmark2/syscall_test.go
```
BenchmarkNow-4 100000000 15.6 ns/op
```
In kcp-go, the current time is refreshed after each `kcp.output()` call returns, and each `kcp.flush()` reads the current time once. Most of the time, 5000 connections cost 5000 * 15.6ns = 78us (when no packet needs to be sent by `kcp.output()`); for 10MB/s data transfer with a 1400-byte MTU, `kcp.output()` is called around 7500 times and spends 117us in `time.Now()` **every second**.
## Tuning
Q: I'm running > 3000 connections on my server, and the CPU utilization is high.
A: A standalone `agent` or `gate` server for kcp-go is suggested, not only to reduce CPU utilization, but also to preserve the **precision** of RTT measurements, which indirectly affects retransmission. Increasing the update `interval` with `SetNoDelay`, e.g. `conn.SetNoDelay(1, 40, 1, 1)`, will dramatically reduce system load, as in the sketch below.
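A minimal client-side sketch of that tuning, reusing the `DialWithOptions` call from the Usage section above (the address and FEC parameters are placeholders):
```go
kcpconn, err := kcp.DialWithOptions("192.168.0.1:10000", nil, 10, 3)
if err != nil {
	panic(err)
}
// nodelay=1, interval=40ms, resend=1, no congestion window: a larger interval
// lowers the per-connection CPU cost at the price of coarser timing.
kcpconn.SetNoDelay(1, 40, 1, 1)
```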
## Who is using this?
1. https://github.com/xtaci/kcptun -- A Secure Tunnel Based On KCP over UDP.
2. https://github.com/getlantern/lantern -- Lantern delivers fast access to the open Internet.
3. https://github.com/smallnest/rpcx -- A RPC service framework based on net/rpc like alibaba Dubbo and weibo Motan.
4. https://github.com/gonet2/agent -- A gateway for games with stream multiplexing.
5. https://github.com/syncthing/syncthing -- Open Source Continuous File Synchronization.
6. https://play.google.com/store/apps/details?id=com.k17game.k3 -- Battle Zone - Earth 2048, a world-wide strategy game.
## Links
1. https://github.com/xtaci/libkcp -- FEC enhanced KCP session library for iOS/Android in C++
2. https://github.com/skywind3000/kcp -- A Fast and Reliable ARQ Protocol
3. https://github.com/klauspost/reedsolomon -- Reed-Solomon Erasure Coding in Go

263
vendor/github.com/xtaci/kcp-go/crypt.go generated vendored Normal file
View File

@ -0,0 +1,263 @@
package kcp
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/sha1"
"golang.org/x/crypto/blowfish"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/salsa20"
"golang.org/x/crypto/tea"
"golang.org/x/crypto/twofish"
"golang.org/x/crypto/xtea"
)
var (
initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
saltxor = `sH3CIVoF#rWLtJo6`
)
// BlockCrypt defines encryption/decryption methods for a given byte slice.
// Notes on implementing: the data to be encrypted contains a built-in
// nonce in the first 16 bytes
type BlockCrypt interface {
// Encrypt encrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Encrypt(dst, src []byte)
// Decrypt decrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Decrypt(dst, src []byte)
}
type salsa20BlockCrypt struct {
key [32]byte
}
// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(salsa20BlockCrypt)
copy(c.key[:], key)
return c, nil
}
func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
type twofishBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(twofishBlockCrypt)
block, err := twofish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, twofish.BlockSize)
c.decbuf = make([]byte, 2*twofish.BlockSize)
return c, nil
}
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type tripleDESBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(tripleDESBlockCrypt)
block, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, des.BlockSize)
c.decbuf = make([]byte, 2*des.BlockSize)
return c, nil
}
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type cast5BlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(cast5BlockCrypt)
block, err := cast5.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, cast5.BlockSize)
c.decbuf = make([]byte, 2*cast5.BlockSize)
return c, nil
}
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type blowfishBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(blowfishBlockCrypt)
block, err := blowfish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, blowfish.BlockSize)
c.decbuf = make([]byte, 2*blowfish.BlockSize)
return c, nil
}
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type aesBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(aesBlockCrypt)
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, aes.BlockSize)
c.decbuf = make([]byte, 2*aes.BlockSize)
return c, nil
}
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type teaBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(teaBlockCrypt)
block, err := tea.NewCipherWithRounds(key, 16)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, tea.BlockSize)
c.decbuf = make([]byte, 2*tea.BlockSize)
return c, nil
}
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type xteaBlockCrypt struct {
encbuf []byte
decbuf []byte
block cipher.Block
}
// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(xteaBlockCrypt)
block, err := xtea.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
c.encbuf = make([]byte, xtea.BlockSize)
c.decbuf = make([]byte, 2*xtea.BlockSize)
return c, nil
}
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type simpleXORBlockCrypt struct {
xortbl []byte
}
// NewSimpleXORBlockCrypt is a simple XOR cipher with key expansion
func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(simpleXORBlockCrypt)
c.xortbl = pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
return c, nil
}
func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xorBytes(dst, src, c.xortbl) }
func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xorBytes(dst, src, c.xortbl) }
type noneBlockCrypt struct{}
// NewNoneBlockCrypt does nothing but copying
func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
return new(noneBlockCrypt), nil
}
func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }
func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
// packet encryption with local CFB mode
func encrypt(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize()
tbl := buf[:blocksize]
block.Encrypt(tbl, initialVector)
n := len(src) / blocksize
base := 0
for i := 0; i < n; i++ {
xorWords(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
}
xorBytes(dst[base:], src[base:], tbl)
}
func decrypt(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize()
tbl := buf[:blocksize]
next := buf[blocksize:]
block.Encrypt(tbl, initialVector)
n := len(src) / blocksize
base := 0
for i := 0; i < n; i++ {
block.Encrypt(next, src[base:])
xorWords(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
}
xorBytes(dst[base:], src[base:], tbl)
}
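A sketch of wiring one of these BlockCrypt implementations into a client, assuming the `DialWithOptions` entry point shown in the kcp-go README above; the passphrase and salt are placeholders:

```go
package main

import (
	"crypto/sha1"
	"log"

	kcp "github.com/xtaci/kcp-go"
	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// Derive a 32-byte key from a passphrase, mirroring how the vendored code
	// derives its XOR table with pbkdf2 above. The salt here is a placeholder.
	key := pbkdf2.Key([]byte("it's a secret"), []byte("demo salt"), 1024, 32, sha1.New)
	block, err := kcp.NewAESBlockCrypt(key)
	if err != nil {
		log.Fatal(err)
	}
	// 10 data shards + 3 parity shards of FEC; packets are encrypted with AES
	// in the packet-level CFB mode implemented above.
	conn, err := kcp.DialWithOptions("192.168.0.1:10000", block, 10, 3)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	if _, err := conn.Write([]byte("ping")); err != nil {
		log.Fatal(err)
	}
}
```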

BIN
vendor/github.com/xtaci/kcp-go/donate.png generated vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.3 KiB

303
vendor/github.com/xtaci/kcp-go/fec.go generated vendored Normal file
View File

@ -0,0 +1,303 @@
package kcp
import (
"encoding/binary"
"sync/atomic"
"github.com/klauspost/reedsolomon"
)
const (
fecHeaderSize = 6
fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
typeData = 0xf1
typeFEC = 0xf2
)
type (
// fecPacket is a decoded FEC packet
fecPacket struct {
seqid uint32
flag uint16
data []byte
}
// fecDecoder for decoding incoming packets
fecDecoder struct {
rxlimit int // queue size limit
dataShards int
parityShards int
shardSize int
rx []fecPacket // ordered receive queue
// caches
decodeCache [][]byte
flagCache []bool
// RS decoder
codec reedsolomon.Encoder
}
)
func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
if dataShards <= 0 || parityShards <= 0 {
return nil
}
if rxlimit < dataShards+parityShards {
return nil
}
fec := new(fecDecoder)
fec.rxlimit = rxlimit
fec.dataShards = dataShards
fec.parityShards = parityShards
fec.shardSize = dataShards + parityShards
enc, err := reedsolomon.New(dataShards, parityShards, reedsolomon.WithMaxGoroutines(1))
if err != nil {
return nil
}
fec.codec = enc
fec.decodeCache = make([][]byte, fec.shardSize)
fec.flagCache = make([]bool, fec.shardSize)
return fec
}
// decodeBytes decodes raw bytes into a fecPacket
func (dec *fecDecoder) decodeBytes(data []byte) fecPacket {
var pkt fecPacket
pkt.seqid = binary.LittleEndian.Uint32(data)
pkt.flag = binary.LittleEndian.Uint16(data[4:])
// allocate memory & copy
buf := xmitBuf.Get().([]byte)[:len(data)-6]
copy(buf, data[6:])
pkt.data = buf
return pkt
}
// decode a fec packet
func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
// insertion
n := len(dec.rx) - 1
insertIdx := 0
for i := n; i >= 0; i-- {
if pkt.seqid == dec.rx[i].seqid { // de-duplicate
xmitBuf.Put(pkt.data)
return nil
} else if _itimediff(pkt.seqid, dec.rx[i].seqid) > 0 { // insertion
insertIdx = i + 1
break
}
}
// insert into ordered rx queue
if insertIdx == n+1 {
dec.rx = append(dec.rx, pkt)
} else {
dec.rx = append(dec.rx, fecPacket{})
copy(dec.rx[insertIdx+1:], dec.rx[insertIdx:]) // shift right
dec.rx[insertIdx] = pkt
}
// shard range for current packet
shardBegin := pkt.seqid - pkt.seqid%uint32(dec.shardSize)
shardEnd := shardBegin + uint32(dec.shardSize) - 1
// max search range in ordered queue for current shard
searchBegin := insertIdx - int(pkt.seqid%uint32(dec.shardSize))
if searchBegin < 0 {
searchBegin = 0
}
searchEnd := searchBegin + dec.shardSize - 1
if searchEnd >= len(dec.rx) {
searchEnd = len(dec.rx) - 1
}
// re-construct datashards
if searchEnd-searchBegin+1 >= dec.dataShards {
var numshard, numDataShard, first, maxlen int
// zero cache
shards := dec.decodeCache
shardsflag := dec.flagCache
for k := range dec.decodeCache {
shards[k] = nil
shardsflag[k] = false
}
// shard assembly
for i := searchBegin; i <= searchEnd; i++ {
seqid := dec.rx[i].seqid
if _itimediff(seqid, shardEnd) > 0 {
break
} else if _itimediff(seqid, shardBegin) >= 0 {
shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data
shardsflag[seqid%uint32(dec.shardSize)] = true
numshard++
if dec.rx[i].flag == typeData {
numDataShard++
}
if numshard == 1 {
first = i
}
if len(dec.rx[i].data) > maxlen {
maxlen = len(dec.rx[i].data)
}
}
}
if numDataShard == dec.dataShards {
// case 1: no lost data shards
dec.rx = dec.freeRange(first, numshard, dec.rx)
} else if numshard >= dec.dataShards {
// case 2: data shard lost, but recoverable from parity shard
for k := range shards {
if shards[k] != nil {
dlen := len(shards[k])
shards[k] = shards[k][:maxlen]
xorBytes(shards[k][dlen:], shards[k][dlen:], shards[k][dlen:])
}
}
if err := dec.codec.Reconstruct(shards); err == nil {
for k := range shards[:dec.dataShards] {
if !shardsflag[k] {
recovered = append(recovered, shards[k])
}
}
}
dec.rx = dec.freeRange(first, numshard, dec.rx)
}
}
// keep rxlimit
if len(dec.rx) > dec.rxlimit {
if dec.rx[0].flag == typeData { // record unrecoverable data
atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
}
dec.rx = dec.freeRange(0, 1, dec.rx)
}
return
}
// free a range of fecPacket, and zero for GC recycling
func (dec *fecDecoder) freeRange(first, n int, q []fecPacket) []fecPacket {
for i := first; i < first+n; i++ { // free
xmitBuf.Put(q[i].data)
}
copy(q[first:], q[first+n:])
for i := 0; i < n; i++ { // dereference data
q[len(q)-1-i].data = nil
}
return q[:len(q)-n]
}
type (
// fecEncoder for encoding outgoing packets
fecEncoder struct {
dataShards int
parityShards int
shardSize int
paws uint32 // Protect Against Wrapped Sequence numbers
next uint32 // next seqid
shardCount int // count the number of datashards collected
maxSize int // record maximum data length in datashard
headerOffset int // FEC header offset
payloadOffset int // FEC payload offset
// caches
shardCache [][]byte
encodeCache [][]byte
// RS encoder
codec reedsolomon.Encoder
}
)
func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
if dataShards <= 0 || parityShards <= 0 {
return nil
}
fec := new(fecEncoder)
fec.dataShards = dataShards
fec.parityShards = parityShards
fec.shardSize = dataShards + parityShards
fec.paws = (0xffffffff/uint32(fec.shardSize) - 1) * uint32(fec.shardSize)
fec.headerOffset = offset
fec.payloadOffset = fec.headerOffset + fecHeaderSize
enc, err := reedsolomon.New(dataShards, parityShards, reedsolomon.WithMaxGoroutines(1))
if err != nil {
return nil
}
fec.codec = enc
// caches
fec.encodeCache = make([][]byte, fec.shardSize)
fec.shardCache = make([][]byte, fec.shardSize)
for k := range fec.shardCache {
fec.shardCache[k] = make([]byte, mtuLimit)
}
return fec
}
// encode the packet and output parity shards once we have collected enough data shards;
// the content of the returned parity shards will change on the next encode
func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
enc.markData(b[enc.headerOffset:])
binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))
// copy data to fec datashards
sz := len(b)
enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
copy(enc.shardCache[enc.shardCount], b)
enc.shardCount++
// record max datashard length
if sz > enc.maxSize {
enc.maxSize = sz
}
// calculate Reed-Solomon Erasure Code
if enc.shardCount == enc.dataShards {
// bzero each datashard's tail
for i := 0; i < enc.dataShards; i++ {
shard := enc.shardCache[i]
slen := len(shard)
xorBytes(shard[slen:enc.maxSize], shard[slen:enc.maxSize], shard[slen:enc.maxSize])
}
// construct equal-sized slice with stripped header
cache := enc.encodeCache
for k := range cache {
cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
}
// rs encode
if err := enc.codec.Encode(cache); err == nil {
ps = enc.shardCache[enc.dataShards:]
for k := range ps {
enc.markFEC(ps[k][enc.headerOffset:])
ps[k] = ps[k][:enc.maxSize]
}
}
// reset counters to zero
enc.shardCount = 0
enc.maxSize = 0
}
return
}
func (enc *fecEncoder) markData(data []byte) {
binary.LittleEndian.PutUint32(data, enc.next)
binary.LittleEndian.PutUint16(data[4:], typeData)
enc.next++
}
func (enc *fecEncoder) markFEC(data []byte) {
binary.LittleEndian.PutUint32(data, enc.next)
binary.LittleEndian.PutUint16(data[4:], typeFEC)
enc.next = (enc.next + 1) % enc.paws
}
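For orientation, a standalone sketch (constants duplicated locally for illustration) of how the 6-byte FEC header and the 2-byte size field described above are written and read back:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Constants mirroring the vendored fec.go above (duplicated here for illustration).
const (
	fecHeaderSize      = 6
	fecHeaderSizePlus2 = fecHeaderSize + 2
	typeData           = 0xf1
)

func main() {
	payload := []byte("application bytes")
	pkt := make([]byte, fecHeaderSizePlus2+len(payload))
	// 4-byte little-endian sequence id and 2-byte flag (written by markData),
	// then a 2-byte size that covers the size field itself plus the payload
	// (written by encode).
	binary.LittleEndian.PutUint32(pkt[0:], 42)
	binary.LittleEndian.PutUint16(pkt[4:], typeData)
	binary.LittleEndian.PutUint16(pkt[fecHeaderSize:], uint16(2+len(payload)))
	copy(pkt[fecHeaderSizePlus2:], payload)

	// Parsing mirrors decodeBytes: seqid, flag, then the remaining bytes.
	seqid := binary.LittleEndian.Uint32(pkt)
	flag := binary.LittleEndian.Uint16(pkt[4:])
	fmt.Printf("seqid=%d flag=0x%x data=%q\n", seqid, flag, pkt[fecHeaderSize:])
}
```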

BIN
vendor/github.com/xtaci/kcp-go/frame.png generated vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

BIN
vendor/github.com/xtaci/kcp-go/kcp-go.png generated vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.9 KiB

998
vendor/github.com/xtaci/kcp-go/kcp.go generated vendored Normal file
View File

@ -0,0 +1,998 @@
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp
import (
"encoding/binary"
"sync/atomic"
)
const (
IKCP_RTO_NDL = 30 // no delay min rto
IKCP_RTO_MIN = 100 // normal min rto
IKCP_RTO_DEF = 200
IKCP_RTO_MAX = 60000
IKCP_CMD_PUSH = 81 // cmd: push data
IKCP_CMD_ACK = 82 // cmd: ack
IKCP_CMD_WASK = 83 // cmd: window probe (ask)
IKCP_CMD_WINS = 84 // cmd: window size (tell)
IKCP_ASK_SEND = 1 // need to send IKCP_CMD_WASK
IKCP_ASK_TELL = 2 // need to send IKCP_CMD_WINS
IKCP_WND_SND = 32
IKCP_WND_RCV = 32
IKCP_MTU_DEF = 1400
IKCP_ACK_FAST = 3
IKCP_INTERVAL = 100
IKCP_OVERHEAD = 24
IKCP_DEADLINK = 20
IKCP_THRESH_INIT = 2
IKCP_THRESH_MIN = 2
IKCP_PROBE_INIT = 7000 // 7 secs to probe window size
IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// output_callback is a prototype which ought to capture conn and call conn.Write
type output_callback func(buf []byte, size int)
/* encode 8 bits unsigned int */
func ikcp_encode8u(p []byte, c byte) []byte {
p[0] = c
return p[1:]
}
/* decode 8 bits unsigned int */
func ikcp_decode8u(p []byte, c *byte) []byte {
*c = p[0]
return p[1:]
}
/* encode 16 bits unsigned int (lsb) */
func ikcp_encode16u(p []byte, w uint16) []byte {
binary.LittleEndian.PutUint16(p, w)
return p[2:]
}
/* decode 16 bits unsigned int (lsb) */
func ikcp_decode16u(p []byte, w *uint16) []byte {
*w = binary.LittleEndian.Uint16(p)
return p[2:]
}
/* encode 32 bits unsigned int (lsb) */
func ikcp_encode32u(p []byte, l uint32) []byte {
binary.LittleEndian.PutUint32(p, l)
return p[4:]
}
/* decode 32 bits unsigned int (lsb) */
func ikcp_decode32u(p []byte, l *uint32) []byte {
*l = binary.LittleEndian.Uint32(p)
return p[4:]
}
func _imin_(a, b uint32) uint32 {
if a <= b {
return a
}
return b
}
func _imax_(a, b uint32) uint32 {
if a >= b {
return a
}
return b
}
func _ibound_(lower, middle, upper uint32) uint32 {
return _imin_(_imax_(lower, middle), upper)
}
func _itimediff(later, earlier uint32) int32 {
return (int32)(later - earlier)
}
// segment defines a KCP segment
type segment struct {
conv uint32
cmd uint8
frg uint8
wnd uint16
ts uint32
sn uint32
una uint32
rto uint32
xmit uint32
resendts uint32
fastack uint32
data []byte
}
// encode a segment into buffer
func (seg *segment) encode(ptr []byte) []byte {
ptr = ikcp_encode32u(ptr, seg.conv)
ptr = ikcp_encode8u(ptr, seg.cmd)
ptr = ikcp_encode8u(ptr, seg.frg)
ptr = ikcp_encode16u(ptr, seg.wnd)
ptr = ikcp_encode32u(ptr, seg.ts)
ptr = ikcp_encode32u(ptr, seg.sn)
ptr = ikcp_encode32u(ptr, seg.una)
ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
atomic.AddUint64(&DefaultSnmp.OutSegs, 1)
return ptr
}
// KCP defines a single KCP connection
type KCP struct {
conv, mtu, mss, state uint32
snd_una, snd_nxt, rcv_nxt uint32
ssthresh uint32
rx_rttvar, rx_srtt int32
rx_rto, rx_minrto uint32
snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32
interval, ts_flush uint32
nodelay, updated uint32
ts_probe, probe_wait uint32
dead_link, incr uint32
fastresend int32
nocwnd, stream int32
snd_queue []segment
rcv_queue []segment
snd_buf []segment
rcv_buf []segment
acklist []ackItem
buffer []byte
output output_callback
}
type ackItem struct {
sn uint32
ts uint32
}
// NewKCP creates a new KCP control object. 'conv' must be equal at both endpoints
// of the same connection.
func NewKCP(conv uint32, output output_callback) *KCP {
kcp := new(KCP)
kcp.conv = conv
kcp.snd_wnd = IKCP_WND_SND
kcp.rcv_wnd = IKCP_WND_RCV
kcp.rmt_wnd = IKCP_WND_RCV
kcp.mtu = IKCP_MTU_DEF
kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
kcp.rx_rto = IKCP_RTO_DEF
kcp.rx_minrto = IKCP_RTO_MIN
kcp.interval = IKCP_INTERVAL
kcp.ts_flush = IKCP_INTERVAL
kcp.ssthresh = IKCP_THRESH_INIT
kcp.dead_link = IKCP_DEADLINK
kcp.output = output
return kcp
}
// newSegment creates a KCP segment
func (kcp *KCP) newSegment(size int) (seg segment) {
seg.data = xmitBuf.Get().([]byte)[:size]
return
}
// delSegment recycles a KCP segment
func (kcp *KCP) delSegment(seg segment) {
xmitBuf.Put(seg.data)
}
// PeekSize checks the size of next message in the recv queue
func (kcp *KCP) PeekSize() (length int) {
if len(kcp.rcv_queue) == 0 {
return -1
}
seg := &kcp.rcv_queue[0]
if seg.frg == 0 {
return len(seg.data)
}
if len(kcp.rcv_queue) < int(seg.frg+1) {
return -1
}
for k := range kcp.rcv_queue {
seg := &kcp.rcv_queue[k]
length += len(seg.data)
if seg.frg == 0 {
break
}
}
return
}
// Recv is user/upper level recv: returns size, returns below zero for EAGAIN
func (kcp *KCP) Recv(buffer []byte) (n int) {
if len(kcp.rcv_queue) == 0 {
return -1
}
peeksize := kcp.PeekSize()
if peeksize < 0 {
return -2
}
if peeksize > len(buffer) {
return -3
}
var fast_recover bool
if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
fast_recover = true
}
// merge fragment
count := 0
for k := range kcp.rcv_queue {
seg := &kcp.rcv_queue[k]
copy(buffer, seg.data)
buffer = buffer[len(seg.data):]
n += len(seg.data)
count++
kcp.delSegment(*seg)
if seg.frg == 0 {
break
}
}
if count > 0 {
kcp.rcv_queue = kcp.remove_front(kcp.rcv_queue, count)
}
// move available data from rcv_buf -> rcv_queue
count = 0
for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
kcp.rcv_nxt++
count++
} else {
break
}
}
if count > 0 {
kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count)
}
// fast recover
if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
// ready to send back IKCP_CMD_WINS in ikcp_flush
// tell remote my window size
kcp.probe |= IKCP_ASK_TELL
}
return
}
// Send is user/upper level send, returns below zero for error
func (kcp *KCP) Send(buffer []byte) int {
var count int
if len(buffer) == 0 {
return -1
}
// append to previous segment in streaming mode (if possible)
if kcp.stream != 0 {
n := len(kcp.snd_queue)
if n > 0 {
seg := &kcp.snd_queue[n-1]
if len(seg.data) < int(kcp.mss) {
capacity := int(kcp.mss) - len(seg.data)
extend := capacity
if len(buffer) < capacity {
extend = len(buffer)
}
// grow slice, the underlying cap is guaranteed to
// be larger than kcp.mss
oldlen := len(seg.data)
seg.data = seg.data[:oldlen+extend]
copy(seg.data[oldlen:], buffer)
buffer = buffer[extend:]
}
}
if len(buffer) == 0 {
return 0
}
}
if len(buffer) <= int(kcp.mss) {
count = 1
} else {
count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
}
if count > 255 {
return -2
}
if count == 0 {
count = 1
}
for i := 0; i < count; i++ {
var size int
if len(buffer) > int(kcp.mss) {
size = int(kcp.mss)
} else {
size = len(buffer)
}
seg := kcp.newSegment(size)
copy(seg.data, buffer[:size])
if kcp.stream == 0 { // message mode
seg.frg = uint8(count - i - 1)
} else { // stream mode
seg.frg = 0
}
kcp.snd_queue = append(kcp.snd_queue, seg)
buffer = buffer[size:]
}
return 0
}
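// Illustrative note, not part of the upstream file: with the default MTU of
// 1400 and 24 bytes of KCP header overhead, mss is 1376, so a 3000-byte Send
// in message mode produces ceil(3000/1376) = 3 segments whose frg fields
// count down 2, 1, 0; PeekSize/Recv use frg == 0 to find the end of the
// message. The helper name below is hypothetical.
func exampleSegmentCount(payload, mss int) int {
	if payload <= mss {
		return 1
	}
	return (payload + mss - 1) / mss // ceiling division, mirroring Send above
}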
func (kcp *KCP) update_ack(rtt int32) {
// https://tools.ietf.org/html/rfc6298
var rto uint32
if kcp.rx_srtt == 0 {
kcp.rx_srtt = rtt
kcp.rx_rttvar = rtt >> 1
} else {
delta := rtt - kcp.rx_srtt
kcp.rx_srtt += delta >> 3
if delta < 0 {
delta = -delta
}
if rtt < kcp.rx_srtt-kcp.rx_rttvar {
// if the new RTT sample is below the bottom of the range of what an RTT
// measurement is expected to be, give it an 8x reduced weight versus its
// normal weighting
kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 5
} else {
kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 2
}
}
rto = uint32(kcp.rx_srtt) + _imax_(kcp.interval, uint32(kcp.rx_rttvar)<<2)
kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
}
func (kcp *KCP) shrink_buf() {
if len(kcp.snd_buf) > 0 {
seg := &kcp.snd_buf[0]
kcp.snd_una = seg.sn
} else {
kcp.snd_una = kcp.snd_nxt
}
}
func (kcp *KCP) parse_ack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if sn == seg.sn {
kcp.delSegment(*seg)
copy(kcp.snd_buf[k:], kcp.snd_buf[k+1:])
kcp.snd_buf[len(kcp.snd_buf)-1] = segment{}
kcp.snd_buf = kcp.snd_buf[:len(kcp.snd_buf)-1]
break
}
if _itimediff(sn, seg.sn) < 0 {
break
}
}
}
func (kcp *KCP) parse_fastack(sn uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return
}
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if _itimediff(sn, seg.sn) < 0 {
break
} else if sn != seg.sn {
seg.fastack++
}
}
}
func (kcp *KCP) parse_una(una uint32) {
count := 0
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
if _itimediff(una, seg.sn) > 0 {
kcp.delSegment(*seg)
count++
} else {
break
}
}
if count > 0 {
kcp.snd_buf = kcp.remove_front(kcp.snd_buf, count)
}
}
// ack append
func (kcp *KCP) ack_push(sn, ts uint32) {
kcp.acklist = append(kcp.acklist, ackItem{sn, ts})
}
func (kcp *KCP) parse_data(newseg segment) {
sn := newseg.sn
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
_itimediff(sn, kcp.rcv_nxt) < 0 {
kcp.delSegment(newseg)
return
}
n := len(kcp.rcv_buf) - 1
insert_idx := 0
repeat := false
for i := n; i >= 0; i-- {
seg := &kcp.rcv_buf[i]
if seg.sn == sn {
repeat = true
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
break
}
if _itimediff(sn, seg.sn) > 0 {
insert_idx = i + 1
break
}
}
if !repeat {
if insert_idx == n+1 {
kcp.rcv_buf = append(kcp.rcv_buf, newseg)
} else {
kcp.rcv_buf = append(kcp.rcv_buf, segment{})
copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
kcp.rcv_buf[insert_idx] = newseg
}
} else {
kcp.delSegment(newseg)
}
// move available data from rcv_buf -> rcv_queue
count := 0
for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
kcp.rcv_nxt++
count++
} else {
break
}
}
if count > 0 {
kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count)
}
}
// Input is called when a low-level packet (e.g. a UDP packet) is received.
// 'regular' indicates the packet was received directly rather than recovered from FEC.
func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
una := kcp.snd_una
if len(data) < IKCP_OVERHEAD {
return -1
}
var maxack uint32
var lastackts uint32
var flag int
var inSegs uint64
for {
var ts, sn, length, una, conv uint32
var wnd uint16
var cmd, frg uint8
if len(data) < int(IKCP_OVERHEAD) {
break
}
data = ikcp_decode32u(data, &conv)
if conv != kcp.conv {
return -1
}
data = ikcp_decode8u(data, &cmd)
data = ikcp_decode8u(data, &frg)
data = ikcp_decode16u(data, &wnd)
data = ikcp_decode32u(data, &ts)
data = ikcp_decode32u(data, &sn)
data = ikcp_decode32u(data, &una)
data = ikcp_decode32u(data, &length)
if len(data) < int(length) {
return -2
}
if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
return -3
}
// only trust window updates from regular packets. i.e: latest update
if regular {
kcp.rmt_wnd = uint32(wnd)
}
kcp.parse_una(una)
kcp.shrink_buf()
if cmd == IKCP_CMD_ACK {
kcp.parse_ack(sn)
kcp.shrink_buf()
if flag == 0 {
flag = 1
maxack = sn
} else if _itimediff(sn, maxack) > 0 {
maxack = sn
}
lastackts = ts
} else if cmd == IKCP_CMD_PUSH {
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
kcp.ack_push(sn, ts)
if _itimediff(sn, kcp.rcv_nxt) >= 0 {
seg := kcp.newSegment(int(length))
seg.conv = conv
seg.cmd = cmd
seg.frg = frg
seg.wnd = wnd
seg.ts = ts
seg.sn = sn
seg.una = una
copy(seg.data, data[:length])
kcp.parse_data(seg)
} else {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
}
} else {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
}
} else if cmd == IKCP_CMD_WASK {
// ready to send back IKCP_CMD_WINS in Ikcp_flush
// tell remote my window size
kcp.probe |= IKCP_ASK_TELL
} else if cmd == IKCP_CMD_WINS {
// do nothing
} else {
return -3
}
inSegs++
data = data[length:]
}
atomic.AddUint64(&DefaultSnmp.InSegs, inSegs)
if flag != 0 && regular {
kcp.parse_fastack(maxack)
current := currentMs()
if _itimediff(current, lastackts) >= 0 {
kcp.update_ack(_itimediff(current, lastackts))
}
}
if _itimediff(kcp.snd_una, una) > 0 {
if kcp.cwnd < kcp.rmt_wnd {
mss := kcp.mss
if kcp.cwnd < kcp.ssthresh {
kcp.cwnd++
kcp.incr += mss
} else {
if kcp.incr < mss {
kcp.incr = mss
}
kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
if (kcp.cwnd+1)*mss <= kcp.incr {
kcp.cwnd++
}
}
if kcp.cwnd > kcp.rmt_wnd {
kcp.cwnd = kcp.rmt_wnd
kcp.incr = kcp.rmt_wnd * mss
}
}
}
if ackNoDelay && len(kcp.acklist) > 0 { // ack immediately
kcp.flush(true)
} else if kcp.rmt_wnd == 0 && len(kcp.acklist) > 0 { // window zero
kcp.flush(true)
}
return 0
}
func (kcp *KCP) wnd_unused() uint16 {
if len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
return uint16(int(kcp.rcv_wnd) - len(kcp.rcv_queue))
}
return 0
}
// flush pending data
func (kcp *KCP) flush(ackOnly bool) {
var seg segment
seg.conv = kcp.conv
seg.cmd = IKCP_CMD_ACK
seg.wnd = kcp.wnd_unused()
seg.una = kcp.rcv_nxt
buffer := kcp.buffer
// flush acknowledges
ptr := buffer
for i, ack := range kcp.acklist {
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
// filter jitters caused by bufferbloat
if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i {
seg.sn, seg.ts = ack.sn, ack.ts
ptr = seg.encode(ptr)
}
}
kcp.acklist = kcp.acklist[0:0]
if ackOnly { // flush remaining ack segments
size := len(buffer) - len(ptr)
if size > 0 {
kcp.output(buffer, size)
}
return
}
// probe window size (if remote window size equals zero)
if kcp.rmt_wnd == 0 {
current := currentMs()
if kcp.probe_wait == 0 {
kcp.probe_wait = IKCP_PROBE_INIT
kcp.ts_probe = current + kcp.probe_wait
} else {
if _itimediff(current, kcp.ts_probe) >= 0 {
if kcp.probe_wait < IKCP_PROBE_INIT {
kcp.probe_wait = IKCP_PROBE_INIT
}
kcp.probe_wait += kcp.probe_wait / 2
if kcp.probe_wait > IKCP_PROBE_LIMIT {
kcp.probe_wait = IKCP_PROBE_LIMIT
}
kcp.ts_probe = current + kcp.probe_wait
kcp.probe |= IKCP_ASK_SEND
}
}
} else {
kcp.ts_probe = 0
kcp.probe_wait = 0
}
// flush window probing commands
if (kcp.probe & IKCP_ASK_SEND) != 0 {
seg.cmd = IKCP_CMD_WASK
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr)
}
// flush window size replies (answers to a remote probe)
if (kcp.probe & IKCP_ASK_TELL) != 0 {
seg.cmd = IKCP_CMD_WINS
size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr)
}
kcp.probe = 0
// calculate window size
cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
if kcp.nocwnd == 0 {
cwnd = _imin_(kcp.cwnd, cwnd)
}
// sliding window, controlled by snd_nxt && snd_una+cwnd
newSegsCount := 0
for k := range kcp.snd_queue {
if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
break
}
newseg := kcp.snd_queue[k]
newseg.conv = kcp.conv
newseg.cmd = IKCP_CMD_PUSH
newseg.sn = kcp.snd_nxt
kcp.snd_buf = append(kcp.snd_buf, newseg)
kcp.snd_nxt++
newSegsCount++
kcp.snd_queue[k].data = nil
}
if newSegsCount > 0 {
kcp.snd_queue = kcp.remove_front(kcp.snd_queue, newSegsCount)
}
// calculate resent
resent := uint32(kcp.fastresend)
if kcp.fastresend <= 0 {
resent = 0xffffffff
}
// check for retransmissions
current := currentMs()
var change, lost, lostSegs, fastRetransSegs, earlyRetransSegs uint64
for k := range kcp.snd_buf {
segment := &kcp.snd_buf[k]
needsend := false
if segment.xmit == 0 { // initial transmit
needsend = true
segment.rto = kcp.rx_rto
segment.resendts = current + segment.rto
} else if _itimediff(current, segment.resendts) >= 0 { // RTO
needsend = true
if kcp.nodelay == 0 {
segment.rto += kcp.rx_rto
} else {
segment.rto += kcp.rx_rto / 2
}
segment.resendts = current + segment.rto
lost++
lostSegs++
} else if segment.fastack >= resent { // fast retransmit
needsend = true
segment.fastack = 0
segment.rto = kcp.rx_rto
segment.resendts = current + segment.rto
change++
fastRetransSegs++
} else if segment.fastack > 0 && newSegsCount == 0 { // early retransmit
needsend = true
segment.fastack = 0
segment.rto = kcp.rx_rto
segment.resendts = current + segment.rto
change++
earlyRetransSegs++
}
if needsend {
segment.xmit++
segment.ts = current
segment.wnd = seg.wnd
segment.una = seg.una
size := len(buffer) - len(ptr)
need := IKCP_OVERHEAD + len(segment.data)
if size+need > int(kcp.mtu) {
kcp.output(buffer, size)
current = currentMs() // time update for a blocking call
ptr = buffer
}
ptr = segment.encode(ptr)
copy(ptr, segment.data)
ptr = ptr[len(segment.data):]
if segment.xmit >= kcp.dead_link {
kcp.state = 0xFFFFFFFF
}
}
}
// flush remaining segments
size := len(buffer) - len(ptr)
if size > 0 {
kcp.output(buffer, size)
}
// counter updates
sum := lostSegs
if lostSegs > 0 {
atomic.AddUint64(&DefaultSnmp.LostSegs, lostSegs)
}
if fastRetransSegs > 0 {
atomic.AddUint64(&DefaultSnmp.FastRetransSegs, fastRetransSegs)
sum += fastRetransSegs
}
if earlyRetransSegs > 0 {
atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, earlyRetransSegs)
sum += earlyRetransSegs
}
if sum > 0 {
atomic.AddUint64(&DefaultSnmp.RetransSegs, sum)
}
// update ssthresh
// rate halving, https://tools.ietf.org/html/rfc6937
if change > 0 {
inflight := kcp.snd_nxt - kcp.snd_una
kcp.ssthresh = inflight / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = kcp.ssthresh + resent
kcp.incr = kcp.cwnd * kcp.mss
}
// congestion control, https://tools.ietf.org/html/rfc5681
if lost > 0 {
kcp.ssthresh = cwnd / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = 1
kcp.incr = kcp.mss
}
if kcp.cwnd < 1 {
kcp.cwnd = 1
kcp.incr = kcp.mss
}
}
// Update updates state (call it repeatedly, every 10ms-100ms), or ask
// Check when to call it again (in the absence of Input/Send calls).
// The current timestamp is obtained internally, in milliseconds.
func (kcp *KCP) Update() {
var slap int32
current := currentMs()
if kcp.updated == 0 {
kcp.updated = 1
kcp.ts_flush = current
}
slap = _itimediff(current, kcp.ts_flush)
if slap >= 10000 || slap < -10000 {
kcp.ts_flush = current
slap = 0
}
if slap >= 0 {
kcp.ts_flush += kcp.interval
if _itimediff(current, kcp.ts_flush) >= 0 {
kcp.ts_flush = current + kcp.interval
}
kcp.flush(false)
}
}
// Check determines when you should invoke Update:
// it returns the timestamp, in milliseconds, at which Update should be
// called next, assuming there are no intervening Input/Send calls. You can
// call Update at that time instead of calling it repeatedly.
// This is important to avoid unnecessary Update invocations; use it to
// schedule Update (e.g. when implementing an epoll-like mechanism, or to
// optimize Update handling for massive numbers of KCP connections).
func (kcp *KCP) Check() uint32 {
current := currentMs()
ts_flush := kcp.ts_flush
tm_flush := int32(0x7fffffff)
tm_packet := int32(0x7fffffff)
minimal := uint32(0)
if kcp.updated == 0 {
return current
}
if _itimediff(current, ts_flush) >= 10000 ||
_itimediff(current, ts_flush) < -10000 {
ts_flush = current
}
if _itimediff(current, ts_flush) >= 0 {
return current
}
tm_flush = _itimediff(ts_flush, current)
for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k]
diff := _itimediff(seg.resendts, current)
if diff <= 0 {
return current
}
if diff < tm_packet {
tm_packet = diff
}
}
minimal = uint32(tm_packet)
if tm_packet >= tm_flush {
minimal = uint32(tm_flush)
}
if minimal >= kcp.interval {
minimal = kcp.interval
}
return current + minimal
}
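// Illustrative sketch, not part of the upstream file: a Check-driven step.
// Rather than calling Update on a fixed tick, a caller can run one Update and
// then sleep until the timestamp Check returns. currentMs and _itimediff are
// the helpers defined elsewhere in this package; the function name is
// hypothetical.
func exampleStep(kcp *KCP) (sleepMs uint32) {
	kcp.Update()
	next := kcp.Check() // earliest moment another Update is needed
	if d := _itimediff(next, currentMs()); d > 0 {
		return uint32(d)
	}
	return 0
}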
// SetMtu changes MTU size, default is 1400
func (kcp *KCP) SetMtu(mtu int) int {
if mtu < 50 || mtu < IKCP_OVERHEAD {
return -1
}
buffer := make([]byte, (mtu+IKCP_OVERHEAD)*3)
if buffer == nil {
return -2
}
kcp.mtu = uint32(mtu)
kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = buffer
return 0
}
// NoDelay options
// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
// nodelay: 0:disable(default), 1:enable
// interval: internal update timer interval in millisec, default is 100ms
// resend: 0:disable fast resend(default), 1:enable fast resend
// nc: 0:normal congestion control(default), 1:disable congestion control
func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
if nodelay >= 0 {
kcp.nodelay = uint32(nodelay)
if nodelay != 0 {
kcp.rx_minrto = IKCP_RTO_NDL
} else {
kcp.rx_minrto = IKCP_RTO_MIN
}
}
if interval >= 0 {
if interval > 5000 {
interval = 5000
} else if interval < 10 {
interval = 10
}
kcp.interval = uint32(interval)
}
if resend >= 0 {
kcp.fastresend = int32(resend)
}
if nc >= 0 {
kcp.nocwnd = int32(nc)
}
return 0
}
// WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
if sndwnd > 0 {
kcp.snd_wnd = uint32(sndwnd)
}
if rcvwnd > 0 {
kcp.rcv_wnd = uint32(rcvwnd)
}
return 0
}
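// Illustrative sketch, not part of the upstream file: the "fastest" tuning
// quoted in the NoDelay comment above, applied to a raw KCP object. The
// window sizes and MTU here are arbitrary example values.
func exampleFastTuning(kcp *KCP) {
	kcp.NoDelay(1, 20, 2, 1) // nodelay on, 20ms interval, fast resend after 2 skipped ACKs, congestion control off
	kcp.WndSize(128, 128)    // raise the 32/32 defaults
	kcp.SetMtu(1350)         // leave headroom for an outer tunnel header
}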
// WaitSnd reports how many packets are waiting to be sent
func (kcp *KCP) WaitSnd() int {
return len(kcp.snd_buf) + len(kcp.snd_queue)
}
// remove front n elements from queue
func (kcp *KCP) remove_front(q []segment, n int) []segment {
newn := copy(q, q[n:])
for i := newn; i < len(q); i++ {
q[i] = segment{} // manually clear for GC
}
return q[:newn]
}

932
vendor/github.com/xtaci/kcp-go/sess.go generated vendored Normal file
View File

@ -0,0 +1,932 @@
package kcp
import (
"crypto/rand"
"encoding/binary"
"hash/crc32"
"io"
"net"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
)
type errTimeout struct {
error
}
func (errTimeout) Timeout() bool { return true }
func (errTimeout) Temporary() bool { return true }
func (errTimeout) Error() string { return "i/o timeout" }
const (
// 16-byte magic number for each packet
nonceSize = 16
// 4-byte packet checksum
crcSize = 4
// overall crypto header size
cryptHeaderSize = nonceSize + crcSize
// maximum packet size
mtuLimit = 1500
// FEC keeps rxFECMulti* (dataShard+parityShard) ordered packets in memory
rxFECMulti = 3
// accept backlog
acceptBacklog = 128
// prerouting(to session) queue
qlen = 128
)
const (
errBrokenPipe = "broken pipe"
errInvalidOperation = "invalid operation"
)
var (
// global packet buffer
// shared among sending/receiving/FEC
xmitBuf sync.Pool
)
func init() {
xmitBuf.New = func() interface{} {
return make([]byte, mtuLimit)
}
}
type (
// UDPSession defines a KCP session implemented by UDP
UDPSession struct {
updaterIdx int // record slice index in updater
conn net.PacketConn // the underlying packet connection
kcp *KCP // KCP ARQ protocol
l *Listener // point to the Listener if it's accepted by Listener
block BlockCrypt // block encryption
// kcp receiving is based on packets
// recvbuf turns packets into stream
recvbuf []byte
bufptr []byte
// extended output buffer(with header)
ext []byte
// FEC
fecDecoder *fecDecoder
fecEncoder *fecEncoder
// settings
remote net.Addr // remote peer address
rd time.Time // read deadline
wd time.Time // write deadline
headerSize int // the overall header size added before KCP frame
ackNoDelay bool // send ack immediately for each incoming packet
writeDelay bool // delay kcp.flush() for Write() for bulk transfer
dup int // duplicate udp packets
// notifications
die chan struct{} // notify session has Closed
chReadEvent chan struct{} // notify Read() can be called without blocking
chWriteEvent chan struct{} // notify Write() can be called without blocking
chErrorEvent chan error // notify Read() have an error
isClosed bool // flag the session has Closed
mu sync.Mutex
}
setReadBuffer interface {
SetReadBuffer(bytes int) error
}
setWriteBuffer interface {
SetWriteBuffer(bytes int) error
}
)
// newUDPSession creates a new UDP session for client or server
func newUDPSession(conv uint32, dataShards, parityShards int, l *Listener, conn net.PacketConn, remote net.Addr, block BlockCrypt) *UDPSession {
sess := new(UDPSession)
sess.die = make(chan struct{})
sess.chReadEvent = make(chan struct{}, 1)
sess.chWriteEvent = make(chan struct{}, 1)
sess.chErrorEvent = make(chan error, 1)
sess.remote = remote
sess.conn = conn
sess.l = l
sess.block = block
sess.recvbuf = make([]byte, mtuLimit)
// FEC initialization
sess.fecDecoder = newFECDecoder(rxFECMulti*(dataShards+parityShards), dataShards, parityShards)
if sess.block != nil {
sess.fecEncoder = newFECEncoder(dataShards, parityShards, cryptHeaderSize)
} else {
sess.fecEncoder = newFECEncoder(dataShards, parityShards, 0)
}
// calculate header size
if sess.block != nil {
sess.headerSize += cryptHeaderSize
}
if sess.fecEncoder != nil {
sess.headerSize += fecHeaderSizePlus2
}
// only allocate extended packet buffer
// when the extra header is required
if sess.headerSize > 0 {
sess.ext = make([]byte, mtuLimit)
}
sess.kcp = NewKCP(conv, func(buf []byte, size int) {
if size >= IKCP_OVERHEAD {
sess.output(buf[:size])
}
})
sess.kcp.SetMtu(IKCP_MTU_DEF - sess.headerSize)
// add current session to the global updater,
// which periodically calls sess.update()
updater.addSession(sess)
if sess.l == nil { // it's a client connection
go sess.readLoop()
atomic.AddUint64(&DefaultSnmp.ActiveOpens, 1)
} else {
atomic.AddUint64(&DefaultSnmp.PassiveOpens, 1)
}
currestab := atomic.AddUint64(&DefaultSnmp.CurrEstab, 1)
maxconn := atomic.LoadUint64(&DefaultSnmp.MaxConn)
if currestab > maxconn {
atomic.CompareAndSwapUint64(&DefaultSnmp.MaxConn, maxconn, currestab)
}
return sess
}
// Read implements net.Conn
func (s *UDPSession) Read(b []byte) (n int, err error) {
for {
s.mu.Lock()
if len(s.bufptr) > 0 { // copy from buffer into b
n = copy(b, s.bufptr)
s.bufptr = s.bufptr[n:]
s.mu.Unlock()
return n, nil
}
if s.isClosed {
s.mu.Unlock()
return 0, errors.New(errBrokenPipe)
}
if size := s.kcp.PeekSize(); size > 0 { // peek data size from kcp
atomic.AddUint64(&DefaultSnmp.BytesReceived, uint64(size))
if len(b) >= size { // direct write to b
s.kcp.Recv(b)
s.mu.Unlock()
return size, nil
}
// resize kcp receive buffer
// to make sure recvbuf has enough capacity
if cap(s.recvbuf) < size {
s.recvbuf = make([]byte, size)
}
// resize recvbuf slice length
s.recvbuf = s.recvbuf[:size]
s.kcp.Recv(s.recvbuf)
n = copy(b, s.recvbuf) // copy to b
s.bufptr = s.recvbuf[n:] // update pointer
s.mu.Unlock()
return n, nil
}
// read deadline
var timeout *time.Timer
var c <-chan time.Time
if !s.rd.IsZero() {
if time.Now().After(s.rd) {
s.mu.Unlock()
return 0, errTimeout{}
}
delay := s.rd.Sub(time.Now())
timeout = time.NewTimer(delay)
c = timeout.C
}
s.mu.Unlock()
// wait for read event or timeout
select {
case <-s.chReadEvent:
case <-c:
case <-s.die:
case err = <-s.chErrorEvent:
if timeout != nil {
timeout.Stop()
}
return n, err
}
if timeout != nil {
timeout.Stop()
}
}
}
// Write implements net.Conn
func (s *UDPSession) Write(b []byte) (n int, err error) {
for {
s.mu.Lock()
if s.isClosed {
s.mu.Unlock()
return 0, errors.New(errBrokenPipe)
}
// api flow control
if s.kcp.WaitSnd() < int(s.kcp.snd_wnd) {
n = len(b)
for {
if len(b) <= int(s.kcp.mss) {
s.kcp.Send(b)
break
} else {
s.kcp.Send(b[:s.kcp.mss])
b = b[s.kcp.mss:]
}
}
if !s.writeDelay {
s.kcp.flush(false)
}
s.mu.Unlock()
atomic.AddUint64(&DefaultSnmp.BytesSent, uint64(n))
return n, nil
}
// write deadline
var timeout *time.Timer
var c <-chan time.Time
if !s.wd.IsZero() {
if time.Now().After(s.wd) {
s.mu.Unlock()
return 0, errTimeout{}
}
delay := s.wd.Sub(time.Now())
timeout = time.NewTimer(delay)
c = timeout.C
}
s.mu.Unlock()
// wait for write event or timeout
select {
case <-s.chWriteEvent:
case <-c:
case <-s.die:
}
if timeout != nil {
timeout.Stop()
}
}
}
// Close closes the connection.
func (s *UDPSession) Close() error {
// remove this session from updater & listener(if necessary)
updater.removeSession(s)
if s.l != nil { // notify listener
s.l.closeSession(s.remote)
}
s.mu.Lock()
defer s.mu.Unlock()
if s.isClosed {
return errors.New(errBrokenPipe)
}
close(s.die)
s.isClosed = true
atomic.AddUint64(&DefaultSnmp.CurrEstab, ^uint64(0))
if s.l == nil { // client socket close
return s.conn.Close()
}
return nil
}
// LocalAddr returns the local network address. The Addr returned is shared by all invocations of LocalAddr, so do not modify it.
func (s *UDPSession) LocalAddr() net.Addr { return s.conn.LocalAddr() }
// RemoteAddr returns the remote network address. The Addr returned is shared by all invocations of RemoteAddr, so do not modify it.
func (s *UDPSession) RemoteAddr() net.Addr { return s.remote }
// SetDeadline sets the deadline associated with the listener. A zero time value disables the deadline.
func (s *UDPSession) SetDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.rd = t
s.wd = t
return nil
}
// SetReadDeadline implements the Conn SetReadDeadline method.
func (s *UDPSession) SetReadDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.rd = t
return nil
}
// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (s *UDPSession) SetWriteDeadline(t time.Time) error {
s.mu.Lock()
defer s.mu.Unlock()
s.wd = t
return nil
}
// SetWriteDelay delays write for bulk transfer until the next update interval
func (s *UDPSession) SetWriteDelay(delay bool) {
s.mu.Lock()
defer s.mu.Unlock()
s.writeDelay = delay
}
// SetWindowSize sets the maximum window size
func (s *UDPSession) SetWindowSize(sndwnd, rcvwnd int) {
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.WndSize(sndwnd, rcvwnd)
}
// SetMtu sets the maximum transmission unit (not including the UDP header)
func (s *UDPSession) SetMtu(mtu int) bool {
if mtu > mtuLimit {
return false
}
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.SetMtu(mtu - s.headerSize)
return true
}
// SetStreamMode toggles the stream mode on/off
func (s *UDPSession) SetStreamMode(enable bool) {
s.mu.Lock()
defer s.mu.Unlock()
if enable {
s.kcp.stream = 1
} else {
s.kcp.stream = 0
}
}
// SetACKNoDelay changes the ack flush option; set true to flush acks immediately.
func (s *UDPSession) SetACKNoDelay(nodelay bool) {
s.mu.Lock()
defer s.mu.Unlock()
s.ackNoDelay = nodelay
}
// SetDUP duplicates udp packets for kcp output, for testing purposes only
func (s *UDPSession) SetDUP(dup int) {
s.mu.Lock()
defer s.mu.Unlock()
s.dup = dup
}
// SetNoDelay calls nodelay() of kcp
// https://github.com/skywind3000/kcp/blob/master/README.en.md#protocol-configuration
func (s *UDPSession) SetNoDelay(nodelay, interval, resend, nc int) {
s.mu.Lock()
defer s.mu.Unlock()
s.kcp.NoDelay(nodelay, interval, resend, nc)
}
// SetDSCP sets the 6-bit DSCP field of the IP header; no effect if the session was accepted from a Listener
func (s *UDPSession) SetDSCP(dscp int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(*connectedUDPConn); ok {
return ipv4.NewConn(nc.UDPConn).SetTOS(dscp << 2)
} else if nc, ok := s.conn.(net.Conn); ok {
return ipv4.NewConn(nc).SetTOS(dscp << 2)
}
}
return errors.New(errInvalidOperation)
}
// SetReadBuffer sets the socket read buffer, no effect if it's accepted from Listener
func (s *UDPSession) SetReadBuffer(bytes int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(setReadBuffer); ok {
return nc.SetReadBuffer(bytes)
}
}
return errors.New(errInvalidOperation)
}
// SetWriteBuffer sets the socket write buffer, no effect if it's accepted from Listener
func (s *UDPSession) SetWriteBuffer(bytes int) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.l == nil {
if nc, ok := s.conn.(setWriteBuffer); ok {
return nc.SetWriteBuffer(bytes)
}
}
return errors.New(errInvalidOperation)
}
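// Illustrative sketch, not part of the upstream file: typical knobs turned on
// a freshly dialed session. The values are examples only; SetDSCP,
// SetReadBuffer and SetWriteBuffer only take effect on client sessions
// (s.l == nil), as the methods above show.
func exampleTuneSession(s *UDPSession) {
	s.SetStreamMode(true)     // treat the session as a byte stream
	s.SetWriteDelay(false)    // flush on every Write instead of batching
	s.SetNoDelay(1, 20, 2, 1) // aggressive "fast mode" parameters
	s.SetWindowSize(128, 512) // send / receive windows in segments
	s.SetMtu(1350)            // leave room for outer tunnel headers
	s.SetACKNoDelay(true)     // ack every incoming packet immediately
	s.SetReadBuffer(4 * 1024 * 1024)
	s.SetWriteBuffer(4 * 1024 * 1024)
}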
// output pipeline entry
// steps for output data processing:
// 0. Header extension
// 1. FEC
// 2. CRC32
// 3. Encryption
// 4. WriteTo kernel
func (s *UDPSession) output(buf []byte) {
var ecc [][]byte
// 0. extend buf's header space(if necessary)
ext := buf
if s.headerSize > 0 {
ext = s.ext[:s.headerSize+len(buf)]
copy(ext[s.headerSize:], buf)
}
// 1. FEC encoding
if s.fecEncoder != nil {
ecc = s.fecEncoder.encode(ext)
}
// 2&3. crc32 & encryption
if s.block != nil {
io.ReadFull(rand.Reader, ext[:nonceSize])
checksum := crc32.ChecksumIEEE(ext[cryptHeaderSize:])
binary.LittleEndian.PutUint32(ext[nonceSize:], checksum)
s.block.Encrypt(ext, ext)
for k := range ecc {
io.ReadFull(rand.Reader, ecc[k][:nonceSize])
checksum := crc32.ChecksumIEEE(ecc[k][cryptHeaderSize:])
binary.LittleEndian.PutUint32(ecc[k][nonceSize:], checksum)
s.block.Encrypt(ecc[k], ecc[k])
}
}
// 4. WriteTo kernel
nbytes := 0
npkts := 0
for i := 0; i < s.dup+1; i++ {
if n, err := s.conn.WriteTo(ext, s.remote); err == nil {
nbytes += n
npkts++
}
}
for k := range ecc {
if n, err := s.conn.WriteTo(ecc[k], s.remote); err == nil {
nbytes += n
npkts++
}
}
atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
}
// kcp update, returns the interval until the next call
func (s *UDPSession) update() (interval time.Duration) {
s.mu.Lock()
s.kcp.flush(false)
if s.kcp.WaitSnd() < int(s.kcp.snd_wnd) {
s.notifyWriteEvent()
}
interval = time.Duration(s.kcp.interval) * time.Millisecond
s.mu.Unlock()
return
}
// GetConv gets conversation id of a session
func (s *UDPSession) GetConv() uint32 { return s.kcp.conv }
func (s *UDPSession) notifyReadEvent() {
select {
case s.chReadEvent <- struct{}{}:
default:
}
}
func (s *UDPSession) notifyWriteEvent() {
select {
case s.chWriteEvent <- struct{}{}:
default:
}
}
func (s *UDPSession) kcpInput(data []byte) {
var kcpInErrors, fecErrs, fecRecovered, fecParityShards uint64
if s.fecDecoder != nil {
f := s.fecDecoder.decodeBytes(data)
s.mu.Lock()
if f.flag == typeData {
if ret := s.kcp.Input(data[fecHeaderSizePlus2:], true, s.ackNoDelay); ret != 0 {
kcpInErrors++
}
}
if f.flag == typeData || f.flag == typeFEC {
if f.flag == typeFEC {
fecParityShards++
}
recovers := s.fecDecoder.decode(f)
for _, r := range recovers {
if len(r) >= 2 { // must be at least 2 bytes
sz := binary.LittleEndian.Uint16(r)
if int(sz) <= len(r) && sz >= 2 {
if ret := s.kcp.Input(r[2:sz], false, s.ackNoDelay); ret == 0 {
fecRecovered++
} else {
kcpInErrors++
}
} else {
fecErrs++
}
} else {
fecErrs++
}
}
}
// notify reader
if n := s.kcp.PeekSize(); n > 0 {
s.notifyReadEvent()
}
s.mu.Unlock()
} else {
s.mu.Lock()
if ret := s.kcp.Input(data, true, s.ackNoDelay); ret != 0 {
kcpInErrors++
}
// notify reader
if n := s.kcp.PeekSize(); n > 0 {
s.notifyReadEvent()
}
s.mu.Unlock()
}
atomic.AddUint64(&DefaultSnmp.InPkts, 1)
atomic.AddUint64(&DefaultSnmp.InBytes, uint64(len(data)))
if fecParityShards > 0 {
atomic.AddUint64(&DefaultSnmp.FECParityShards, fecParityShards)
}
if kcpInErrors > 0 {
atomic.AddUint64(&DefaultSnmp.KCPInErrors, kcpInErrors)
}
if fecErrs > 0 {
atomic.AddUint64(&DefaultSnmp.FECErrs, fecErrs)
}
if fecRecovered > 0 {
atomic.AddUint64(&DefaultSnmp.FECRecovered, fecRecovered)
}
}
func (s *UDPSession) receiver(ch chan<- []byte) {
for {
data := xmitBuf.Get().([]byte)[:mtuLimit]
if n, _, err := s.conn.ReadFrom(data); err == nil && n >= s.headerSize+IKCP_OVERHEAD {
select {
case ch <- data[:n]:
case <-s.die:
return
}
} else if err != nil {
s.chErrorEvent <- err
return
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
}
}
// read loop for client session
func (s *UDPSession) readLoop() {
chPacket := make(chan []byte, qlen)
go s.receiver(chPacket)
for {
select {
case data := <-chPacket:
raw := data
dataValid := false
if s.block != nil {
s.block.Decrypt(data, data)
data = data[nonceSize:]
checksum := crc32.ChecksumIEEE(data[crcSize:])
if checksum == binary.LittleEndian.Uint32(data) {
data = data[crcSize:]
dataValid = true
} else {
atomic.AddUint64(&DefaultSnmp.InCsumErrors, 1)
}
} else if s.block == nil {
dataValid = true
}
if dataValid {
s.kcpInput(data)
}
xmitBuf.Put(raw)
case <-s.die:
return
}
}
}
type (
// Listener defines a server listening for connections
Listener struct {
block BlockCrypt // block encryption
dataShards int // FEC data shard
parityShards int // FEC parity shard
fecDecoder *fecDecoder // FEC mock initialization
conn net.PacketConn // the underlying packet connection
sessions map[string]*UDPSession // all sessions accepted by this Listener
chAccepts chan *UDPSession // Listen() backlog
chSessionClosed chan net.Addr // session close queue
headerSize int // the overall header size added before KCP frame
die chan struct{} // notify the listener has closed
rd atomic.Value // read deadline for Accept()
wd atomic.Value
}
// incoming packet
inPacket struct {
from net.Addr
data []byte
}
)
// monitor incoming data for all connections of server
func (l *Listener) monitor() {
chPacket := make(chan inPacket, qlen)
go l.receiver(chPacket)
for {
select {
case p := <-chPacket:
raw := p.data
data := p.data
from := p.from
dataValid := false
if l.block != nil {
l.block.Decrypt(data, data)
data = data[nonceSize:]
checksum := crc32.ChecksumIEEE(data[crcSize:])
if checksum == binary.LittleEndian.Uint32(data) {
data = data[crcSize:]
dataValid = true
} else {
atomic.AddUint64(&DefaultSnmp.InCsumErrors, 1)
}
} else if l.block == nil {
dataValid = true
}
if dataValid {
addr := from.String()
s, ok := l.sessions[addr]
if !ok { // new session
if len(l.chAccepts) < cap(l.chAccepts) { // do not let new session overwhelm accept queue
var conv uint32
convValid := false
if l.fecDecoder != nil {
isfec := binary.LittleEndian.Uint16(data[4:])
if isfec == typeData {
conv = binary.LittleEndian.Uint32(data[fecHeaderSizePlus2:])
convValid = true
}
} else {
conv = binary.LittleEndian.Uint32(data)
convValid = true
}
if convValid {
s := newUDPSession(conv, l.dataShards, l.parityShards, l, l.conn, from, l.block)
s.kcpInput(data)
l.sessions[addr] = s
l.chAccepts <- s
}
}
} else {
s.kcpInput(data)
}
}
xmitBuf.Put(raw)
case deadlink := <-l.chSessionClosed:
delete(l.sessions, deadlink.String())
case <-l.die:
return
}
}
}
func (l *Listener) receiver(ch chan<- inPacket) {
for {
data := xmitBuf.Get().([]byte)[:mtuLimit]
if n, from, err := l.conn.ReadFrom(data); err == nil && n >= l.headerSize+IKCP_OVERHEAD {
select {
case ch <- inPacket{from, data[:n]}:
case <-l.die:
return
}
} else if err != nil {
return
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
}
}
// SetReadBuffer sets the socket read buffer for the Listener
func (l *Listener) SetReadBuffer(bytes int) error {
if nc, ok := l.conn.(setReadBuffer); ok {
return nc.SetReadBuffer(bytes)
}
return errors.New(errInvalidOperation)
}
// SetWriteBuffer sets the socket write buffer for the Listener
func (l *Listener) SetWriteBuffer(bytes int) error {
if nc, ok := l.conn.(setWriteBuffer); ok {
return nc.SetWriteBuffer(bytes)
}
return errors.New(errInvalidOperation)
}
// SetDSCP sets the 6-bit DSCP field of the IP header
func (l *Listener) SetDSCP(dscp int) error {
if nc, ok := l.conn.(net.Conn); ok {
return ipv4.NewConn(nc).SetTOS(dscp << 2)
}
return errors.New(errInvalidOperation)
}
// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
func (l *Listener) Accept() (net.Conn, error) {
return l.AcceptKCP()
}
// AcceptKCP accepts a KCP connection
func (l *Listener) AcceptKCP() (*UDPSession, error) {
var timeout <-chan time.Time
if tdeadline, ok := l.rd.Load().(time.Time); ok && !tdeadline.IsZero() {
timeout = time.After(tdeadline.Sub(time.Now()))
}
select {
case <-timeout:
return nil, &errTimeout{}
case c := <-l.chAccepts:
return c, nil
case <-l.die:
return nil, errors.New(errBrokenPipe)
}
}
// SetDeadline sets the deadline associated with the listener. A zero time value disables the deadline.
func (l *Listener) SetDeadline(t time.Time) error {
l.SetReadDeadline(t)
l.SetWriteDeadline(t)
return nil
}
// SetReadDeadline implements the Conn SetReadDeadline method.
func (l *Listener) SetReadDeadline(t time.Time) error {
l.rd.Store(t)
return nil
}
// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (l *Listener) SetWriteDeadline(t time.Time) error {
l.wd.Store(t)
return nil
}
// Close stops listening on the UDP address. Already Accepted connections are not closed.
func (l *Listener) Close() error {
close(l.die)
return l.conn.Close()
}
// closeSession notifies the listener that a session has closed
func (l *Listener) closeSession(remote net.Addr) bool {
select {
case l.chSessionClosed <- remote:
return true
case <-l.die:
return false
}
}
// Addr returns the listener's network address. The Addr returned is shared by all invocations of Addr, so do not modify it.
func (l *Listener) Addr() net.Addr { return l.conn.LocalAddr() }
// Listen listens for incoming KCP packets addressed to the local address laddr on the network "udp".
func Listen(laddr string) (net.Listener, error) { return ListenWithOptions(laddr, nil, 0, 0) }
// ListenWithOptions listens for incoming KCP packets addressed to the local address laddr on the network "udp" with packet encryption;
// dataShards and parityShards define the Reed-Solomon erasure coding parameters
func ListenWithOptions(laddr string, block BlockCrypt, dataShards, parityShards int) (*Listener, error) {
udpaddr, err := net.ResolveUDPAddr("udp", laddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
conn, err := net.ListenUDP("udp", udpaddr)
if err != nil {
return nil, errors.Wrap(err, "net.ListenUDP")
}
return ServeConn(block, dataShards, parityShards, conn)
}
// ServeConn serves KCP protocol for a single packet connection.
func ServeConn(block BlockCrypt, dataShards, parityShards int, conn net.PacketConn) (*Listener, error) {
l := new(Listener)
l.conn = conn
l.sessions = make(map[string]*UDPSession)
l.chAccepts = make(chan *UDPSession, acceptBacklog)
l.chSessionClosed = make(chan net.Addr)
l.die = make(chan struct{})
l.dataShards = dataShards
l.parityShards = parityShards
l.block = block
l.fecDecoder = newFECDecoder(rxFECMulti*(dataShards+parityShards), dataShards, parityShards)
// calculate header size
if l.block != nil {
l.headerSize += cryptHeaderSize
}
if l.fecDecoder != nil {
l.headerSize += fecHeaderSizePlus2
}
go l.monitor()
return l, nil
}
// Dial connects to the remote address "raddr" on the network "udp"
func Dial(raddr string) (net.Conn, error) { return DialWithOptions(raddr, nil, 0, 0) }
// DialWithOptions connects to the remote address "raddr" on the network "udp" with packet encryption
func DialWithOptions(raddr string, block BlockCrypt, dataShards, parityShards int) (*UDPSession, error) {
udpaddr, err := net.ResolveUDPAddr("udp", raddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
udpconn, err := net.DialUDP("udp", nil, udpaddr)
if err != nil {
return nil, errors.Wrap(err, "net.DialUDP")
}
return NewConn(raddr, block, dataShards, parityShards, &connectedUDPConn{udpconn})
}
// NewConn establishes a session and talks KCP protocol over a packet connection.
func NewConn(raddr string, block BlockCrypt, dataShards, parityShards int, conn net.PacketConn) (*UDPSession, error) {
udpaddr, err := net.ResolveUDPAddr("udp", raddr)
if err != nil {
return nil, errors.Wrap(err, "net.ResolveUDPAddr")
}
var convid uint32
binary.Read(rand.Reader, binary.LittleEndian, &convid)
return newUDPSession(convid, dataShards, parityShards, nil, conn, udpaddr, block), nil
}
// returns current time in milliseconds
func currentMs() uint32 { return uint32(time.Now().UnixNano() / int64(time.Millisecond)) }
// connectedUDPConn is a wrapper for net.UDPConn which converts WriteTo syscalls
// to Write syscalls that are 4 times faster on some OS'es. This should only be
// used for connections that were produced by a net.Dial* call.
type connectedUDPConn struct{ *net.UDPConn }
// WriteTo redirects all writes to the Write syscall, which is 4 times faster.
func (c *connectedUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) { return c.Write(b) }
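// Illustrative sketch, not part of the upstream file: a minimal echo exchange
// over the plain Listen/Dial helpers defined above (no encryption, no FEC).
// Error handling is elided, the address is an example value, and with the
// default 100ms update interval the exchange is not instantaneous.
func exampleEchoPair() {
	ln, _ := Listen("127.0.0.1:12345")
	go func() {
		conn, _ := ln.Accept() // a *UDPSession returned as a net.Conn
		buf := make([]byte, 1024)
		n, _ := conn.Read(buf)
		conn.Write(buf[:n]) // echo back
	}()
	c, _ := Dial("127.0.0.1:12345")
	c.Write([]byte("ping"))
	reply := make([]byte, 1024)
	c.Read(reply) // blocks until the echo arrives
	c.Close()
}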

164
vendor/github.com/xtaci/kcp-go/snmp.go generated vendored Normal file
View File

@ -0,0 +1,164 @@
package kcp
import (
"fmt"
"sync/atomic"
)
// Snmp defines network statistics indicators
type Snmp struct {
BytesSent uint64 // bytes sent from upper level
BytesReceived uint64 // bytes received to upper level
MaxConn uint64 // max number of connections ever reached
ActiveOpens uint64 // accumulated active open connections
PassiveOpens uint64 // accumulated passive open connections
CurrEstab uint64 // current number of established connections
InErrs uint64 // UDP read errors reported from net.PacketConn
InCsumErrors uint64 // checksum errors from CRC32
KCPInErrors uint64 // packet input errors reported from KCP
InPkts uint64 // incoming packets count
OutPkts uint64 // outgoing packets count
InSegs uint64 // incoming KCP segments
OutSegs uint64 // outgoing KCP segments
InBytes uint64 // UDP bytes received
OutBytes uint64 // UDP bytes sent
RetransSegs uint64 // accumulated retransmitted segments
FastRetransSegs uint64 // accumulated fast retransmitted segments
EarlyRetransSegs uint64 // accumulated early retransmitted segments
LostSegs uint64 // number of segs inferred as lost
RepeatSegs uint64 // number of segs duplicated
FECRecovered uint64 // correct packets recovered from FEC
FECErrs uint64 // incorrect packets recovered from FEC
FECParityShards uint64 // FEC segments received
FECShortShards uint64 // number of data shards that were not enough for recovery
}
func newSnmp() *Snmp {
return new(Snmp)
}
// Header returns all field names
func (s *Snmp) Header() []string {
return []string{
"BytesSent",
"BytesReceived",
"MaxConn",
"ActiveOpens",
"PassiveOpens",
"CurrEstab",
"InErrs",
"InCsumErrors",
"KCPInErrors",
"InPkts",
"OutPkts",
"InSegs",
"OutSegs",
"InBytes",
"OutBytes",
"RetransSegs",
"FastRetransSegs",
"EarlyRetransSegs",
"LostSegs",
"RepeatSegs",
"FECParityShards",
"FECErrs",
"FECRecovered",
"FECShortShards",
}
}
// ToSlice returns current snmp info as slice
func (s *Snmp) ToSlice() []string {
snmp := s.Copy()
return []string{
fmt.Sprint(snmp.BytesSent),
fmt.Sprint(snmp.BytesReceived),
fmt.Sprint(snmp.MaxConn),
fmt.Sprint(snmp.ActiveOpens),
fmt.Sprint(snmp.PassiveOpens),
fmt.Sprint(snmp.CurrEstab),
fmt.Sprint(snmp.InErrs),
fmt.Sprint(snmp.InCsumErrors),
fmt.Sprint(snmp.KCPInErrors),
fmt.Sprint(snmp.InPkts),
fmt.Sprint(snmp.OutPkts),
fmt.Sprint(snmp.InSegs),
fmt.Sprint(snmp.OutSegs),
fmt.Sprint(snmp.InBytes),
fmt.Sprint(snmp.OutBytes),
fmt.Sprint(snmp.RetransSegs),
fmt.Sprint(snmp.FastRetransSegs),
fmt.Sprint(snmp.EarlyRetransSegs),
fmt.Sprint(snmp.LostSegs),
fmt.Sprint(snmp.RepeatSegs),
fmt.Sprint(snmp.FECParityShards),
fmt.Sprint(snmp.FECErrs),
fmt.Sprint(snmp.FECRecovered),
fmt.Sprint(snmp.FECShortShards),
}
}
// Copy makes a copy of the current snmp snapshot
func (s *Snmp) Copy() *Snmp {
d := newSnmp()
d.BytesSent = atomic.LoadUint64(&s.BytesSent)
d.BytesReceived = atomic.LoadUint64(&s.BytesReceived)
d.MaxConn = atomic.LoadUint64(&s.MaxConn)
d.ActiveOpens = atomic.LoadUint64(&s.ActiveOpens)
d.PassiveOpens = atomic.LoadUint64(&s.PassiveOpens)
d.CurrEstab = atomic.LoadUint64(&s.CurrEstab)
d.InErrs = atomic.LoadUint64(&s.InErrs)
d.InCsumErrors = atomic.LoadUint64(&s.InCsumErrors)
d.KCPInErrors = atomic.LoadUint64(&s.KCPInErrors)
d.InPkts = atomic.LoadUint64(&s.InPkts)
d.OutPkts = atomic.LoadUint64(&s.OutPkts)
d.InSegs = atomic.LoadUint64(&s.InSegs)
d.OutSegs = atomic.LoadUint64(&s.OutSegs)
d.InBytes = atomic.LoadUint64(&s.InBytes)
d.OutBytes = atomic.LoadUint64(&s.OutBytes)
d.RetransSegs = atomic.LoadUint64(&s.RetransSegs)
d.FastRetransSegs = atomic.LoadUint64(&s.FastRetransSegs)
d.EarlyRetransSegs = atomic.LoadUint64(&s.EarlyRetransSegs)
d.LostSegs = atomic.LoadUint64(&s.LostSegs)
d.RepeatSegs = atomic.LoadUint64(&s.RepeatSegs)
d.FECParityShards = atomic.LoadUint64(&s.FECParityShards)
d.FECErrs = atomic.LoadUint64(&s.FECErrs)
d.FECRecovered = atomic.LoadUint64(&s.FECRecovered)
d.FECShortShards = atomic.LoadUint64(&s.FECShortShards)
return d
}
// Reset sets all values to zero
func (s *Snmp) Reset() {
atomic.StoreUint64(&s.BytesSent, 0)
atomic.StoreUint64(&s.BytesReceived, 0)
atomic.StoreUint64(&s.MaxConn, 0)
atomic.StoreUint64(&s.ActiveOpens, 0)
atomic.StoreUint64(&s.PassiveOpens, 0)
atomic.StoreUint64(&s.CurrEstab, 0)
atomic.StoreUint64(&s.InErrs, 0)
atomic.StoreUint64(&s.InCsumErrors, 0)
atomic.StoreUint64(&s.KCPInErrors, 0)
atomic.StoreUint64(&s.InPkts, 0)
atomic.StoreUint64(&s.OutPkts, 0)
atomic.StoreUint64(&s.InSegs, 0)
atomic.StoreUint64(&s.OutSegs, 0)
atomic.StoreUint64(&s.InBytes, 0)
atomic.StoreUint64(&s.OutBytes, 0)
atomic.StoreUint64(&s.RetransSegs, 0)
atomic.StoreUint64(&s.FastRetransSegs, 0)
atomic.StoreUint64(&s.EarlyRetransSegs, 0)
atomic.StoreUint64(&s.LostSegs, 0)
atomic.StoreUint64(&s.RepeatSegs, 0)
atomic.StoreUint64(&s.FECParityShards, 0)
atomic.StoreUint64(&s.FECErrs, 0)
atomic.StoreUint64(&s.FECRecovered, 0)
atomic.StoreUint64(&s.FECShortShards, 0)
}
// DefaultSnmp is the global KCP connection statistics collector
var DefaultSnmp *Snmp
func init() {
DefaultSnmp = newSnmp()
}
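// Illustrative sketch, not part of the upstream file: dumping the global
// statistics collected by DefaultSnmp. Header and ToSlice return columns in
// the same order, so they can be zipped together; fmt is already imported by
// this file.
func exampleDumpSnmp() {
	header := DefaultSnmp.Header()
	values := DefaultSnmp.ToSlice()
	for i := range header {
		fmt.Println(header[i], "=", values[i])
	}
}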

105
vendor/github.com/xtaci/kcp-go/updater.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
package kcp
import (
"container/heap"
"sync"
"time"
)
var updater updateHeap
func init() {
updater.init()
go updater.updateTask()
}
// entry contains a session update info
type entry struct {
ts time.Time
s *UDPSession
}
// a global, heap-managed kcp.flush() caller
type updateHeap struct {
entries []entry
mu sync.Mutex
chWakeUp chan struct{}
}
func (h *updateHeap) Len() int { return len(h.entries) }
func (h *updateHeap) Less(i, j int) bool { return h.entries[i].ts.Before(h.entries[j].ts) }
func (h *updateHeap) Swap(i, j int) {
h.entries[i], h.entries[j] = h.entries[j], h.entries[i]
h.entries[i].s.updaterIdx = i
h.entries[j].s.updaterIdx = j
}
func (h *updateHeap) Push(x interface{}) {
h.entries = append(h.entries, x.(entry))
n := len(h.entries)
h.entries[n-1].s.updaterIdx = n - 1
}
func (h *updateHeap) Pop() interface{} {
n := len(h.entries)
x := h.entries[n-1]
h.entries[n-1].s.updaterIdx = -1
h.entries[n-1] = entry{} // manually clear for GC
h.entries = h.entries[0 : n-1]
return x
}
func (h *updateHeap) init() {
h.chWakeUp = make(chan struct{}, 1)
}
func (h *updateHeap) addSession(s *UDPSession) {
h.mu.Lock()
heap.Push(h, entry{time.Now(), s})
h.mu.Unlock()
h.wakeup()
}
func (h *updateHeap) removeSession(s *UDPSession) {
h.mu.Lock()
if s.updaterIdx != -1 {
heap.Remove(h, s.updaterIdx)
}
h.mu.Unlock()
}
func (h *updateHeap) wakeup() {
select {
case h.chWakeUp <- struct{}{}:
default:
}
}
func (h *updateHeap) updateTask() {
var timer <-chan time.Time
for {
select {
case <-timer:
case <-h.chWakeUp:
}
h.mu.Lock()
hlen := h.Len()
now := time.Now()
for i := 0; i < hlen; i++ {
entry := heap.Pop(h).(entry)
if now.After(entry.ts) {
entry.ts = now.Add(entry.s.update())
heap.Push(h, entry)
} else {
heap.Push(h, entry)
break
}
}
if hlen > 0 {
timer = time.After(h.entries[0].ts.Sub(now))
}
h.mu.Unlock()
}
}

110
vendor/github.com/xtaci/kcp-go/xor.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kcp
import (
"runtime"
"unsafe"
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
wordBytes := w * wordSize
fastXORWords(dst[:wordBytes], a[:wordBytes], b[:wordBytes])
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
ex := n % 8
for i := 0; i < ex; i++ {
dst[i] = a[i] ^ b[i]
}
for i := ex; i < n; i += 8 {
_dst := dst[i : i+8]
_a := a[i : i+8]
_b := b[i : i+8]
_dst[0] = _a[0] ^ _b[0]
_dst[1] = _a[1] ^ _b[1]
_dst[2] = _a[2] ^ _b[2]
_dst[3] = _a[3] ^ _b[3]
_dst[4] = _a[4] ^ _b[4]
_dst[5] = _a[5] ^ _b[5]
_dst[6] = _a[6] ^ _b[6]
_dst[7] = _a[7] ^ _b[7]
}
return n
}
// xorBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func xorBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastXORBytes(dst, a, b)
}
// TODO(hanwen): if (dst, a, b) have common alignment
// we could still try fastXORBytes. It is not clear
// how often this happens, and it's only worth it if
// the block encryption itself is hardware
// accelerated.
return safeXORBytes(dst, a, b)
}
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
// The arguments are assumed to be of equal length.
func fastXORWords(dst, a, b []byte) {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
n := len(b) / wordSize
ex := n % 8
for i := 0; i < ex; i++ {
dw[i] = aw[i] ^ bw[i]
}
for i := ex; i < n; i += 8 {
_dw := dw[i : i+8]
_aw := aw[i : i+8]
_bw := bw[i : i+8]
_dw[0] = _aw[0] ^ _bw[0]
_dw[1] = _aw[1] ^ _bw[1]
_dw[2] = _aw[2] ^ _bw[2]
_dw[3] = _aw[3] ^ _bw[3]
_dw[4] = _aw[4] ^ _bw[4]
_dw[5] = _aw[5] ^ _bw[5]
_dw[6] = _aw[6] ^ _bw[6]
_dw[7] = _aw[7] ^ _bw[7]
}
}
func xorWords(dst, a, b []byte) {
if supportsUnaligned {
fastXORWords(dst, a, b)
} else {
safeXORBytes(dst, a, b)
}
}
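// Illustrative sketch, not part of the upstream file: combining a keystream
// with a packet buffer in place via xorBytes (dst may alias a). Whether the
// BlockCrypt implementations in this package do exactly this is an assumption
// of this note, not something shown in this file.
func exampleXORInPlace(packet, keystream []byte) int {
	// xorBytes processes min(len(packet), len(keystream)) bytes and returns that count.
	return xorBytes(packet, packet, keystream)
}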

159
vendor/golang.org/x/crypto/blowfish/block.go generated vendored Normal file
View File

@ -0,0 +1,159 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blowfish
// getNextWord returns the next big-endian uint32 value from the byte slice
// at the given position in a circular manner, updating the position.
func getNextWord(b []byte, pos *int) uint32 {
var w uint32
j := *pos
for i := 0; i < 4; i++ {
w = w<<8 | uint32(b[j])
j++
if j >= len(b) {
j = 0
}
}
*pos = j
return w
}
// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
// pi and substitution tables for calls to Encrypt. This is used, primarily,
// by the bcrypt package to reuse the Blowfish key schedule during its
// set up. It's unlikely that you need to use this directly.
func ExpandKey(key []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
// Using inlined getNextWord for performance.
var d uint32
for k := 0; k < 4; k++ {
d = d<<8 | uint32(key[j])
j++
if j >= len(key) {
j = 0
}
}
c.p[i] ^= d
}
var l, r uint32
for i := 0; i < 18; i += 2 {
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
// This is similar to ExpandKey, but folds the salt during the key
// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
// and specializing it here is useful.
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
j := 0
for i := 0; i < 18; i++ {
c.p[i] ^= getNextWord(key, &j)
}
j = 0
var l, r uint32
for i := 0; i < 18; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.p[i], c.p[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s0[i], c.s0[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s1[i], c.s1[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s2[i], c.s2[i+1] = l, r
}
for i := 0; i < 256; i += 2 {
l ^= getNextWord(salt, &j)
r ^= getNextWord(salt, &j)
l, r = encryptBlock(l, r, c)
c.s3[i], c.s3[i+1] = l, r
}
}
func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[0]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
xr ^= c.p[17]
return xr, xl
}
func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
xl, xr := l, r
xl ^= c.p[17]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
xr ^= c.p[0]
return xr, xl
}

91
vendor/golang.org/x/crypto/blowfish/cipher.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
package blowfish
// The code is a port of Bruce Schneier's C implementation.
// See http://www.schneier.com/blowfish.html.
import "strconv"
// The Blowfish block size in bytes.
const BlockSize = 8
// A Cipher is an instance of Blowfish encryption using a particular key.
type Cipher struct {
p [18]uint32
s0, s1, s2, s3 [256]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Blowfish key, from 1 to 56 bytes.
func NewCipher(key []byte) (*Cipher, error) {
var result Cipher
if k := len(key); k < 1 || k > 56 {
return nil, KeySizeError(k)
}
initCipher(&result)
ExpandKey(key, &result)
return &result, nil
}
// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {
return NewCipher(key)
}
var result Cipher
if k := len(key); k < 1 {
return nil, KeySizeError(k)
}
initCipher(&result)
expandKeyWithSalt(key, salt, &result)
return &result, nil
}
// BlockSize returns the Blowfish block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8-byte buffer src using the key k
// and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = encryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
// Decrypt decrypts the 8-byte buffer src using the key k
// and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = decryptBlock(l, r, c)
dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
}
func initCipher(c *Cipher) {
copy(c.p[0:], p[0:])
copy(c.s0[0:], s0[0:])
copy(c.s1[0:], s1[0:])
copy(c.s2[0:], s2[0:])
copy(c.s3[0:], s3[0:])
}
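// Illustrative sketch, not part of the upstream file: encrypting and then
// decrypting a single 8-byte block with the Cipher defined above. The key
// bytes are arbitrary example values; real data should use a mode such as
// CBC rather than raw block calls.
func exampleSingleBlock() ([]byte, error) {
	c, err := NewCipher([]byte("an example key"))
	if err != nil {
		return nil, err
	}
	src := []byte("8 bytes!") // exactly one BlockSize worth of data
	ct := make([]byte, BlockSize)
	c.Encrypt(ct, src)
	pt := make([]byte, BlockSize)
	c.Decrypt(pt, ct) // pt now equals src
	return pt, nil
}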

199
vendor/golang.org/x/crypto/blowfish/const.go generated vendored Normal file
View File

@ -0,0 +1,199 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The startup permutation array and substitution boxes.
// They are the hexadecimal digits of PI; see:
// http://www.schneier.com/code/constants.txt.
package blowfish
var s0 = [256]uint32{
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
}
var s1 = [256]uint32{
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
}
var s2 = [256]uint32{
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
}
var s3 = [256]uint32{
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
}
var p = [18]uint32{
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
}

526
vendor/golang.org/x/crypto/cast5/cast5.go generated vendored Normal file
View File

@ -0,0 +1,526 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
// OpenPGP cipher.
package cast5
import "errors"
const BlockSize = 8
const KeySize = 16
type Cipher struct {
masking [16]uint32
rotate [16]uint8
}
func NewCipher(key []byte) (c *Cipher, err error) {
if len(key) != KeySize {
return nil, errors.New("CAST5: keys must be 16 bytes")
}
c = new(Cipher)
c.keySchedule(key)
return
}
func (c *Cipher) BlockSize() int {
return BlockSize
}
func (c *Cipher) Encrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
func (c *Cipher) Decrypt(dst, src []byte) {
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
dst[0] = uint8(r >> 24)
dst[1] = uint8(r >> 16)
dst[2] = uint8(r >> 8)
dst[3] = uint8(r)
dst[4] = uint8(l >> 24)
dst[5] = uint8(l >> 16)
dst[6] = uint8(l >> 8)
dst[7] = uint8(l)
}
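Illustrative usage (not part of the vendored file): a minimal encrypt/decrypt round trip with a placeholder 16-byte key, since NewCipher rejects any other key length.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/cast5"
)

func main() {
	key := []byte("0123456789abcdef") // CAST5 requires exactly 16 bytes
	c, err := cast5.NewCipher(key)
	if err != nil {
		panic(err)
	}

	src := []byte("8bytemsg") // one 8-byte block
	dst := make([]byte, cast5.BlockSize)
	c.Encrypt(dst, src)

	back := make([]byte, cast5.BlockSize)
	c.Decrypt(back, dst)
	fmt.Println(bytes.Equal(src, back)) // true
}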
type keyScheduleA [4][7]uint8
type keyScheduleB [4][5]uint8
// keyScheduleRound contains the magic values for a round of the key schedule.
// The keyScheduleA deals with the lines like:
// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
// Conceptually, both x and z are in the same array, x first. The first
// element describes which word of this array gets written to and the
// second, which word gets read. So, for the line above, it's "4, 0", because
// it's writing to the first word of z, which, being after x, is word 4, and
// reading from the first word of x: word 0.
//
// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
// that it's z that we're indexing.
//
// keyScheduleB deals with lines like:
// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
// "K1" is ignored because key words are always written in order. So the five
// elements are the S-box indexes. They use the same form as in keyScheduleA,
// above.
type keyScheduleRound struct{}
type keySchedule []keyScheduleRound
var schedule = []struct {
a keyScheduleA
b keyScheduleB
}{
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
{16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
{16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
{16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{3, 2, 0xc, 0xd, 8},
{1, 0, 0xe, 0xf, 0xd},
{7, 6, 8, 9, 3},
{5, 4, 0xa, 0xb, 7},
},
},
{
keyScheduleA{
{4, 0, 0xd, 0xf, 0xc, 0xe, 8},
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
},
keyScheduleB{
{16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
{16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
{16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
{16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
},
},
{
keyScheduleA{
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
{1, 4, 0, 2, 1, 3, 16 + 2},
{2, 5, 7, 6, 5, 4, 16 + 1},
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
},
keyScheduleB{
{8, 9, 7, 6, 3},
{0xa, 0xb, 5, 4, 7},
{0xc, 0xd, 3, 2, 8},
{0xe, 0xf, 1, 0, 0xd},
},
},
}
func (c *Cipher) keySchedule(in []byte) {
var t [8]uint32
var k [32]uint32
for i := 0; i < 4; i++ {
j := i * 4
t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
}
x := []byte{6, 7, 4, 5}
ki := 0
for half := 0; half < 2; half++ {
for _, round := range schedule {
for j := 0; j < 4; j++ {
var a [7]uint8
copy(a[:], round.a[j][:])
w := t[a[1]]
w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
t[a[0]] = w
}
for j := 0; j < 4; j++ {
var b [5]uint8
copy(b[:], round.b[j][:])
w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
k[ki] = w
ki++
}
}
}
for i := 0; i < 16; i++ {
c.masking[i] = k[i]
c.rotate[i] = uint8(k[16+i] & 0x1f)
}
}
// These are the three 'f' functions. See RFC 2144, section 2.2.
func f1(d, m uint32, r uint8) uint32 {
t := m + d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
}
func f2(d, m uint32, r uint8) uint32 {
t := m ^ d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
}
func f3(d, m uint32, r uint8) uint32 {
t := m - d
I := (t << r) | (t >> (32 - r))
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
}
var sBox = [8][256]uint32{
{
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
},
{
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
},
{
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
},
{
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
},
{
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
},
{
0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
},
{
0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
},
{
0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
},
}

144
vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package salsa provides low-level access to functions in the Salsa family.
package salsa
// Sigma is the Salsa20 constant for 256-bit keys.
var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
// key k, and 16-byte constant c, and puts the result into the 32-byte array
// out.
func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
for i := 0; i < 20; i += 2 {
u := x0 + x12
x4 ^= u<<7 | u>>(32-7)
u = x4 + x0
x8 ^= u<<9 | u>>(32-9)
u = x8 + x4
x12 ^= u<<13 | u>>(32-13)
u = x12 + x8
x0 ^= u<<18 | u>>(32-18)
u = x5 + x1
x9 ^= u<<7 | u>>(32-7)
u = x9 + x5
x13 ^= u<<9 | u>>(32-9)
u = x13 + x9
x1 ^= u<<13 | u>>(32-13)
u = x1 + x13
x5 ^= u<<18 | u>>(32-18)
u = x10 + x6
x14 ^= u<<7 | u>>(32-7)
u = x14 + x10
x2 ^= u<<9 | u>>(32-9)
u = x2 + x14
x6 ^= u<<13 | u>>(32-13)
u = x6 + x2
x10 ^= u<<18 | u>>(32-18)
u = x15 + x11
x3 ^= u<<7 | u>>(32-7)
u = x3 + x15
x7 ^= u<<9 | u>>(32-9)
u = x7 + x3
x11 ^= u<<13 | u>>(32-13)
u = x11 + x7
x15 ^= u<<18 | u>>(32-18)
u = x0 + x3
x1 ^= u<<7 | u>>(32-7)
u = x1 + x0
x2 ^= u<<9 | u>>(32-9)
u = x2 + x1
x3 ^= u<<13 | u>>(32-13)
u = x3 + x2
x0 ^= u<<18 | u>>(32-18)
u = x5 + x4
x6 ^= u<<7 | u>>(32-7)
u = x6 + x5
x7 ^= u<<9 | u>>(32-9)
u = x7 + x6
x4 ^= u<<13 | u>>(32-13)
u = x4 + x7
x5 ^= u<<18 | u>>(32-18)
u = x10 + x9
x11 ^= u<<7 | u>>(32-7)
u = x11 + x10
x8 ^= u<<9 | u>>(32-9)
u = x8 + x11
x9 ^= u<<13 | u>>(32-13)
u = x9 + x8
x10 ^= u<<18 | u>>(32-18)
u = x15 + x14
x12 ^= u<<7 | u>>(32-7)
u = x12 + x15
x13 ^= u<<9 | u>>(32-9)
u = x13 + x12
x14 ^= u<<13 | u>>(32-13)
u = x14 + x13
x15 ^= u<<18 | u>>(32-18)
}
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x5)
out[5] = byte(x5 >> 8)
out[6] = byte(x5 >> 16)
out[7] = byte(x5 >> 24)
out[8] = byte(x10)
out[9] = byte(x10 >> 8)
out[10] = byte(x10 >> 16)
out[11] = byte(x10 >> 24)
out[12] = byte(x15)
out[13] = byte(x15 >> 8)
out[14] = byte(x15 >> 16)
out[15] = byte(x15 >> 24)
out[16] = byte(x6)
out[17] = byte(x6 >> 8)
out[18] = byte(x6 >> 16)
out[19] = byte(x6 >> 24)
out[20] = byte(x7)
out[21] = byte(x7 >> 8)
out[22] = byte(x7 >> 16)
out[23] = byte(x7 >> 24)
out[24] = byte(x8)
out[25] = byte(x8 >> 8)
out[26] = byte(x8 >> 16)
out[27] = byte(x8 >> 24)
out[28] = byte(x9)
out[29] = byte(x9 >> 8)
out[30] = byte(x9 >> 16)
out[31] = byte(x9 >> 24)
}
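Illustrative usage (not part of the vendored file): a minimal sketch of the XSalsa20-style subkey derivation this core function is used for, feeding the first 16 bytes of a 24-byte nonce and a 32-byte key through HSalsa20 with the standard Sigma constant. The key and nonce values are placeholders.

package main

import (
	"fmt"

	"golang.org/x/crypto/salsa20/salsa"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	copy(key[:], "an example 32-byte key.........!")
	copy(nonce[:], "an example 24-byte nonce")

	// Derive a per-nonce subkey from the long-term key and the nonce prefix.
	var hNonce [16]byte
	copy(hNonce[:], nonce[:16])
	var subKey [32]byte
	salsa.HSalsa20(&subKey, &hNonce, &key, &salsa.Sigma)
	fmt.Printf("%x\n", subKey[:8])
}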

889
vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s generated vendored Normal file
View File

@ -0,0 +1,889 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size.
TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
MOVQ out+0(FP),DI
MOVQ in+8(FP),SI
MOVQ n+16(FP),DX
MOVQ nonce+24(FP),CX
MOVQ key+32(FP),R8
MOVQ SP,R12
MOVQ SP,R9
ADDQ $31, R9
ANDQ $~31, R9
MOVQ R9, SP
MOVQ DX,R9
MOVQ CX,DX
MOVQ R8,R10
CMPQ R9,$0
JBE DONE
START:
MOVL 20(R10),CX
MOVL 0(R10),R8
MOVL 0(DX),AX
MOVL 16(R10),R11
MOVL CX,0(SP)
MOVL R8, 4 (SP)
MOVL AX, 8 (SP)
MOVL R11, 12 (SP)
MOVL 8(DX),CX
MOVL 24(R10),R8
MOVL 4(R10),AX
MOVL 4(DX),R11
MOVL CX,16(SP)
MOVL R8, 20 (SP)
MOVL AX, 24 (SP)
MOVL R11, 28 (SP)
MOVL 12(DX),CX
MOVL 12(R10),DX
MOVL 28(R10),R8
MOVL 8(R10),AX
MOVL DX,32(SP)
MOVL CX, 36 (SP)
MOVL R8, 40 (SP)
MOVL AX, 44 (SP)
MOVQ $1634760805,DX
MOVQ $857760878,CX
MOVQ $2036477234,R8
MOVQ $1797285236,AX
MOVL DX,48(SP)
MOVL CX, 52 (SP)
MOVL R8, 56 (SP)
MOVL AX, 60 (SP)
CMPQ R9,$256
JB BYTESBETWEEN1AND255
MOVOA 48(SP),X0
PSHUFL $0X55,X0,X1
PSHUFL $0XAA,X0,X2
PSHUFL $0XFF,X0,X3
PSHUFL $0X00,X0,X0
MOVOA X1,64(SP)
MOVOA X2,80(SP)
MOVOA X3,96(SP)
MOVOA X0,112(SP)
MOVOA 0(SP),X0
PSHUFL $0XAA,X0,X1
PSHUFL $0XFF,X0,X2
PSHUFL $0X00,X0,X3
PSHUFL $0X55,X0,X0
MOVOA X1,128(SP)
MOVOA X2,144(SP)
MOVOA X3,160(SP)
MOVOA X0,176(SP)
MOVOA 16(SP),X0
PSHUFL $0XFF,X0,X1
PSHUFL $0X55,X0,X2
PSHUFL $0XAA,X0,X0
MOVOA X1,192(SP)
MOVOA X2,208(SP)
MOVOA X0,224(SP)
MOVOA 32(SP),X0
PSHUFL $0X00,X0,X1
PSHUFL $0XAA,X0,X2
PSHUFL $0XFF,X0,X0
MOVOA X1,240(SP)
MOVOA X2,256(SP)
MOVOA X0,272(SP)
BYTESATLEAST256:
MOVL 16(SP),DX
MOVL 36 (SP),CX
MOVL DX,288(SP)
MOVL CX,304(SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 292 (SP)
MOVL CX, 308 (SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 296 (SP)
MOVL CX, 312 (SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX, 300 (SP)
MOVL CX, 316 (SP)
ADDQ $1,DX
SHLQ $32,CX
ADDQ CX,DX
MOVQ DX,CX
SHRQ $32,CX
MOVL DX,16(SP)
MOVL CX, 36 (SP)
MOVQ R9,352(SP)
MOVQ $20,DX
MOVOA 64(SP),X0
MOVOA 80(SP),X1
MOVOA 96(SP),X2
MOVOA 256(SP),X3
MOVOA 272(SP),X4
MOVOA 128(SP),X5
MOVOA 144(SP),X6
MOVOA 176(SP),X7
MOVOA 192(SP),X8
MOVOA 208(SP),X9
MOVOA 224(SP),X10
MOVOA 304(SP),X11
MOVOA 112(SP),X12
MOVOA 160(SP),X13
MOVOA 240(SP),X14
MOVOA 288(SP),X15
MAINLOOP1:
MOVOA X1,320(SP)
MOVOA X2,336(SP)
MOVOA X13,X1
PADDL X12,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X14
PSRLL $25,X2
PXOR X2,X14
MOVOA X7,X1
PADDL X0,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X11
PSRLL $25,X2
PXOR X2,X11
MOVOA X12,X1
PADDL X14,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X15
PSRLL $23,X2
PXOR X2,X15
MOVOA X0,X1
PADDL X11,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X9
PSRLL $23,X2
PXOR X2,X9
MOVOA X14,X1
PADDL X15,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X13
PSRLL $19,X2
PXOR X2,X13
MOVOA X11,X1
PADDL X9,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X7
PSRLL $19,X2
PXOR X2,X7
MOVOA X15,X1
PADDL X13,X1
MOVOA X1,X2
PSLLL $18,X1
PXOR X1,X12
PSRLL $14,X2
PXOR X2,X12
MOVOA 320(SP),X1
MOVOA X12,320(SP)
MOVOA X9,X2
PADDL X7,X2
MOVOA X2,X12
PSLLL $18,X2
PXOR X2,X0
PSRLL $14,X12
PXOR X12,X0
MOVOA X5,X2
PADDL X1,X2
MOVOA X2,X12
PSLLL $7,X2
PXOR X2,X3
PSRLL $25,X12
PXOR X12,X3
MOVOA 336(SP),X2
MOVOA X0,336(SP)
MOVOA X6,X0
PADDL X2,X0
MOVOA X0,X12
PSLLL $7,X0
PXOR X0,X4
PSRLL $25,X12
PXOR X12,X4
MOVOA X1,X0
PADDL X3,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X10
PSRLL $23,X12
PXOR X12,X10
MOVOA X2,X0
PADDL X4,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X8
PSRLL $23,X12
PXOR X12,X8
MOVOA X3,X0
PADDL X10,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X5
PSRLL $19,X12
PXOR X12,X5
MOVOA X4,X0
PADDL X8,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X6
PSRLL $19,X12
PXOR X12,X6
MOVOA X10,X0
PADDL X5,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X1
PSRLL $14,X12
PXOR X12,X1
MOVOA 320(SP),X0
MOVOA X1,320(SP)
MOVOA X4,X1
PADDL X0,X1
MOVOA X1,X12
PSLLL $7,X1
PXOR X1,X7
PSRLL $25,X12
PXOR X12,X7
MOVOA X8,X1
PADDL X6,X1
MOVOA X1,X12
PSLLL $18,X1
PXOR X1,X2
PSRLL $14,X12
PXOR X12,X2
MOVOA 336(SP),X12
MOVOA X2,336(SP)
MOVOA X14,X1
PADDL X12,X1
MOVOA X1,X2
PSLLL $7,X1
PXOR X1,X5
PSRLL $25,X2
PXOR X2,X5
MOVOA X0,X1
PADDL X7,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X10
PSRLL $23,X2
PXOR X2,X10
MOVOA X12,X1
PADDL X5,X1
MOVOA X1,X2
PSLLL $9,X1
PXOR X1,X8
PSRLL $23,X2
PXOR X2,X8
MOVOA X7,X1
PADDL X10,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X4
PSRLL $19,X2
PXOR X2,X4
MOVOA X5,X1
PADDL X8,X1
MOVOA X1,X2
PSLLL $13,X1
PXOR X1,X14
PSRLL $19,X2
PXOR X2,X14
MOVOA X10,X1
PADDL X4,X1
MOVOA X1,X2
PSLLL $18,X1
PXOR X1,X0
PSRLL $14,X2
PXOR X2,X0
MOVOA 320(SP),X1
MOVOA X0,320(SP)
MOVOA X8,X0
PADDL X14,X0
MOVOA X0,X2
PSLLL $18,X0
PXOR X0,X12
PSRLL $14,X2
PXOR X2,X12
MOVOA X11,X0
PADDL X1,X0
MOVOA X0,X2
PSLLL $7,X0
PXOR X0,X6
PSRLL $25,X2
PXOR X2,X6
MOVOA 336(SP),X2
MOVOA X12,336(SP)
MOVOA X3,X0
PADDL X2,X0
MOVOA X0,X12
PSLLL $7,X0
PXOR X0,X13
PSRLL $25,X12
PXOR X12,X13
MOVOA X1,X0
PADDL X6,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X15
PSRLL $23,X12
PXOR X12,X15
MOVOA X2,X0
PADDL X13,X0
MOVOA X0,X12
PSLLL $9,X0
PXOR X0,X9
PSRLL $23,X12
PXOR X12,X9
MOVOA X6,X0
PADDL X15,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X11
PSRLL $19,X12
PXOR X12,X11
MOVOA X13,X0
PADDL X9,X0
MOVOA X0,X12
PSLLL $13,X0
PXOR X0,X3
PSRLL $19,X12
PXOR X12,X3
MOVOA X15,X0
PADDL X11,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X1
PSRLL $14,X12
PXOR X12,X1
MOVOA X9,X0
PADDL X3,X0
MOVOA X0,X12
PSLLL $18,X0
PXOR X0,X2
PSRLL $14,X12
PXOR X12,X2
MOVOA 320(SP),X12
MOVOA 336(SP),X0
SUBQ $2,DX
JA MAINLOOP1
PADDL 112(SP),X12
PADDL 176(SP),X7
PADDL 224(SP),X10
PADDL 272(SP),X4
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 0(SI),DX
XORL 4(SI),CX
XORL 8(SI),R8
XORL 12(SI),R9
MOVL DX,0(DI)
MOVL CX,4(DI)
MOVL R8,8(DI)
MOVL R9,12(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 64(SI),DX
XORL 68(SI),CX
XORL 72(SI),R8
XORL 76(SI),R9
MOVL DX,64(DI)
MOVL CX,68(DI)
MOVL R8,72(DI)
MOVL R9,76(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
PSHUFL $0X39,X12,X12
PSHUFL $0X39,X7,X7
PSHUFL $0X39,X10,X10
PSHUFL $0X39,X4,X4
XORL 128(SI),DX
XORL 132(SI),CX
XORL 136(SI),R8
XORL 140(SI),R9
MOVL DX,128(DI)
MOVL CX,132(DI)
MOVL R8,136(DI)
MOVL R9,140(DI)
MOVD X12,DX
MOVD X7,CX
MOVD X10,R8
MOVD X4,R9
XORL 192(SI),DX
XORL 196(SI),CX
XORL 200(SI),R8
XORL 204(SI),R9
MOVL DX,192(DI)
MOVL CX,196(DI)
MOVL R8,200(DI)
MOVL R9,204(DI)
PADDL 240(SP),X14
PADDL 64(SP),X0
PADDL 128(SP),X5
PADDL 192(SP),X8
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 16(SI),DX
XORL 20(SI),CX
XORL 24(SI),R8
XORL 28(SI),R9
MOVL DX,16(DI)
MOVL CX,20(DI)
MOVL R8,24(DI)
MOVL R9,28(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 80(SI),DX
XORL 84(SI),CX
XORL 88(SI),R8
XORL 92(SI),R9
MOVL DX,80(DI)
MOVL CX,84(DI)
MOVL R8,88(DI)
MOVL R9,92(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
PSHUFL $0X39,X14,X14
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X5,X5
PSHUFL $0X39,X8,X8
XORL 144(SI),DX
XORL 148(SI),CX
XORL 152(SI),R8
XORL 156(SI),R9
MOVL DX,144(DI)
MOVL CX,148(DI)
MOVL R8,152(DI)
MOVL R9,156(DI)
MOVD X14,DX
MOVD X0,CX
MOVD X5,R8
MOVD X8,R9
XORL 208(SI),DX
XORL 212(SI),CX
XORL 216(SI),R8
XORL 220(SI),R9
MOVL DX,208(DI)
MOVL CX,212(DI)
MOVL R8,216(DI)
MOVL R9,220(DI)
PADDL 288(SP),X15
PADDL 304(SP),X11
PADDL 80(SP),X1
PADDL 144(SP),X6
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 32(SI),DX
XORL 36(SI),CX
XORL 40(SI),R8
XORL 44(SI),R9
MOVL DX,32(DI)
MOVL CX,36(DI)
MOVL R8,40(DI)
MOVL R9,44(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 96(SI),DX
XORL 100(SI),CX
XORL 104(SI),R8
XORL 108(SI),R9
MOVL DX,96(DI)
MOVL CX,100(DI)
MOVL R8,104(DI)
MOVL R9,108(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
PSHUFL $0X39,X15,X15
PSHUFL $0X39,X11,X11
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X6,X6
XORL 160(SI),DX
XORL 164(SI),CX
XORL 168(SI),R8
XORL 172(SI),R9
MOVL DX,160(DI)
MOVL CX,164(DI)
MOVL R8,168(DI)
MOVL R9,172(DI)
MOVD X15,DX
MOVD X11,CX
MOVD X1,R8
MOVD X6,R9
XORL 224(SI),DX
XORL 228(SI),CX
XORL 232(SI),R8
XORL 236(SI),R9
MOVL DX,224(DI)
MOVL CX,228(DI)
MOVL R8,232(DI)
MOVL R9,236(DI)
PADDL 160(SP),X13
PADDL 208(SP),X9
PADDL 256(SP),X3
PADDL 96(SP),X2
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 48(SI),DX
XORL 52(SI),CX
XORL 56(SI),R8
XORL 60(SI),R9
MOVL DX,48(DI)
MOVL CX,52(DI)
MOVL R8,56(DI)
MOVL R9,60(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 112(SI),DX
XORL 116(SI),CX
XORL 120(SI),R8
XORL 124(SI),R9
MOVL DX,112(DI)
MOVL CX,116(DI)
MOVL R8,120(DI)
MOVL R9,124(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
PSHUFL $0X39,X13,X13
PSHUFL $0X39,X9,X9
PSHUFL $0X39,X3,X3
PSHUFL $0X39,X2,X2
XORL 176(SI),DX
XORL 180(SI),CX
XORL 184(SI),R8
XORL 188(SI),R9
MOVL DX,176(DI)
MOVL CX,180(DI)
MOVL R8,184(DI)
MOVL R9,188(DI)
MOVD X13,DX
MOVD X9,CX
MOVD X3,R8
MOVD X2,R9
XORL 240(SI),DX
XORL 244(SI),CX
XORL 248(SI),R8
XORL 252(SI),R9
MOVL DX,240(DI)
MOVL CX,244(DI)
MOVL R8,248(DI)
MOVL R9,252(DI)
MOVQ 352(SP),R9
SUBQ $256,R9
ADDQ $256,SI
ADDQ $256,DI
CMPQ R9,$256
JAE BYTESATLEAST256
CMPQ R9,$0
JBE DONE
BYTESBETWEEN1AND255:
CMPQ R9,$64
JAE NOCOPY
MOVQ DI,DX
LEAQ 360(SP),DI
MOVQ R9,CX
REP; MOVSB
LEAQ 360(SP),DI
LEAQ 360(SP),SI
NOCOPY:
MOVQ R9,352(SP)
MOVOA 48(SP),X0
MOVOA 0(SP),X1
MOVOA 16(SP),X2
MOVOA 32(SP),X3
MOVOA X1,X4
MOVQ $20,CX
MAINLOOP2:
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X3
PXOR X6,X3
PADDL X3,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X3,X3
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X1
PSHUFL $0X4E,X2,X2
PXOR X6,X1
PADDL X1,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X1,X1
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X1
PXOR X6,X1
PADDL X1,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X1,X1
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X3
PSHUFL $0X4E,X2,X2
PXOR X6,X3
PADDL X3,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X3,X3
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X3
PXOR X6,X3
PADDL X3,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X3,X3
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X1
PSHUFL $0X4E,X2,X2
PXOR X6,X1
PADDL X1,X5
MOVOA X3,X4
MOVOA X5,X6
PSLLL $18,X5
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X1,X1
PXOR X6,X0
PADDL X0,X4
MOVOA X0,X5
MOVOA X4,X6
PSLLL $7,X4
PSRLL $25,X6
PXOR X4,X1
PXOR X6,X1
PADDL X1,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $9,X5
PSRLL $23,X6
PXOR X5,X2
PSHUFL $0X93,X1,X1
PXOR X6,X2
PADDL X2,X4
MOVOA X2,X5
MOVOA X4,X6
PSLLL $13,X4
PSRLL $19,X6
PXOR X4,X3
PSHUFL $0X4E,X2,X2
PXOR X6,X3
SUBQ $4,CX
PADDL X3,X5
MOVOA X1,X4
MOVOA X5,X6
PSLLL $18,X5
PXOR X7,X7
PSRLL $14,X6
PXOR X5,X0
PSHUFL $0X39,X3,X3
PXOR X6,X0
JA MAINLOOP2
PADDL 48(SP),X0
PADDL 0(SP),X1
PADDL 16(SP),X2
PADDL 32(SP),X3
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 0(SI),CX
XORL 48(SI),R8
XORL 32(SI),R9
XORL 16(SI),AX
MOVL CX,0(DI)
MOVL R8,48(DI)
MOVL R9,32(DI)
MOVL AX,16(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 20(SI),CX
XORL 4(SI),R8
XORL 52(SI),R9
XORL 36(SI),AX
MOVL CX,20(DI)
MOVL R8,4(DI)
MOVL R9,52(DI)
MOVL AX,36(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
PSHUFL $0X39,X0,X0
PSHUFL $0X39,X1,X1
PSHUFL $0X39,X2,X2
PSHUFL $0X39,X3,X3
XORL 40(SI),CX
XORL 24(SI),R8
XORL 8(SI),R9
XORL 56(SI),AX
MOVL CX,40(DI)
MOVL R8,24(DI)
MOVL R9,8(DI)
MOVL AX,56(DI)
MOVD X0,CX
MOVD X1,R8
MOVD X2,R9
MOVD X3,AX
XORL 60(SI),CX
XORL 44(SI),R8
XORL 28(SI),R9
XORL 12(SI),AX
MOVL CX,60(DI)
MOVL R8,44(DI)
MOVL R9,28(DI)
MOVL AX,12(DI)
MOVQ 352(SP),R9
MOVL 16(SP),CX
MOVL 36(SP),R8
ADDQ $1,CX
SHLQ $32,R8
ADDQ R8,CX
MOVQ CX,R8
SHRQ $32,R8
MOVL CX,16(SP)
MOVL R8,36(SP)
CMPQ R9,$64
JA BYTESATLEAST65
JAE BYTESATLEAST64
MOVQ DI,SI
MOVQ DX,DI
MOVQ R9,CX
REP; MOVSB
BYTESATLEAST64:
DONE:
MOVQ R12,SP
RET
BYTESATLEAST65:
SUBQ $64,R9
ADDQ $64,DI
ADDQ $64,SI
JMP BYTESBETWEEN1AND255

199
vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go generated vendored Normal file

@ -0,0 +1,199 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package salsa
// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
// the result into the 64-byte array out. The input and output may be the same array.
func Core208(out *[64]byte, in *[64]byte) {
j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
for i := 0; i < 8; i += 2 {
u := x0 + x12
x4 ^= u<<7 | u>>(32-7)
u = x4 + x0
x8 ^= u<<9 | u>>(32-9)
u = x8 + x4
x12 ^= u<<13 | u>>(32-13)
u = x12 + x8
x0 ^= u<<18 | u>>(32-18)
u = x5 + x1
x9 ^= u<<7 | u>>(32-7)
u = x9 + x5
x13 ^= u<<9 | u>>(32-9)
u = x13 + x9
x1 ^= u<<13 | u>>(32-13)
u = x1 + x13
x5 ^= u<<18 | u>>(32-18)
u = x10 + x6
x14 ^= u<<7 | u>>(32-7)
u = x14 + x10
x2 ^= u<<9 | u>>(32-9)
u = x2 + x14
x6 ^= u<<13 | u>>(32-13)
u = x6 + x2
x10 ^= u<<18 | u>>(32-18)
u = x15 + x11
x3 ^= u<<7 | u>>(32-7)
u = x3 + x15
x7 ^= u<<9 | u>>(32-9)
u = x7 + x3
x11 ^= u<<13 | u>>(32-13)
u = x11 + x7
x15 ^= u<<18 | u>>(32-18)
u = x0 + x3
x1 ^= u<<7 | u>>(32-7)
u = x1 + x0
x2 ^= u<<9 | u>>(32-9)
u = x2 + x1
x3 ^= u<<13 | u>>(32-13)
u = x3 + x2
x0 ^= u<<18 | u>>(32-18)
u = x5 + x4
x6 ^= u<<7 | u>>(32-7)
u = x6 + x5
x7 ^= u<<9 | u>>(32-9)
u = x7 + x6
x4 ^= u<<13 | u>>(32-13)
u = x4 + x7
x5 ^= u<<18 | u>>(32-18)
u = x10 + x9
x11 ^= u<<7 | u>>(32-7)
u = x11 + x10
x8 ^= u<<9 | u>>(32-9)
u = x8 + x11
x9 ^= u<<13 | u>>(32-13)
u = x9 + x8
x10 ^= u<<18 | u>>(32-18)
u = x15 + x14
x12 ^= u<<7 | u>>(32-7)
u = x12 + x15
x13 ^= u<<9 | u>>(32-9)
u = x13 + x12
x14 ^= u<<13 | u>>(32-13)
u = x14 + x13
x15 ^= u<<18 | u>>(32-18)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += j4
x5 += j5
x6 += j6
x7 += j7
x8 += j8
x9 += j9
x10 += j10
x11 += j11
x12 += j12
x13 += j13
x14 += j14
x15 += j15
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x1)
out[5] = byte(x1 >> 8)
out[6] = byte(x1 >> 16)
out[7] = byte(x1 >> 24)
out[8] = byte(x2)
out[9] = byte(x2 >> 8)
out[10] = byte(x2 >> 16)
out[11] = byte(x2 >> 24)
out[12] = byte(x3)
out[13] = byte(x3 >> 8)
out[14] = byte(x3 >> 16)
out[15] = byte(x3 >> 24)
out[16] = byte(x4)
out[17] = byte(x4 >> 8)
out[18] = byte(x4 >> 16)
out[19] = byte(x4 >> 24)
out[20] = byte(x5)
out[21] = byte(x5 >> 8)
out[22] = byte(x5 >> 16)
out[23] = byte(x5 >> 24)
out[24] = byte(x6)
out[25] = byte(x6 >> 8)
out[26] = byte(x6 >> 16)
out[27] = byte(x6 >> 24)
out[28] = byte(x7)
out[29] = byte(x7 >> 8)
out[30] = byte(x7 >> 16)
out[31] = byte(x7 >> 24)
out[32] = byte(x8)
out[33] = byte(x8 >> 8)
out[34] = byte(x8 >> 16)
out[35] = byte(x8 >> 24)
out[36] = byte(x9)
out[37] = byte(x9 >> 8)
out[38] = byte(x9 >> 16)
out[39] = byte(x9 >> 24)
out[40] = byte(x10)
out[41] = byte(x10 >> 8)
out[42] = byte(x10 >> 16)
out[43] = byte(x10 >> 24)
out[44] = byte(x11)
out[45] = byte(x11 >> 8)
out[46] = byte(x11 >> 16)
out[47] = byte(x11 >> 24)
out[48] = byte(x12)
out[49] = byte(x12 >> 8)
out[50] = byte(x12 >> 16)
out[51] = byte(x12 >> 24)
out[52] = byte(x13)
out[53] = byte(x13 >> 8)
out[54] = byte(x13 >> 16)
out[55] = byte(x13 >> 24)
out[56] = byte(x14)
out[57] = byte(x14 >> 8)
out[58] = byte(x14 >> 16)
out[59] = byte(x14 >> 24)
out[60] = byte(x15)
out[61] = byte(x15 >> 8)
out[62] = byte(x15 >> 16)
out[63] = byte(x15 >> 24)
}
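A minimal sketch of calling Core208 directly (the input bytes below are arbitrary demo values; the function simply maps one 64-byte block to another, and in and out may alias):

package main

import (
    "fmt"

    "golang.org/x/crypto/salsa20/salsa"
)

func main() {
    var in, out [64]byte
    for i := range in {
        in[i] = byte(i) // arbitrary demo input block
    }
    salsa.Core208(&out, &in) // apply the Salsa20/8 core
    fmt.Printf("%x\n", out[:8])
}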

23
vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go generated vendored Normal file

@ -0,0 +1,23 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!appengine,!gccgo
package salsa
// This function is implemented in salsa2020_amd64.s.
//go:noescape
func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out may be the same slice but otherwise should not overlap. Counter
// contains the raw salsa20 counter bytes (both nonce and block counter).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
if len(in) == 0 {
return
}
salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
}

234
vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go generated vendored Normal file

@ -0,0 +1,234 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine gccgo
package salsa
const rounds = 20
// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
// and 16-byte constant c, and puts the result into 64-byte array out.
func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
for i := 0; i < rounds; i += 2 {
u := x0 + x12
x4 ^= u<<7 | u>>(32-7)
u = x4 + x0
x8 ^= u<<9 | u>>(32-9)
u = x8 + x4
x12 ^= u<<13 | u>>(32-13)
u = x12 + x8
x0 ^= u<<18 | u>>(32-18)
u = x5 + x1
x9 ^= u<<7 | u>>(32-7)
u = x9 + x5
x13 ^= u<<9 | u>>(32-9)
u = x13 + x9
x1 ^= u<<13 | u>>(32-13)
u = x1 + x13
x5 ^= u<<18 | u>>(32-18)
u = x10 + x6
x14 ^= u<<7 | u>>(32-7)
u = x14 + x10
x2 ^= u<<9 | u>>(32-9)
u = x2 + x14
x6 ^= u<<13 | u>>(32-13)
u = x6 + x2
x10 ^= u<<18 | u>>(32-18)
u = x15 + x11
x3 ^= u<<7 | u>>(32-7)
u = x3 + x15
x7 ^= u<<9 | u>>(32-9)
u = x7 + x3
x11 ^= u<<13 | u>>(32-13)
u = x11 + x7
x15 ^= u<<18 | u>>(32-18)
u = x0 + x3
x1 ^= u<<7 | u>>(32-7)
u = x1 + x0
x2 ^= u<<9 | u>>(32-9)
u = x2 + x1
x3 ^= u<<13 | u>>(32-13)
u = x3 + x2
x0 ^= u<<18 | u>>(32-18)
u = x5 + x4
x6 ^= u<<7 | u>>(32-7)
u = x6 + x5
x7 ^= u<<9 | u>>(32-9)
u = x7 + x6
x4 ^= u<<13 | u>>(32-13)
u = x4 + x7
x5 ^= u<<18 | u>>(32-18)
u = x10 + x9
x11 ^= u<<7 | u>>(32-7)
u = x11 + x10
x8 ^= u<<9 | u>>(32-9)
u = x8 + x11
x9 ^= u<<13 | u>>(32-13)
u = x9 + x8
x10 ^= u<<18 | u>>(32-18)
u = x15 + x14
x12 ^= u<<7 | u>>(32-7)
u = x12 + x15
x13 ^= u<<9 | u>>(32-9)
u = x13 + x12
x14 ^= u<<13 | u>>(32-13)
u = x14 + x13
x15 ^= u<<18 | u>>(32-18)
}
x0 += j0
x1 += j1
x2 += j2
x3 += j3
x4 += j4
x5 += j5
x6 += j6
x7 += j7
x8 += j8
x9 += j9
x10 += j10
x11 += j11
x12 += j12
x13 += j13
x14 += j14
x15 += j15
out[0] = byte(x0)
out[1] = byte(x0 >> 8)
out[2] = byte(x0 >> 16)
out[3] = byte(x0 >> 24)
out[4] = byte(x1)
out[5] = byte(x1 >> 8)
out[6] = byte(x1 >> 16)
out[7] = byte(x1 >> 24)
out[8] = byte(x2)
out[9] = byte(x2 >> 8)
out[10] = byte(x2 >> 16)
out[11] = byte(x2 >> 24)
out[12] = byte(x3)
out[13] = byte(x3 >> 8)
out[14] = byte(x3 >> 16)
out[15] = byte(x3 >> 24)
out[16] = byte(x4)
out[17] = byte(x4 >> 8)
out[18] = byte(x4 >> 16)
out[19] = byte(x4 >> 24)
out[20] = byte(x5)
out[21] = byte(x5 >> 8)
out[22] = byte(x5 >> 16)
out[23] = byte(x5 >> 24)
out[24] = byte(x6)
out[25] = byte(x6 >> 8)
out[26] = byte(x6 >> 16)
out[27] = byte(x6 >> 24)
out[28] = byte(x7)
out[29] = byte(x7 >> 8)
out[30] = byte(x7 >> 16)
out[31] = byte(x7 >> 24)
out[32] = byte(x8)
out[33] = byte(x8 >> 8)
out[34] = byte(x8 >> 16)
out[35] = byte(x8 >> 24)
out[36] = byte(x9)
out[37] = byte(x9 >> 8)
out[38] = byte(x9 >> 16)
out[39] = byte(x9 >> 24)
out[40] = byte(x10)
out[41] = byte(x10 >> 8)
out[42] = byte(x10 >> 16)
out[43] = byte(x10 >> 24)
out[44] = byte(x11)
out[45] = byte(x11 >> 8)
out[46] = byte(x11 >> 16)
out[47] = byte(x11 >> 24)
out[48] = byte(x12)
out[49] = byte(x12 >> 8)
out[50] = byte(x12 >> 16)
out[51] = byte(x12 >> 24)
out[52] = byte(x13)
out[53] = byte(x13 >> 8)
out[54] = byte(x13 >> 16)
out[55] = byte(x13 >> 24)
out[56] = byte(x14)
out[57] = byte(x14 >> 8)
out[58] = byte(x14 >> 16)
out[59] = byte(x14 >> 24)
out[60] = byte(x15)
out[61] = byte(x15 >> 8)
out[62] = byte(x15 >> 16)
out[63] = byte(x15 >> 24)
}
// XORKeyStream crypts bytes from in to out using the given key and counters.
// In and out may be the same slice but otherwise should not overlap. Counter
// contains the raw salsa20 counter bytes (both nonce and block counter).
func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
var block [64]byte
var counterCopy [16]byte
copy(counterCopy[:], counter[:])
for len(in) >= 64 {
core(&block, &counterCopy, key, &Sigma)
for i, x := range block {
out[i] = in[i] ^ x
}
u := uint32(1)
for i := 8; i < 16; i++ {
u += uint32(counterCopy[i])
counterCopy[i] = byte(u)
u >>= 8
}
in = in[64:]
out = out[64:]
}
if len(in) > 0 {
core(&block, &counterCopy, key, &Sigma)
for i, v := range in {
out[i] = v ^ block[i]
}
}
}
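The increment loop above treats counter[8:16] as a little-endian 64-bit block counter, with counter[0:8] holding the nonce. A sketch of using that layout to resume the keystream at an arbitrary block, assuming the byte layout inferred from the loop above; the zero key and nonce are for illustration only:

package main

import (
    "encoding/binary"
    "fmt"

    "golang.org/x/crypto/salsa20/salsa"
)

func main() {
    var key [32]byte
    var counter [16]byte // [0:8] nonce, [8:16] little-endian block counter

    // Start the keystream at block 3, i.e. skip the first 3*64 keystream bytes.
    binary.LittleEndian.PutUint64(counter[8:], 3)

    block := make([]byte, 64)
    salsa.XORKeyStream(block, make([]byte, 64), &counter, &key) // XOR with zeros yields the raw keystream
    fmt.Printf("%x\n", block[:8])
}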

54
vendor/golang.org/x/crypto/salsa20/salsa20.go generated vendored Normal file

@ -0,0 +1,54 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package salsa20 implements the Salsa20 stream cipher as specified in http://cr.yp.to/snuffle/spec.pdf.
Salsa20 differs from many other stream ciphers in that it is message orientated
rather than byte orientated. Keystream blocks are not preserved between calls,
therefore each side must encrypt/decrypt data with the same segmentation.
Another aspect of this difference is that part of the counter is exposed as
a nonce in each call. Encrypting two different messages with the same (key,
nonce) pair leads to trivial plaintext recovery. This is analogous to
encrypting two different messages with the same key with a traditional stream
cipher.
This package also implements XSalsa20: a version of Salsa20 with a 24-byte
nonce as specified in http://cr.yp.to/snuffle/xsalsa-20081128.pdf. Simply
passing a 24-byte slice as the nonce triggers XSalsa20.
*/
package salsa20
// TODO(agl): implement XORKeyStream12 and XORKeyStream8 - the reduced round variants of Salsa20.
import (
"golang.org/x/crypto/salsa20/salsa"
)
// XORKeyStream crypts bytes from in to out using the given key and nonce. In
// and out may be the same slice but otherwise should not overlap. Nonce must
// be either 8 or 24 bytes long.
func XORKeyStream(out, in []byte, nonce []byte, key *[32]byte) {
if len(out) < len(in) {
in = in[:len(out)]
}
var subNonce [16]byte
if len(nonce) == 24 {
var subKey [32]byte
var hNonce [16]byte
copy(hNonce[:], nonce[:16])
salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma)
copy(subNonce[:], nonce[16:])
key = &subKey
} else if len(nonce) == 8 {
copy(subNonce[:], nonce[:])
} else {
panic("salsa20: nonce must be 8 or 24 bytes")
}
salsa.XORKeyStream(out, in, &subNonce, key)
}
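A minimal sketch of the package-level API (the key and nonce bytes are demo values; a nonce must never be reused with the same key, and an 8-byte nonce selects Salsa20 while a 24-byte nonce selects XSalsa20):

package main

import (
    "fmt"

    "golang.org/x/crypto/salsa20"
)

func main() {
    var key [32]byte
    copy(key[:], "an example 32-byte salsa20 key!!") // demo key only
    nonce := []byte{1, 2, 3, 4, 5, 6, 7, 8}          // 8 bytes -> plain Salsa20

    msg := []byte("hello, salsa20")
    ct := make([]byte, len(msg))
    salsa20.XORKeyStream(ct, msg, nonce, &key) // encrypt

    pt := make([]byte, len(ct))
    salsa20.XORKeyStream(pt, ct, nonce, &key) // the same call decrypts
    fmt.Printf("%x %q\n", ct, pt)
}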

109
vendor/golang.org/x/crypto/tea/cipher.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package tea implements the TEA algorithm, as defined in Needham and
// Wheeler's 1994 technical report, “TEA, a Tiny Encryption Algorithm”. See
// http://www.cix.co.uk/~klockstone/tea.pdf for details.
package tea
import (
"crypto/cipher"
"encoding/binary"
"errors"
)
const (
// BlockSize is the size of a TEA block, in bytes.
BlockSize = 8
// KeySize is the size of a TEA key, in bytes.
KeySize = 16
// delta is the TEA key schedule constant.
delta = 0x9e3779b9
// numRounds is the standard number of rounds in TEA.
numRounds = 64
)
// tea is an instance of the TEA cipher with a particular key.
type tea struct {
key [16]byte
rounds int
}
// NewCipher returns an instance of the TEA cipher with the standard number of
// rounds. The key argument must be 16 bytes long.
func NewCipher(key []byte) (cipher.Block, error) {
return NewCipherWithRounds(key, numRounds)
}
// NewCipherWithRounds returns an instance of the TEA cipher with a given
// number of rounds, which must be even. The key argument must be 16 bytes
// long.
func NewCipherWithRounds(key []byte, rounds int) (cipher.Block, error) {
if len(key) != 16 {
return nil, errors.New("tea: incorrect key size")
}
if rounds&1 != 0 {
return nil, errors.New("tea: odd number of rounds specified")
}
c := &tea{
rounds: rounds,
}
copy(c.key[:], key)
return c, nil
}
// BlockSize returns the TEA block size, which is eight bytes. It is necessary
// to satisfy the Block interface in the package "crypto/cipher".
func (*tea) BlockSize() int {
return BlockSize
}
// Encrypt encrypts the 8 byte buffer src using the key in t and stores the
// result in dst. Note that for amounts of data larger than a block, it is not
// safe to just call Encrypt on successive blocks; instead, use an encryption
// mode like CBC (see crypto/cipher/cbc.go).
func (t *tea) Encrypt(dst, src []byte) {
e := binary.BigEndian
v0, v1 := e.Uint32(src), e.Uint32(src[4:])
k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:])
sum := uint32(0)
delta := uint32(delta)
for i := 0; i < t.rounds/2; i++ {
sum += delta
v0 += ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
v1 += ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
}
e.PutUint32(dst, v0)
e.PutUint32(dst[4:], v1)
}
// Decrypt decrypts the 8 byte buffer src using the key in t and stores the
// result in dst.
func (t *tea) Decrypt(dst, src []byte) {
e := binary.BigEndian
v0, v1 := e.Uint32(src), e.Uint32(src[4:])
k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:])
delta := uint32(delta)
sum := delta * uint32(t.rounds/2) // in general, sum = delta * n
for i := 0; i < t.rounds/2; i++ {
v1 -= ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
v0 -= ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
sum -= delta
}
e.PutUint32(dst, v0)
e.PutUint32(dst[4:], v1)
}
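A single-block sketch for this package (demo key and plaintext; as the Encrypt comment above notes, use a mode like CBC from crypto/cipher for anything longer than one block):

package main

import (
    "fmt"

    "golang.org/x/crypto/tea"
)

func main() {
    key := []byte("0123456789abcdef") // 16-byte demo key
    c, err := tea.NewCipher(key)
    if err != nil {
        panic(err)
    }

    src := []byte("8 bytes!") // exactly one tea.BlockSize block
    dst := make([]byte, tea.BlockSize)
    c.Encrypt(dst, src)

    back := make([]byte, tea.BlockSize)
    c.Decrypt(back, dst)
    fmt.Printf("%x %q\n", dst, back)
}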

342
vendor/golang.org/x/crypto/twofish/twofish.go generated vendored Normal file

@ -0,0 +1,342 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package twofish implements Bruce Schneier's Twofish encryption algorithm.
package twofish
// Twofish is defined in http://www.schneier.com/paper-twofish-paper.pdf [TWOFISH]
// This code is a port of the LibTom C implementation.
// See http://libtom.org/?page=features&newsitems=5&whatfile=crypt.
// LibTomCrypt is free for all purposes under the public domain.
// It was heavily inspired by the go blowfish package.
import "strconv"
// BlockSize is the constant block size of Twofish.
const BlockSize = 16
const mdsPolynomial = 0x169 // x^8 + x^6 + x^5 + x^3 + 1, see [TWOFISH] 4.2
const rsPolynomial = 0x14d // x^8 + x^6 + x^3 + x^2 + 1, see [TWOFISH] 4.3
// A Cipher is an instance of Twofish encryption using a particular key.
type Cipher struct {
s [4][256]uint32
k [40]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/twofish: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a Cipher.
// The key argument should be the Twofish key, 16, 24 or 32 bytes.
func NewCipher(key []byte) (*Cipher, error) {
keylen := len(key)
if keylen != 16 && keylen != 24 && keylen != 32 {
return nil, KeySizeError(keylen)
}
// k is the number of 64 bit words in key
k := keylen / 8
// Create the S[..] words
var S [4 * 4]byte
for i := 0; i < k; i++ {
// Computes [y0 y1 y2 y3] = rs . [x0 x1 x2 x3 x4 x5 x6 x7]
for j, rsRow := range rs {
for k, rsVal := range rsRow {
S[4*i+j] ^= gfMult(key[8*i+k], rsVal, rsPolynomial)
}
}
}
// Calculate subkeys
c := new(Cipher)
var tmp [4]byte
for i := byte(0); i < 20; i++ {
// A = h(p * 2x, Me)
for j := range tmp {
tmp[j] = 2 * i
}
A := h(tmp[:], key, 0)
// B = rolc(h(p * (2x + 1), Mo), 8)
for j := range tmp {
tmp[j] = 2*i + 1
}
B := h(tmp[:], key, 1)
B = rol(B, 8)
c.k[2*i] = A + B
// K[2i+1] = (A + 2B) <<< 9
c.k[2*i+1] = rol(2*B+A, 9)
}
// Calculate sboxes
switch k {
case 2:
for i := range c.s[0] {
c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][byte(i)]^S[0]]^S[4]], 0)
c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][byte(i)]^S[1]]^S[5]], 1)
c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][byte(i)]^S[2]]^S[6]], 2)
c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][byte(i)]^S[3]]^S[7]], 3)
}
case 3:
for i := range c.s[0] {
c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]], 0)
c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[1]]^S[5]]^S[9]], 1)
c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]], 2)
c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[3]]^S[7]]^S[11]], 3)
}
default:
for i := range c.s[0] {
c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]]^S[12]], 0)
c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[1]]^S[5]]^S[9]]^S[13]], 1)
c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]]^S[14]], 2)
c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][sbox[1][byte(i)]^S[3]]^S[7]]^S[11]]^S[15]], 3)
}
}
return c, nil
}
// BlockSize returns the Twofish block size, 16 bytes.
func (c *Cipher) BlockSize() int { return BlockSize }
// store32l stores src in dst in little-endian form.
func store32l(dst []byte, src uint32) {
dst[0] = byte(src)
dst[1] = byte(src >> 8)
dst[2] = byte(src >> 16)
dst[3] = byte(src >> 24)
return
}
// load32l reads a little-endian uint32 from src.
func load32l(src []byte) uint32 {
return uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24
}
// rol returns x after a left circular rotation of y bits.
func rol(x, y uint32) uint32 {
return (x << (y & 31)) | (x >> (32 - (y & 31)))
}
// ror returns x after a right circular rotation of y bits.
func ror(x, y uint32) uint32 {
return (x >> (y & 31)) | (x << (32 - (y & 31)))
}
// The RS matrix. See [TWOFISH] 4.3
var rs = [4][8]byte{
{0x01, 0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E},
{0xA4, 0x56, 0x82, 0xF3, 0x1E, 0xC6, 0x68, 0xE5},
{0x02, 0xA1, 0xFC, 0xC1, 0x47, 0xAE, 0x3D, 0x19},
{0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E, 0x03},
}
// sbox tables
var sbox = [2][256]byte{
{
0xa9, 0x67, 0xb3, 0xe8, 0x04, 0xfd, 0xa3, 0x76, 0x9a, 0x92, 0x80, 0x78, 0xe4, 0xdd, 0xd1, 0x38,
0x0d, 0xc6, 0x35, 0x98, 0x18, 0xf7, 0xec, 0x6c, 0x43, 0x75, 0x37, 0x26, 0xfa, 0x13, 0x94, 0x48,
0xf2, 0xd0, 0x8b, 0x30, 0x84, 0x54, 0xdf, 0x23, 0x19, 0x5b, 0x3d, 0x59, 0xf3, 0xae, 0xa2, 0x82,
0x63, 0x01, 0x83, 0x2e, 0xd9, 0x51, 0x9b, 0x7c, 0xa6, 0xeb, 0xa5, 0xbe, 0x16, 0x0c, 0xe3, 0x61,
0xc0, 0x8c, 0x3a, 0xf5, 0x73, 0x2c, 0x25, 0x0b, 0xbb, 0x4e, 0x89, 0x6b, 0x53, 0x6a, 0xb4, 0xf1,
0xe1, 0xe6, 0xbd, 0x45, 0xe2, 0xf4, 0xb6, 0x66, 0xcc, 0x95, 0x03, 0x56, 0xd4, 0x1c, 0x1e, 0xd7,
0xfb, 0xc3, 0x8e, 0xb5, 0xe9, 0xcf, 0xbf, 0xba, 0xea, 0x77, 0x39, 0xaf, 0x33, 0xc9, 0x62, 0x71,
0x81, 0x79, 0x09, 0xad, 0x24, 0xcd, 0xf9, 0xd8, 0xe5, 0xc5, 0xb9, 0x4d, 0x44, 0x08, 0x86, 0xe7,
0xa1, 0x1d, 0xaa, 0xed, 0x06, 0x70, 0xb2, 0xd2, 0x41, 0x7b, 0xa0, 0x11, 0x31, 0xc2, 0x27, 0x90,
0x20, 0xf6, 0x60, 0xff, 0x96, 0x5c, 0xb1, 0xab, 0x9e, 0x9c, 0x52, 0x1b, 0x5f, 0x93, 0x0a, 0xef,
0x91, 0x85, 0x49, 0xee, 0x2d, 0x4f, 0x8f, 0x3b, 0x47, 0x87, 0x6d, 0x46, 0xd6, 0x3e, 0x69, 0x64,
0x2a, 0xce, 0xcb, 0x2f, 0xfc, 0x97, 0x05, 0x7a, 0xac, 0x7f, 0xd5, 0x1a, 0x4b, 0x0e, 0xa7, 0x5a,
0x28, 0x14, 0x3f, 0x29, 0x88, 0x3c, 0x4c, 0x02, 0xb8, 0xda, 0xb0, 0x17, 0x55, 0x1f, 0x8a, 0x7d,
0x57, 0xc7, 0x8d, 0x74, 0xb7, 0xc4, 0x9f, 0x72, 0x7e, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34,
0x6e, 0x50, 0xde, 0x68, 0x65, 0xbc, 0xdb, 0xf8, 0xc8, 0xa8, 0x2b, 0x40, 0xdc, 0xfe, 0x32, 0xa4,
0xca, 0x10, 0x21, 0xf0, 0xd3, 0x5d, 0x0f, 0x00, 0x6f, 0x9d, 0x36, 0x42, 0x4a, 0x5e, 0xc1, 0xe0,
},
{
0x75, 0xf3, 0xc6, 0xf4, 0xdb, 0x7b, 0xfb, 0xc8, 0x4a, 0xd3, 0xe6, 0x6b, 0x45, 0x7d, 0xe8, 0x4b,
0xd6, 0x32, 0xd8, 0xfd, 0x37, 0x71, 0xf1, 0xe1, 0x30, 0x0f, 0xf8, 0x1b, 0x87, 0xfa, 0x06, 0x3f,
0x5e, 0xba, 0xae, 0x5b, 0x8a, 0x00, 0xbc, 0x9d, 0x6d, 0xc1, 0xb1, 0x0e, 0x80, 0x5d, 0xd2, 0xd5,
0xa0, 0x84, 0x07, 0x14, 0xb5, 0x90, 0x2c, 0xa3, 0xb2, 0x73, 0x4c, 0x54, 0x92, 0x74, 0x36, 0x51,
0x38, 0xb0, 0xbd, 0x5a, 0xfc, 0x60, 0x62, 0x96, 0x6c, 0x42, 0xf7, 0x10, 0x7c, 0x28, 0x27, 0x8c,
0x13, 0x95, 0x9c, 0xc7, 0x24, 0x46, 0x3b, 0x70, 0xca, 0xe3, 0x85, 0xcb, 0x11, 0xd0, 0x93, 0xb8,
0xa6, 0x83, 0x20, 0xff, 0x9f, 0x77, 0xc3, 0xcc, 0x03, 0x6f, 0x08, 0xbf, 0x40, 0xe7, 0x2b, 0xe2,
0x79, 0x0c, 0xaa, 0x82, 0x41, 0x3a, 0xea, 0xb9, 0xe4, 0x9a, 0xa4, 0x97, 0x7e, 0xda, 0x7a, 0x17,
0x66, 0x94, 0xa1, 0x1d, 0x3d, 0xf0, 0xde, 0xb3, 0x0b, 0x72, 0xa7, 0x1c, 0xef, 0xd1, 0x53, 0x3e,
0x8f, 0x33, 0x26, 0x5f, 0xec, 0x76, 0x2a, 0x49, 0x81, 0x88, 0xee, 0x21, 0xc4, 0x1a, 0xeb, 0xd9,
0xc5, 0x39, 0x99, 0xcd, 0xad, 0x31, 0x8b, 0x01, 0x18, 0x23, 0xdd, 0x1f, 0x4e, 0x2d, 0xf9, 0x48,
0x4f, 0xf2, 0x65, 0x8e, 0x78, 0x5c, 0x58, 0x19, 0x8d, 0xe5, 0x98, 0x57, 0x67, 0x7f, 0x05, 0x64,
0xaf, 0x63, 0xb6, 0xfe, 0xf5, 0xb7, 0x3c, 0xa5, 0xce, 0xe9, 0x68, 0x44, 0xe0, 0x4d, 0x43, 0x69,
0x29, 0x2e, 0xac, 0x15, 0x59, 0xa8, 0x0a, 0x9e, 0x6e, 0x47, 0xdf, 0x34, 0x35, 0x6a, 0xcf, 0xdc,
0x22, 0xc9, 0xc0, 0x9b, 0x89, 0xd4, 0xed, 0xab, 0x12, 0xa2, 0x0d, 0x52, 0xbb, 0x02, 0x2f, 0xa9,
0xd7, 0x61, 0x1e, 0xb4, 0x50, 0x04, 0xf6, 0xc2, 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xbe, 0x91,
},
}
// gfMult returns a·b in GF(2^8)/p
func gfMult(a, b byte, p uint32) byte {
B := [2]uint32{0, uint32(b)}
P := [2]uint32{0, p}
var result uint32
// branchless GF multiplier
for i := 0; i < 7; i++ {
result ^= B[a&1]
a >>= 1
B[1] = P[B[1]>>7] ^ (B[1] << 1)
}
result ^= B[a&1]
return byte(result)
}
// mdsColumnMult calculates y{col} where [y0 y1 y2 y3] = MDS · [x0]
func mdsColumnMult(in byte, col int) uint32 {
mul01 := in
mul5B := gfMult(in, 0x5B, mdsPolynomial)
mulEF := gfMult(in, 0xEF, mdsPolynomial)
switch col {
case 0:
return uint32(mul01) | uint32(mul5B)<<8 | uint32(mulEF)<<16 | uint32(mulEF)<<24
case 1:
return uint32(mulEF) | uint32(mulEF)<<8 | uint32(mul5B)<<16 | uint32(mul01)<<24
case 2:
return uint32(mul5B) | uint32(mulEF)<<8 | uint32(mul01)<<16 | uint32(mulEF)<<24
case 3:
return uint32(mul5B) | uint32(mul01)<<8 | uint32(mulEF)<<16 | uint32(mul5B)<<24
}
panic("unreachable")
}
// h implements the S-box generation function. See [TWOFISH] 4.3.5
func h(in, key []byte, offset int) uint32 {
var y [4]byte
for x := range y {
y[x] = in[x]
}
switch len(key) / 8 {
case 4:
y[0] = sbox[1][y[0]] ^ key[4*(6+offset)+0]
y[1] = sbox[0][y[1]] ^ key[4*(6+offset)+1]
y[2] = sbox[0][y[2]] ^ key[4*(6+offset)+2]
y[3] = sbox[1][y[3]] ^ key[4*(6+offset)+3]
fallthrough
case 3:
y[0] = sbox[1][y[0]] ^ key[4*(4+offset)+0]
y[1] = sbox[1][y[1]] ^ key[4*(4+offset)+1]
y[2] = sbox[0][y[2]] ^ key[4*(4+offset)+2]
y[3] = sbox[0][y[3]] ^ key[4*(4+offset)+3]
fallthrough
case 2:
y[0] = sbox[1][sbox[0][sbox[0][y[0]]^key[4*(2+offset)+0]]^key[4*(0+offset)+0]]
y[1] = sbox[0][sbox[0][sbox[1][y[1]]^key[4*(2+offset)+1]]^key[4*(0+offset)+1]]
y[2] = sbox[1][sbox[1][sbox[0][y[2]]^key[4*(2+offset)+2]]^key[4*(0+offset)+2]]
y[3] = sbox[0][sbox[1][sbox[1][y[3]]^key[4*(2+offset)+3]]^key[4*(0+offset)+3]]
}
// [y0 y1 y2 y3] = MDS . [x0 x1 x2 x3]
var mdsMult uint32
for i := range y {
mdsMult ^= mdsColumnMult(y[i], i)
}
return mdsMult
}
// Encrypt encrypts a 16-byte block from src to dst, which may overlap.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) {
S1 := c.s[0]
S2 := c.s[1]
S3 := c.s[2]
S4 := c.s[3]
// Load input
ia := load32l(src[0:4])
ib := load32l(src[4:8])
ic := load32l(src[8:12])
id := load32l(src[12:16])
// Pre-whitening
ia ^= c.k[0]
ib ^= c.k[1]
ic ^= c.k[2]
id ^= c.k[3]
for i := 0; i < 8; i++ {
k := c.k[8+i*4 : 12+i*4]
t2 := S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
t1 := S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
ic = ror(ic^(t1+k[0]), 1)
id = rol(id, 1) ^ (t2 + t1 + k[1])
t2 = S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
t1 = S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
ia = ror(ia^(t1+k[2]), 1)
ib = rol(ib, 1) ^ (t2 + t1 + k[3])
}
// Output with "undo last swap"
ta := ic ^ c.k[4]
tb := id ^ c.k[5]
tc := ia ^ c.k[6]
td := ib ^ c.k[7]
store32l(dst[0:4], ta)
store32l(dst[4:8], tb)
store32l(dst[8:12], tc)
store32l(dst[12:16], td)
}
// Decrypt decrypts a 16-byte block from src to dst, which may overlap.
func (c *Cipher) Decrypt(dst, src []byte) {
S1 := c.s[0]
S2 := c.s[1]
S3 := c.s[2]
S4 := c.s[3]
// Load input
ta := load32l(src[0:4])
tb := load32l(src[4:8])
tc := load32l(src[8:12])
td := load32l(src[12:16])
// Undo final swap
ia := tc ^ c.k[6]
ib := td ^ c.k[7]
ic := ta ^ c.k[4]
id := tb ^ c.k[5]
for i := 8; i > 0; i-- {
k := c.k[4+i*4 : 8+i*4]
t2 := S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
t1 := S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
ia = rol(ia, 1) ^ (t1 + k[2])
ib = ror(ib^(t2+t1+k[3]), 1)
t2 = S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
t1 = S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
ic = rol(ic, 1) ^ (t1 + k[0])
id = ror(id^(t2+t1+k[1]), 1)
}
// Undo pre-whitening
ia ^= c.k[0]
ib ^= c.k[1]
ic ^= c.k[2]
id ^= c.k[3]
store32l(dst[0:4], ia)
store32l(dst[4:8], ib)
store32l(dst[8:12], ic)
store32l(dst[12:16], id)
}
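The same single-block pattern applies to Twofish (demo key and data; keys of 16, 24, or 32 bytes select the different key schedules handled in NewCipher above):

package main

import (
    "fmt"

    "golang.org/x/crypto/twofish"
)

func main() {
    key := []byte("0123456789abcdef") // 16-byte demo key
    c, err := twofish.NewCipher(key)
    if err != nil {
        panic(err)
    }

    src := []byte("exactly 16 bytes") // one twofish.BlockSize block
    dst := make([]byte, twofish.BlockSize)
    c.Encrypt(dst, src)

    back := make([]byte, twofish.BlockSize)
    c.Decrypt(back, dst)
    fmt.Printf("%x %q\n", dst, back)
}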

66
vendor/golang.org/x/crypto/xtea/block.go generated vendored Normal file

@ -0,0 +1,66 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Implementation adapted from Needham and Wheeler's paper:
http://www.cix.co.uk/~klockstone/xtea.pdf
A precalculated look up table is used during encryption/decryption for values that are based purely on the key.
*/
package xtea
// XTEA is based on 64 rounds.
const numRounds = 64
// blockToUint32 reads an 8 byte slice into two uint32s.
// The block is treated as big endian.
func blockToUint32(src []byte) (uint32, uint32) {
r0 := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
r1 := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
return r0, r1
}
// uint32ToBlock writes two uint32s into an 8 byte data block.
// Values are written as big endian.
func uint32ToBlock(v0, v1 uint32, dst []byte) {
dst[0] = byte(v0 >> 24)
dst[1] = byte(v0 >> 16)
dst[2] = byte(v0 >> 8)
dst[3] = byte(v0)
dst[4] = byte(v1 >> 24)
dst[5] = byte(v1 >> 16)
dst[6] = byte(v1 >> 8)
dst[7] = byte(v1 >> 0)
}
// encryptBlock encrypts a single 8 byte block using XTEA.
func encryptBlock(c *Cipher, dst, src []byte) {
v0, v1 := blockToUint32(src)
// Two rounds of XTEA applied per loop
for i := 0; i < numRounds; {
v0 += ((v1<<4 ^ v1>>5) + v1) ^ c.table[i]
i++
v1 += ((v0<<4 ^ v0>>5) + v0) ^ c.table[i]
i++
}
uint32ToBlock(v0, v1, dst)
}
// decryptBlock decrypts a single 8 byte block using XTEA.
func decryptBlock(c *Cipher, dst, src []byte) {
v0, v1 := blockToUint32(src)
// Two rounds of XTEA applied per loop
for i := numRounds; i > 0; {
i--
v1 -= ((v0<<4 ^ v0>>5) + v0) ^ c.table[i]
i--
v0 -= ((v1<<4 ^ v1>>5) + v1) ^ c.table[i]
}
uint32ToBlock(v0, v1, dst)
}

82
vendor/golang.org/x/crypto/xtea/cipher.go generated vendored Normal file

@ -0,0 +1,82 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package xtea implements XTEA encryption, as defined in Needham and Wheeler's
// 1997 technical report, "Tea extensions."
package xtea
// For details, see http://www.cix.co.uk/~klockstone/xtea.pdf
import "strconv"
// The XTEA block size in bytes.
const BlockSize = 8
// A Cipher is an instance of an XTEA cipher using a particular key.
// table contains a series of precalculated values that are used each round.
type Cipher struct {
table [64]uint32
}
type KeySizeError int
func (k KeySizeError) Error() string {
return "crypto/xtea: invalid key size " + strconv.Itoa(int(k))
}
// NewCipher creates and returns a new Cipher.
// The key argument should be the XTEA key.
// XTEA only supports 128 bit (16 byte) keys.
func NewCipher(key []byte) (*Cipher, error) {
k := len(key)
switch k {
default:
return nil, KeySizeError(k)
case 16:
break
}
c := new(Cipher)
initCipher(c, key)
return c, nil
}
// BlockSize returns the XTEA block size, 8 bytes.
// It is necessary to satisfy the Block interface in the
// package "crypto/cipher".
func (c *Cipher) BlockSize() int { return BlockSize }
// Encrypt encrypts the 8 byte buffer src using the key and stores the result in dst.
// Note that for amounts of data larger than a block,
// it is not safe to just call Encrypt on successive blocks;
// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c, dst, src) }
// Decrypt decrypts the 8 byte buffer src using the key k and stores the result in dst.
func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c, dst, src) }
// initCipher initializes the cipher context by creating a look up table
// of precalculated values that are based on the key.
func initCipher(c *Cipher, key []byte) {
// Load the key into four uint32s
var k [4]uint32
for i := 0; i < len(k); i++ {
j := i << 2 // Multiply by 4
k[i] = uint32(key[j+0])<<24 | uint32(key[j+1])<<16 | uint32(key[j+2])<<8 | uint32(key[j+3])
}
// Precalculate the table
const delta = 0x9E3779B9
var sum uint32 = 0
// Two rounds of XTEA applied per loop
for i := 0; i < numRounds; {
c.table[i] = sum + k[sum&3]
i++
sum += delta
c.table[i] = sum + k[(sum>>11)&3]
i++
}
}
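And the corresponding sketch for XTEA (demo key and data; the key must be exactly 16 bytes, and multi-block data again needs a proper cipher mode):

package main

import (
    "fmt"

    "golang.org/x/crypto/xtea"
)

func main() {
    key := []byte("0123456789abcdef") // XTEA requires a 128-bit (16-byte) key
    c, err := xtea.NewCipher(key)
    if err != nil {
        panic(err)
    }

    src := []byte("8 bytes!") // one xtea.BlockSize block
    dst := make([]byte, xtea.BlockSize)
    c.Encrypt(dst, src)

    back := make([]byte, xtea.BlockSize)
    c.Decrypt(back, dst)
    fmt.Printf("%x %q\n", dst, back)
}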

3
vendor/golang.org/x/net/AUTHORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/net/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/net/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/net/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

41
vendor/golang.org/x/net/bpf/asm.go generated vendored Normal file

@ -0,0 +1,41 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
import "fmt"
// Assemble converts insts into raw instructions suitable for loading
// into a BPF virtual machine.
//
// Currently, no optimization is attempted; the assembled program flow
// is exactly as provided.
func Assemble(insts []Instruction) ([]RawInstruction, error) {
ret := make([]RawInstruction, len(insts))
var err error
for i, inst := range insts {
ret[i], err = inst.Assemble()
if err != nil {
return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
}
}
return ret, nil
}
// Disassemble attempts to parse raw back into
// Instructions. Unrecognized RawInstructions are assumed to be an
// extension not implemented by this package, and are passed through
// unchanged to the output. The allDecoded value reports whether insts
// contains no RawInstructions.
func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
insts = make([]Instruction, len(raw))
allDecoded = true
for i, r := range raw {
insts[i] = r.Disassemble()
if _, ok := insts[i].(RawInstruction); ok {
allDecoded = false
}
}
return insts, allDecoded
}
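A short sketch of the Assemble/Disassemble round trip, reusing the ARP filter from the package documentation further below:

package main

import (
    "fmt"

    "golang.org/x/net/bpf"
)

func main() {
    prog := []bpf.Instruction{
        bpf.LoadAbsolute{Off: 12, Size: 2},                           // load the EtherType field
        bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, // skip next insn if not ARP
        bpf.RetConstant{Val: 4096},                                   // capture up to 4k
        bpf.RetConstant{Val: 0},                                      // ignore
    }

    raw, err := bpf.Assemble(prog)
    if err != nil {
        panic(err)
    }

    insts, allDecoded := bpf.Disassemble(raw)
    fmt.Println(len(raw), allDecoded, insts[0]) // prints: 4 true ldh [12]
}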

218
vendor/golang.org/x/net/bpf/constants.go generated vendored Normal file

@ -0,0 +1,218 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
// A Register is a register of the BPF virtual machine.
type Register uint16
const (
// RegA is the accumulator register. RegA is always the
// destination register of ALU operations.
RegA Register = iota
// RegX is the indirection register, used by LoadIndirect
// operations.
RegX
)
// An ALUOp is an arithmetic or logic operation.
type ALUOp uint16
// ALU binary operation types.
const (
ALUOpAdd ALUOp = iota << 4
ALUOpSub
ALUOpMul
ALUOpDiv
ALUOpOr
ALUOpAnd
ALUOpShiftLeft
ALUOpShiftRight
aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
ALUOpMod
ALUOpXor
)
// A JumpTest is a comparison operator used in conditional jumps.
type JumpTest uint16
// Supported operators for conditional jumps.
const (
// K == A
JumpEqual JumpTest = iota
// K != A
JumpNotEqual
// K > A
JumpGreaterThan
// K < A
JumpLessThan
// K >= A
JumpGreaterOrEqual
// K <= A
JumpLessOrEqual
// K & A != 0
JumpBitsSet
// K & A == 0
JumpBitsNotSet
)
// An Extension is a function call provided by the kernel that
// performs advanced operations that are expensive or impossible
// within the BPF virtual machine.
//
// Extensions are only implemented by the Linux kernel.
//
// TODO: should we prune this list? Some of these extensions seem
// either broken or near-impossible to use correctly, whereas others
// (len, random, ifindex) are quite useful.
type Extension int
// Extension functions available in the Linux kernel.
const (
// extOffset is the negative maximum number of instructions used
// to load instructions by overloading the K argument.
extOffset = -0x1000
// ExtLen returns the length of the packet.
ExtLen Extension = 1
// ExtProto returns the packet's L3 protocol type.
ExtProto Extension = 0
// ExtType returns the packet's type (skb->pkt_type in the kernel)
//
// TODO: better documentation. How nice an API do we want to
// provide for these esoteric extensions?
ExtType Extension = 4
// ExtPayloadOffset returns the offset of the packet payload, or
// the first protocol header that the kernel does not know how to
// parse.
ExtPayloadOffset Extension = 52
// ExtInterfaceIndex returns the index of the interface on which
// the packet was received.
ExtInterfaceIndex Extension = 8
// ExtNetlinkAttr returns the netlink attribute of type X at
// offset A.
ExtNetlinkAttr Extension = 12
// ExtNetlinkAttrNested returns the nested netlink attribute of
// type X at offset A.
ExtNetlinkAttrNested Extension = 16
// ExtMark returns the packet's mark value.
ExtMark Extension = 20
// ExtQueue returns the packet's assigned hardware queue.
ExtQueue Extension = 24
// ExtLinkLayerType returns the packet's hardware address type
// (e.g. Ethernet, Infiniband).
ExtLinkLayerType Extension = 28
// ExtRXHash returns the packet's receive hash.
//
// TODO: figure out what this rxhash actually is.
ExtRXHash Extension = 32
// ExtCPUID returns the ID of the CPU processing the current
// packet.
ExtCPUID Extension = 36
// ExtVLANTag returns the packet's VLAN tag.
ExtVLANTag Extension = 44
// ExtVLANTagPresent returns non-zero if the packet has a VLAN
// tag.
//
// TODO: I think this might be a lie: it reads bit 0x1000 of the
// VLAN header, which changed meaning in recent revisions of the
// spec - this extension may now return meaningless information.
ExtVLANTagPresent Extension = 48
// ExtVLANProto returns 0x8100 if the frame has a VLAN header,
// 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
// other value if no VLAN information is present.
ExtVLANProto Extension = 60
// ExtRand returns a uniformly random uint32.
ExtRand Extension = 56
)
// The following gives names to various bit patterns used in opcode construction.
const (
opMaskCls uint16 = 0x7
// opClsLoad masks
opMaskLoadDest = 0x01
opMaskLoadWidth = 0x18
opMaskLoadMode = 0xe0
// opClsALU
opMaskOperandSrc = 0x08
opMaskOperator = 0xf0
// opClsJump
opMaskJumpConst = 0x0f
opMaskJumpCond = 0xf0
)
const (
// +---------------+-----------------+---+---+---+
// | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 |
// +---------------+-----------------+---+---+---+
opClsLoadA uint16 = iota
// +---------------+-----------------+---+---+---+
// | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 |
// +---------------+-----------------+---+---+---+
opClsLoadX
// +---+---+---+---+---+---+---+---+
// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
// +---+---+---+---+---+---+---+---+
opClsStoreA
// +---+---+---+---+---+---+---+---+
// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
// +---+---+---+---+---+---+---+---+
opClsStoreX
// +---------------+-----------------+---+---+---+
// | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
// +---------------+-----------------+---+---+---+
opClsALU
// +-----------------------------+---+---+---+---+
// | TestOperator (4b) | 0 | 1 | 0 | 1 |
// +-----------------------------+---+---+---+---+
opClsJump
// +---+-------------------------+---+---+---+---+
// | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 |
// +---+-------------------------+---+---+---+---+
opClsReturn
// +---+-------------------------+---+---+---+---+
// | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 |
// +---+-------------------------+---+---+---+---+
opClsMisc
)
const (
opAddrModeImmediate uint16 = iota << 5
opAddrModeAbsolute
opAddrModeIndirect
opAddrModeScratch
opAddrModePacketLen // actually an extension, not an addressing mode.
opAddrModeMemShift
)
const (
opLoadWidth4 uint16 = iota << 3
opLoadWidth2
opLoadWidth1
)
// Operator defined by ALUOp*
const (
opALUSrcConstant uint16 = iota << 3
opALUSrcX
)
const (
opJumpAlways = iota << 4
opJumpEqual
opJumpGT
opJumpGE
opJumpSet
)
const (
opRetSrcConstant uint16 = iota << 4
opRetSrcA
)
const (
opMiscTAX = 0x00
opMiscTXA = 0x80
)

82
vendor/golang.org/x/net/bpf/doc.go generated vendored Normal file

@ -0,0 +1,82 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package bpf implements marshaling and unmarshaling of programs for the
Berkeley Packet Filter virtual machine, and provides a Go implementation
of the virtual machine.
BPF's main use is to specify a packet filter for network taps, so that
the kernel doesn't have to expensively copy every packet it sees to
userspace. However, it's been repurposed to other areas where running
user code in-kernel is needed. For example, Linux's seccomp uses BPF
to apply security policies to system calls. For simplicity, this
documentation refers only to packets, but other uses of BPF have their
own data payloads.
BPF programs run in a restricted virtual machine. It has almost no
access to kernel functions, and while conditional branches are
allowed, they can only jump forwards, to guarantee that there are no
infinite loops.
The virtual machine
The BPF VM is an accumulator machine. Its main register, called
register A, is an implicit source and destination in all arithmetic
and logic operations. The machine also has 16 scratch registers for
temporary storage, and an indirection register (register X) for
indirect memory access. All registers are 32 bits wide.
Each run of a BPF program is given one packet, which is placed in the
VM's read-only "main memory". LoadAbsolute and LoadIndirect
instructions can fetch up to 32 bits at a time into register A for
examination.
The goal of a BPF program is to produce and return a verdict (uint32),
which tells the kernel what to do with the packet. In the context of
packet filtering, the returned value is the number of bytes of the
packet to forward to userspace, or 0 to ignore the packet. Other
contexts like seccomp define their own return values.
In order to simplify programs, attempts to read past the end of the
packet terminate the program execution with a verdict of 0 (ignore
packet). This means that the vast majority of BPF programs don't need
to do any explicit bounds checking.
In addition to the bytes of the packet, some BPF programs have access
to extensions, which are essentially calls to kernel utility
functions. Currently, the only extensions supported by this package
are the Linux packet filter extensions.
Examples
This packet filter selects all ARP packets.
bpf.Assemble([]bpf.Instruction{
// Load "EtherType" field from the ethernet header.
bpf.LoadAbsolute{Off: 12, Size: 2},
// Skip over the next instruction if EtherType is not ARP.
bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
// Verdict is "send up to 4k of the packet to userspace."
bpf.RetConstant{Val: 4096},
// Verdict is "ignore packet."
bpf.RetConstant{Val: 0},
})
This packet filter captures a random 1% sample of traffic.
bpf.Assemble([]bpf.Instruction{
// Get a 32-bit random number from the Linux kernel.
bpf.LoadExtension{Num: bpf.ExtRand},
// 1% dice roll?
bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 1 << 32 / 100, SkipFalse: 1},
// Capture.
bpf.RetConstant{Val: 4096},
// Ignore.
bpf.RetConstant{Val: 0},
})
*/
package bpf

704
vendor/golang.org/x/net/bpf/instructions.go generated vendored Normal file

@ -0,0 +1,704 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
import "fmt"
// An Instruction is one instruction executed by the BPF virtual
// machine.
type Instruction interface {
// Assemble assembles the Instruction into a RawInstruction.
Assemble() (RawInstruction, error)
}
// A RawInstruction is a raw BPF virtual machine instruction.
type RawInstruction struct {
// Operation to execute.
Op uint16
// For conditional jump instructions, the number of instructions
// to skip if the condition is true/false.
Jt uint8
Jf uint8
// Constant parameter. The meaning depends on the Op.
K uint32
}
// Assemble implements the Instruction Assemble method.
func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
// Disassemble parses ri into an Instruction and returns it. If ri is
// not recognized by this package, ri itself is returned.
func (ri RawInstruction) Disassemble() Instruction {
switch ri.Op & opMaskCls {
case opClsLoadA, opClsLoadX:
reg := Register(ri.Op & opMaskLoadDest)
sz := 0
switch ri.Op & opMaskLoadWidth {
case opLoadWidth4:
sz = 4
case opLoadWidth2:
sz = 2
case opLoadWidth1:
sz = 1
default:
return ri
}
switch ri.Op & opMaskLoadMode {
case opAddrModeImmediate:
if sz != 4 {
return ri
}
return LoadConstant{Dst: reg, Val: ri.K}
case opAddrModeScratch:
if sz != 4 || ri.K > 15 {
return ri
}
return LoadScratch{Dst: reg, N: int(ri.K)}
case opAddrModeAbsolute:
if ri.K > extOffset+0xffffffff {
return LoadExtension{Num: Extension(-extOffset + ri.K)}
}
return LoadAbsolute{Size: sz, Off: ri.K}
case opAddrModeIndirect:
return LoadIndirect{Size: sz, Off: ri.K}
case opAddrModePacketLen:
if sz != 4 {
return ri
}
return LoadExtension{Num: ExtLen}
case opAddrModeMemShift:
return LoadMemShift{Off: ri.K}
default:
return ri
}
case opClsStoreA:
if ri.Op != opClsStoreA || ri.K > 15 {
return ri
}
return StoreScratch{Src: RegA, N: int(ri.K)}
case opClsStoreX:
if ri.Op != opClsStoreX || ri.K > 15 {
return ri
}
return StoreScratch{Src: RegX, N: int(ri.K)}
case opClsALU:
switch op := ALUOp(ri.Op & opMaskOperator); op {
case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
if ri.Op&opMaskOperandSrc != 0 {
return ALUOpX{Op: op}
}
return ALUOpConstant{Op: op, Val: ri.K}
case aluOpNeg:
return NegateA{}
default:
return ri
}
case opClsJump:
if ri.Op&opMaskJumpConst != opClsJump {
return ri
}
switch ri.Op & opMaskJumpCond {
case opJumpAlways:
return Jump{Skip: ri.K}
case opJumpEqual:
if ri.Jt == 0 {
return JumpIf{
Cond: JumpNotEqual,
Val: ri.K,
SkipTrue: ri.Jf,
SkipFalse: 0,
}
}
return JumpIf{
Cond: JumpEqual,
Val: ri.K,
SkipTrue: ri.Jt,
SkipFalse: ri.Jf,
}
case opJumpGT:
if ri.Jt == 0 {
return JumpIf{
Cond: JumpLessOrEqual,
Val: ri.K,
SkipTrue: ri.Jf,
SkipFalse: 0,
}
}
return JumpIf{
Cond: JumpGreaterThan,
Val: ri.K,
SkipTrue: ri.Jt,
SkipFalse: ri.Jf,
}
case opJumpGE:
if ri.Jt == 0 {
return JumpIf{
Cond: JumpLessThan,
Val: ri.K,
SkipTrue: ri.Jf,
SkipFalse: 0,
}
}
return JumpIf{
Cond: JumpGreaterOrEqual,
Val: ri.K,
SkipTrue: ri.Jt,
SkipFalse: ri.Jf,
}
case opJumpSet:
return JumpIf{
Cond: JumpBitsSet,
Val: ri.K,
SkipTrue: ri.Jt,
SkipFalse: ri.Jf,
}
default:
return ri
}
case opClsReturn:
switch ri.Op {
case opClsReturn | opRetSrcA:
return RetA{}
case opClsReturn | opRetSrcConstant:
return RetConstant{Val: ri.K}
default:
return ri
}
case opClsMisc:
switch ri.Op {
case opClsMisc | opMiscTAX:
return TAX{}
case opClsMisc | opMiscTXA:
return TXA{}
default:
return ri
}
default:
panic("unreachable") // switch is exhaustive on the bit pattern
}
}
// LoadConstant loads Val into register Dst.
type LoadConstant struct {
Dst Register
Val uint32
}
// Assemble implements the Instruction Assemble method.
func (a LoadConstant) Assemble() (RawInstruction, error) {
return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
}
// String returns the instruction in assembler notation.
func (a LoadConstant) String() string {
switch a.Dst {
case RegA:
return fmt.Sprintf("ld #%d", a.Val)
case RegX:
return fmt.Sprintf("ldx #%d", a.Val)
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// LoadScratch loads scratch[N] into register Dst.
type LoadScratch struct {
Dst Register
N int // 0-15
}
// Assemble implements the Instruction Assemble method.
func (a LoadScratch) Assemble() (RawInstruction, error) {
if a.N < 0 || a.N > 15 {
return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
}
return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
}
// String returns the instruction in assembler notation.
func (a LoadScratch) String() string {
switch a.Dst {
case RegA:
return fmt.Sprintf("ld M[%d]", a.N)
case RegX:
return fmt.Sprintf("ldx M[%d]", a.N)
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
// register A.
type LoadAbsolute struct {
Off uint32
Size int // 1, 2 or 4
}
// Assemble implements the Instruction Assemble method.
func (a LoadAbsolute) Assemble() (RawInstruction, error) {
return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
}
// String returns the instruction in assembler notation.
func (a LoadAbsolute) String() string {
switch a.Size {
case 1: // byte
return fmt.Sprintf("ldb [%d]", a.Off)
case 2: // half word
return fmt.Sprintf("ldh [%d]", a.Off)
case 4: // word
if a.Off > extOffset+0xffffffff {
return LoadExtension{Num: Extension(a.Off + 0x1000)}.String()
}
return fmt.Sprintf("ld [%d]", a.Off)
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
// into register A.
type LoadIndirect struct {
Off uint32
Size int // 1, 2 or 4
}
// Assemble implements the Instruction Assemble method.
func (a LoadIndirect) Assemble() (RawInstruction, error) {
return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
}
// String returns the instruction in assembler notation.
func (a LoadIndirect) String() string {
switch a.Size {
case 1: // byte
return fmt.Sprintf("ldb [x + %d]", a.Off)
case 2: // half word
return fmt.Sprintf("ldh [x + %d]", a.Off)
case 4: // word
return fmt.Sprintf("ld [x + %d]", a.Off)
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// LoadMemShift multiplies the first 4 bits of the byte at packet[Off]
// by 4 and stores the result in register X.
//
// This instruction is mainly useful to load into X the length of an
// IPv4 packet header in a single instruction, rather than have to do
// the arithmetic on the header's first byte by hand.
type LoadMemShift struct {
Off uint32
}
// Assemble implements the Instruction Assemble method.
func (a LoadMemShift) Assemble() (RawInstruction, error) {
return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
}
// String returns the instruction in assembler notation.
func (a LoadMemShift) String() string {
return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
}
// LoadExtension invokes a linux-specific extension and stores the
// result in register A.
type LoadExtension struct {
Num Extension
}
// Assemble implements the Instruction Assemble method.
func (a LoadExtension) Assemble() (RawInstruction, error) {
if a.Num == ExtLen {
return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
}
return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
}
// String returns the instruction in assembler notation.
func (a LoadExtension) String() string {
switch a.Num {
case ExtLen:
return "ld #len"
case ExtProto:
return "ld #proto"
case ExtType:
return "ld #type"
case ExtPayloadOffset:
return "ld #poff"
case ExtInterfaceIndex:
return "ld #ifidx"
case ExtNetlinkAttr:
return "ld #nla"
case ExtNetlinkAttrNested:
return "ld #nlan"
case ExtMark:
return "ld #mark"
case ExtQueue:
return "ld #queue"
case ExtLinkLayerType:
return "ld #hatype"
case ExtRXHash:
return "ld #rxhash"
case ExtCPUID:
return "ld #cpu"
case ExtVLANTag:
return "ld #vlan_tci"
case ExtVLANTagPresent:
return "ld #vlan_avail"
case ExtVLANProto:
return "ld #vlan_tpid"
case ExtRand:
return "ld #rand"
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// StoreScratch stores register Src into scratch[N].
type StoreScratch struct {
Src Register
N int // 0-15
}
// Assemble implements the Instruction Assemble method.
func (a StoreScratch) Assemble() (RawInstruction, error) {
if a.N < 0 || a.N > 15 {
return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
}
var op uint16
switch a.Src {
case RegA:
op = opClsStoreA
case RegX:
op = opClsStoreX
default:
return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
}
return RawInstruction{
Op: op,
K: uint32(a.N),
}, nil
}
// String returns the instruction in assembler notation.
func (a StoreScratch) String() string {
switch a.Src {
case RegA:
return fmt.Sprintf("st M[%d]", a.N)
case RegX:
return fmt.Sprintf("stx M[%d]", a.N)
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// ALUOpConstant executes A = A <Op> Val.
type ALUOpConstant struct {
Op ALUOp
Val uint32
}
// Assemble implements the Instruction Assemble method.
func (a ALUOpConstant) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsALU | opALUSrcConstant | uint16(a.Op),
K: a.Val,
}, nil
}
// String returns the instruction in assembler notation.
func (a ALUOpConstant) String() string {
switch a.Op {
case ALUOpAdd:
return fmt.Sprintf("add #%d", a.Val)
case ALUOpSub:
return fmt.Sprintf("sub #%d", a.Val)
case ALUOpMul:
return fmt.Sprintf("mul #%d", a.Val)
case ALUOpDiv:
return fmt.Sprintf("div #%d", a.Val)
case ALUOpMod:
return fmt.Sprintf("mod #%d", a.Val)
case ALUOpAnd:
return fmt.Sprintf("and #%d", a.Val)
case ALUOpOr:
return fmt.Sprintf("or #%d", a.Val)
case ALUOpXor:
return fmt.Sprintf("xor #%d", a.Val)
case ALUOpShiftLeft:
return fmt.Sprintf("lsh #%d", a.Val)
case ALUOpShiftRight:
return fmt.Sprintf("rsh #%d", a.Val)
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// ALUOpX executes A = A <Op> X
type ALUOpX struct {
Op ALUOp
}
// Assemble implements the Instruction Assemble method.
func (a ALUOpX) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsALU | opALUSrcX | uint16(a.Op),
}, nil
}
// String returns the instruction in assembler notation.
func (a ALUOpX) String() string {
switch a.Op {
case ALUOpAdd:
return "add x"
case ALUOpSub:
return "sub x"
case ALUOpMul:
return "mul x"
case ALUOpDiv:
return "div x"
case ALUOpMod:
return "mod x"
case ALUOpAnd:
return "and x"
case ALUOpOr:
return "or x"
case ALUOpXor:
return "xor x"
case ALUOpShiftLeft:
return "lsh x"
case ALUOpShiftRight:
return "rsh x"
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
// NegateA executes A = -A.
type NegateA struct{}
// Assemble implements the Instruction Assemble method.
func (a NegateA) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsALU | uint16(aluOpNeg),
}, nil
}
// String returns the instruction in assembler notation.
func (a NegateA) String() string {
return fmt.Sprintf("neg")
}
// Jump skips the following Skip instructions in the program.
type Jump struct {
Skip uint32
}
// Assemble implements the Instruction Assemble method.
func (a Jump) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsJump | opJumpAlways,
K: a.Skip,
}, nil
}
// String returns the instruction in assembler notation.
func (a Jump) String() string {
return fmt.Sprintf("ja %d", a.Skip)
}
// JumpIf skips SkipTrue instructions in the program if A <Cond> Val
// is true, and SkipFalse instructions otherwise.
type JumpIf struct {
Cond JumpTest
Val uint32
SkipTrue uint8
SkipFalse uint8
}
// Assemble implements the Instruction Assemble method.
func (a JumpIf) Assemble() (RawInstruction, error) {
var (
cond uint16
flip bool
)
switch a.Cond {
case JumpEqual:
cond = opJumpEqual
case JumpNotEqual:
cond, flip = opJumpEqual, true
case JumpGreaterThan:
cond = opJumpGT
case JumpLessThan:
cond, flip = opJumpGE, true
case JumpGreaterOrEqual:
cond = opJumpGE
case JumpLessOrEqual:
cond, flip = opJumpGT, true
case JumpBitsSet:
cond = opJumpSet
case JumpBitsNotSet:
cond, flip = opJumpSet, true
default:
return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond)
}
jt, jf := a.SkipTrue, a.SkipFalse
if flip {
jt, jf = jf, jt
}
return RawInstruction{
Op: opClsJump | cond,
Jt: jt,
Jf: jf,
K: a.Val,
}, nil
}
// String returns the instruction in assembler notation.
func (a JumpIf) String() string {
switch a.Cond {
// K == A
case JumpEqual:
return conditionalJump(a, "jeq", "jneq")
// K != A
case JumpNotEqual:
return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue)
// K > A
case JumpGreaterThan:
return conditionalJump(a, "jgt", "jle")
// K < A
case JumpLessThan:
return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue)
// K >= A
case JumpGreaterOrEqual:
return conditionalJump(a, "jge", "jlt")
// K <= A
case JumpLessOrEqual:
return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue)
// K & A != 0
case JumpBitsSet:
if a.SkipFalse > 0 {
return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse)
}
return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue)
// K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips
case JumpBitsNotSet:
return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String()
default:
return fmt.Sprintf("unknown instruction: %#v", a)
}
}
func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string {
if inst.SkipTrue > 0 {
if inst.SkipFalse > 0 {
return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse)
}
return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue)
}
return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse)
}
// RetA exits the BPF program, returning the value of register A.
type RetA struct{}
// Assemble implements the Instruction Assemble method.
func (a RetA) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsReturn | opRetSrcA,
}, nil
}
// String returns the instruction in assembler notation.
func (a RetA) String() string {
return fmt.Sprintf("ret a")
}
// RetConstant exits the BPF program, returning a constant value.
type RetConstant struct {
Val uint32
}
// Assemble implements the Instruction Assemble method.
func (a RetConstant) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsReturn | opRetSrcConstant,
K: a.Val,
}, nil
}
// String returns the instruction in assembler notation.
func (a RetConstant) String() string {
return fmt.Sprintf("ret #%d", a.Val)
}
// TXA copies the value of register X to register A.
type TXA struct{}
// Assemble implements the Instruction Assemble method.
func (a TXA) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsMisc | opMiscTXA,
}, nil
}
// String returns the instruction in assembler notation.
func (a TXA) String() string {
return fmt.Sprintf("txa")
}
// TAX copies the value of register A to register X.
type TAX struct{}
// Assemble implements the Instruction Assemble method.
func (a TAX) Assemble() (RawInstruction, error) {
return RawInstruction{
Op: opClsMisc | opMiscTAX,
}, nil
}
// String returns the instruction in assembler notation.
func (a TAX) String() string {
return fmt.Sprintf("tax")
}
func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
var (
cls uint16
sz uint16
)
switch dst {
case RegA:
cls = opClsLoadA
case RegX:
cls = opClsLoadX
default:
return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
}
switch loadSize {
case 1:
sz = opLoadWidth1
case 2:
sz = opLoadWidth2
case 4:
sz = opLoadWidth4
default:
return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz)
}
return RawInstruction{
Op: cls | sz | mode,
K: k,
}, nil
}
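A minimal usage sketch of these instruction types (not part of the vendored file; the filter contents, offsets, and EtherType value are illustrative): build a program that accepts IPv4 Ethernet frames and drops everything else, then turn it into raw instructions with the package's Assemble function.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	// Accept frames whose EtherType (2 bytes at offset 12) is 0x0800 (IPv4),
	// keeping up to 1500 bytes of each; drop everything else.
	prog := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},                         // A = EtherType
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x0800, SkipFalse: 1}, // not IPv4? skip the accept
		bpf.RetConstant{Val: 1500},                                 // accept
		bpf.RetConstant{Val: 0},                                    // drop
	}
	raw, err := bpf.Assemble(prog)
	if err != nil {
		panic(err)
	}
	fmt.Println(raw) // raw instructions a kernel filter or the emulator below can run
}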

140
vendor/golang.org/x/net/bpf/vm.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
import (
"errors"
"fmt"
)
// A VM is an emulated BPF virtual machine.
type VM struct {
filter []Instruction
}
// NewVM returns a new VM using the input BPF program.
func NewVM(filter []Instruction) (*VM, error) {
if len(filter) == 0 {
return nil, errors.New("one or more Instructions must be specified")
}
for i, ins := range filter {
check := len(filter) - (i + 1)
switch ins := ins.(type) {
// Check for out-of-bounds jumps in instructions
case Jump:
if check <= int(ins.Skip) {
return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
}
case JumpIf:
if check <= int(ins.SkipTrue) {
return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
}
if check <= int(ins.SkipFalse) {
return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
}
// Check for division or modulus by zero
case ALUOpConstant:
if ins.Val != 0 {
break
}
switch ins.Op {
case ALUOpDiv, ALUOpMod:
return nil, errors.New("cannot divide by zero using ALUOpConstant")
}
// Check for unknown extensions
case LoadExtension:
switch ins.Num {
case ExtLen:
default:
return nil, fmt.Errorf("extension %d not implemented", ins.Num)
}
}
}
// Make sure last instruction is a return instruction
switch filter[len(filter)-1].(type) {
case RetA, RetConstant:
default:
return nil, errors.New("BPF program must end with RetA or RetConstant")
}
// Though our VM works using disassembled instructions, we
// attempt to assemble the input filter anyway to ensure it is compatible
// with an operating system VM.
_, err := Assemble(filter)
return &VM{
filter: filter,
}, err
}
// Run runs the VM's BPF program against the input bytes.
// Run returns the number of bytes accepted by the BPF program, and any errors
// which occurred while processing the program.
func (v *VM) Run(in []byte) (int, error) {
var (
// Registers of the virtual machine
regA uint32
regX uint32
regScratch [16]uint32
// OK is true if the program should continue processing the next
// instruction, or false if not, causing the loop to break
ok = true
)
// TODO(mdlayher): implement:
// - NegateA:
// - would require a change from uint32 registers to int32
// registers
// TODO(mdlayher): add interop tests that check signedness of ALU
// operations against kernel implementation, and make sure Go
// implementation matches behavior
for i := 0; i < len(v.filter) && ok; i++ {
ins := v.filter[i]
switch ins := ins.(type) {
case ALUOpConstant:
regA = aluOpConstant(ins, regA)
case ALUOpX:
regA, ok = aluOpX(ins, regA, regX)
case Jump:
i += int(ins.Skip)
case JumpIf:
jump := jumpIf(ins, regA)
i += jump
case LoadAbsolute:
regA, ok = loadAbsolute(ins, in)
case LoadConstant:
regA, regX = loadConstant(ins, regA, regX)
case LoadExtension:
regA = loadExtension(ins, in)
case LoadIndirect:
regA, ok = loadIndirect(ins, in, regX)
case LoadMemShift:
regX, ok = loadMemShift(ins, in)
case LoadScratch:
regA, regX = loadScratch(ins, regScratch, regA, regX)
case RetA:
return int(regA), nil
case RetConstant:
return int(ins.Val), nil
case StoreScratch:
regScratch = storeScratch(ins, regScratch, regA, regX)
case TAX:
regX = regA
case TXA:
regA = regX
default:
return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
}
}
return 0, nil
}
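A minimal sketch of driving this emulator (not part of the vendored file; the filter and input bytes are illustrative): NewVM validates and assembles the program up front, and Run reports how many bytes of the input packet the program accepts.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	// Accept packets whose first byte is 0x42, keeping 4 bytes of them.
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 0, Size: 1},                        // A = packet[0]
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x42, SkipFalse: 1}, // mismatch? skip the accept
		bpf.RetConstant{Val: 4},                                  // accept 4 bytes
		bpf.RetConstant{Val: 0},                                  // drop
	})
	if err != nil {
		panic(err)
	}
	n, err := vm.Run([]byte{0x42, 0xff, 0xff, 0xff, 0xff})
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 4: the packet matched, so 4 bytes were accepted
}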

174
vendor/golang.org/x/net/bpf/vm_instructions.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bpf
import (
"encoding/binary"
"fmt"
)
func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
return aluOpCommon(ins.Op, regA, ins.Val)
}
func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
// Guard against division or modulus by zero by terminating
// the program, as the OS BPF VM does
if regX == 0 {
switch ins.Op {
case ALUOpDiv, ALUOpMod:
return 0, false
}
}
return aluOpCommon(ins.Op, regA, regX), true
}
func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
switch op {
case ALUOpAdd:
return regA + value
case ALUOpSub:
return regA - value
case ALUOpMul:
return regA * value
case ALUOpDiv:
// Division by zero not permitted by NewVM and aluOpX checks
return regA / value
case ALUOpOr:
return regA | value
case ALUOpAnd:
return regA & value
case ALUOpShiftLeft:
return regA << value
case ALUOpShiftRight:
return regA >> value
case ALUOpMod:
// Modulus by zero not permitted by NewVM and aluOpX checks
return regA % value
case ALUOpXor:
return regA ^ value
default:
return regA
}
}
func jumpIf(ins JumpIf, value uint32) int {
var ok bool
inV := uint32(ins.Val)
switch ins.Cond {
case JumpEqual:
ok = value == inV
case JumpNotEqual:
ok = value != inV
case JumpGreaterThan:
ok = value > inV
case JumpLessThan:
ok = value < inV
case JumpGreaterOrEqual:
ok = value >= inV
case JumpLessOrEqual:
ok = value <= inV
case JumpBitsSet:
ok = (value & inV) != 0
case JumpBitsNotSet:
ok = (value & inV) == 0
}
if ok {
return int(ins.SkipTrue)
}
return int(ins.SkipFalse)
}
func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
offset := int(ins.Off)
size := int(ins.Size)
return loadCommon(in, offset, size)
}
func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
switch ins.Dst {
case RegA:
regA = ins.Val
case RegX:
regX = ins.Val
}
return regA, regX
}
func loadExtension(ins LoadExtension, in []byte) uint32 {
switch ins.Num {
case ExtLen:
return uint32(len(in))
default:
panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
}
}
func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
offset := int(ins.Off) + int(regX)
size := int(ins.Size)
return loadCommon(in, offset, size)
}
func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
offset := int(ins.Off)
if !inBounds(len(in), offset, 0) {
return 0, false
}
// Mask off high 4 bits and multiply low 4 bits by 4
return uint32(in[offset]&0x0f) * 4, true
}
func inBounds(inLen int, offset int, size int) bool {
return offset+size <= inLen
}
func loadCommon(in []byte, offset int, size int) (uint32, bool) {
if !inBounds(len(in), offset, size) {
return 0, false
}
switch size {
case 1:
return uint32(in[offset]), true
case 2:
return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
case 4:
return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
default:
panic(fmt.Sprintf("invalid load size: %d", size))
}
}
func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
switch ins.Dst {
case RegA:
regA = regScratch[ins.N]
case RegX:
regX = regScratch[ins.N]
}
return regA, regX
}
func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
switch ins.Src {
case RegA:
regScratch[ins.N] = regA
case RegX:
regScratch[ins.N] = regX
}
return regScratch
}
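A worked instance of the loadMemShift arithmetic above, in plain Go (the byte value is illustrative): for a typical IPv4 first byte of 0x45, the low nibble is 5, so the value stored in X is 20, the header length in bytes.

package main

import "fmt"

func main() {
	first := byte(0x45)         // IPv4: version 4 in the high nibble, IHL 5 in the low nibble
	x := uint32(first&0x0f) * 4 // mask off the high 4 bits, multiply the low 4 bits by 4
	fmt.Println(x)              // 20: IPv4 header length in bytes
}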

180
vendor/golang.org/x/net/internal/iana/const.go generated vendored Normal file
View File

@ -0,0 +1,180 @@
// go generate gen.go
// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
package iana
// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12
const (
DiffServCS0 = 0x0 // CS0
DiffServCS1 = 0x20 // CS1
DiffServCS2 = 0x40 // CS2
DiffServCS3 = 0x60 // CS3
DiffServCS4 = 0x80 // CS4
DiffServCS5 = 0xa0 // CS5
DiffServCS6 = 0xc0 // CS6
DiffServCS7 = 0xe0 // CS7
DiffServAF11 = 0x28 // AF11
DiffServAF12 = 0x30 // AF12
DiffServAF13 = 0x38 // AF13
DiffServAF21 = 0x48 // AF21
DiffServAF22 = 0x50 // AF22
DiffServAF23 = 0x58 // AF23
DiffServAF31 = 0x68 // AF31
DiffServAF32 = 0x70 // AF32
DiffServAF33 = 0x78 // AF33
DiffServAF41 = 0x88 // AF41
DiffServAF42 = 0x90 // AF42
DiffServAF43 = 0x98 // AF43
DiffServEF = 0xb8 // EF
DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT
)
// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06
const (
NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport)
ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1))
ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0))
CongestionExperienced = 0x3 // CE (Congestion Experienced)
)
// Protocol Numbers, Updated: 2016-06-22
const (
ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number
ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option
ProtocolICMP = 1 // Internet Control Message
ProtocolIGMP = 2 // Internet Group Management
ProtocolGGP = 3 // Gateway-to-Gateway
ProtocolIPv4 = 4 // IPv4 encapsulation
ProtocolST = 5 // Stream
ProtocolTCP = 6 // Transmission Control
ProtocolCBT = 7 // CBT
ProtocolEGP = 8 // Exterior Gateway Protocol
ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
ProtocolBBNRCCMON = 10 // BBN RCC Monitoring
ProtocolNVPII = 11 // Network Voice Protocol
ProtocolPUP = 12 // PUP
ProtocolEMCON = 14 // EMCON
ProtocolXNET = 15 // Cross Net Debugger
ProtocolCHAOS = 16 // Chaos
ProtocolUDP = 17 // User Datagram
ProtocolMUX = 18 // Multiplexing
ProtocolDCNMEAS = 19 // DCN Measurement Subsystems
ProtocolHMP = 20 // Host Monitoring
ProtocolPRM = 21 // Packet Radio Measurement
ProtocolXNSIDP = 22 // XEROX NS IDP
ProtocolTRUNK1 = 23 // Trunk-1
ProtocolTRUNK2 = 24 // Trunk-2
ProtocolLEAF1 = 25 // Leaf-1
ProtocolLEAF2 = 26 // Leaf-2
ProtocolRDP = 27 // Reliable Data Protocol
ProtocolIRTP = 28 // Internet Reliable Transaction
ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4
ProtocolNETBLT = 30 // Bulk Data Transfer Protocol
ProtocolMFENSP = 31 // MFE Network Services Protocol
ProtocolMERITINP = 32 // MERIT Internodal Protocol
ProtocolDCCP = 33 // Datagram Congestion Control Protocol
Protocol3PC = 34 // Third Party Connect Protocol
ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol
ProtocolXTP = 36 // XTP
ProtocolDDP = 37 // Datagram Delivery Protocol
ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto
ProtocolTPPP = 39 // TP++ Transport Protocol
ProtocolIL = 40 // IL Transport Protocol
ProtocolIPv6 = 41 // IPv6 encapsulation
ProtocolSDRP = 42 // Source Demand Routing Protocol
ProtocolIPv6Route = 43 // Routing Header for IPv6
ProtocolIPv6Frag = 44 // Fragment Header for IPv6
ProtocolIDRP = 45 // Inter-Domain Routing Protocol
ProtocolRSVP = 46 // Reservation Protocol
ProtocolGRE = 47 // Generic Routing Encapsulation
ProtocolDSR = 48 // Dynamic Source Routing Protocol
ProtocolBNA = 49 // BNA
ProtocolESP = 50 // Encap Security Payload
ProtocolAH = 51 // Authentication Header
ProtocolINLSP = 52 // Integrated Net Layer Security TUBA
ProtocolNARP = 54 // NBMA Address Resolution Protocol
ProtocolMOBILE = 55 // IP Mobility
ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management
ProtocolSKIP = 57 // SKIP
ProtocolIPv6ICMP = 58 // ICMP for IPv6
ProtocolIPv6NoNxt = 59 // No Next Header for IPv6
ProtocolIPv6Opts = 60 // Destination Options for IPv6
ProtocolCFTP = 62 // CFTP
ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK
ProtocolKRYPTOLAN = 65 // Kryptolan
ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol
ProtocolIPPC = 67 // Internet Pluribus Packet Core
ProtocolSATMON = 69 // SATNET Monitoring
ProtocolVISA = 70 // VISA Protocol
ProtocolIPCV = 71 // Internet Packet Core Utility
ProtocolCPNX = 72 // Computer Protocol Network Executive
ProtocolCPHB = 73 // Computer Protocol Heart Beat
ProtocolWSN = 74 // Wang Span Network
ProtocolPVP = 75 // Packet Video Protocol
ProtocolBRSATMON = 76 // Backroom SATNET Monitoring
ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary
ProtocolWBMON = 78 // WIDEBAND Monitoring
ProtocolWBEXPAK = 79 // WIDEBAND EXPAK
ProtocolISOIP = 80 // ISO Internet Protocol
ProtocolVMTP = 81 // VMTP
ProtocolSECUREVMTP = 82 // SECURE-VMTP
ProtocolVINES = 83 // VINES
ProtocolTTP = 84 // Transaction Transport Protocol
ProtocolIPTM = 84 // Internet Protocol Traffic Manager
ProtocolNSFNETIGP = 85 // NSFNET-IGP
ProtocolDGP = 86 // Dissimilar Gateway Protocol
ProtocolTCF = 87 // TCF
ProtocolEIGRP = 88 // EIGRP
ProtocolOSPFIGP = 89 // OSPFIGP
ProtocolSpriteRPC = 90 // Sprite RPC Protocol
ProtocolLARP = 91 // Locus Address Resolution Protocol
ProtocolMTP = 92 // Multicast Transport Protocol
ProtocolAX25 = 93 // AX.25 Frames
ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol
ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro.
ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation
ProtocolENCAP = 98 // Encapsulation Header
ProtocolGMTP = 100 // GMTP
ProtocolIFMP = 101 // Ipsilon Flow Management Protocol
ProtocolPNNI = 102 // PNNI over IP
ProtocolPIM = 103 // Protocol Independent Multicast
ProtocolARIS = 104 // ARIS
ProtocolSCPS = 105 // SCPS
ProtocolQNX = 106 // QNX
ProtocolAN = 107 // Active Networks
ProtocolIPComp = 108 // IP Payload Compression Protocol
ProtocolSNP = 109 // Sitara Networks Protocol
ProtocolCompaqPeer = 110 // Compaq Peer Protocol
ProtocolIPXinIP = 111 // IPX in IP
ProtocolVRRP = 112 // Virtual Router Redundancy Protocol
ProtocolPGM = 113 // PGM Reliable Transport Protocol
ProtocolL2TP = 115 // Layer Two Tunneling Protocol
ProtocolDDX = 116 // D-II Data Exchange (DDX)
ProtocolIATP = 117 // Interactive Agent Transfer Protocol
ProtocolSTP = 118 // Schedule Transfer Protocol
ProtocolSRP = 119 // SpectraLink Radio Protocol
ProtocolUTI = 120 // UTI
ProtocolSMP = 121 // Simple Message Protocol
ProtocolPTP = 123 // Performance Transparency Protocol
ProtocolISIS = 124 // ISIS over IPv4
ProtocolFIRE = 125 // FIRE
ProtocolCRTP = 126 // Combat Radio Transport Protocol
ProtocolCRUDP = 127 // Combat Radio User Datagram
ProtocolSSCOPMCE = 128 // SSCOPMCE
ProtocolIPLT = 129 // IPLT
ProtocolSPS = 130 // Secure Packet Shield
ProtocolPIPE = 131 // Private IP Encapsulation within IP
ProtocolSCTP = 132 // Stream Control Transmission Protocol
ProtocolFC = 133 // Fibre Channel
ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE
ProtocolMobilityHeader = 135 // Mobility Header
ProtocolUDPLite = 136 // UDPLite
ProtocolMPLSinIP = 137 // MPLS-in-IP
ProtocolMANET = 138 // MANET Protocols
ProtocolHIP = 139 // Host Identity Protocol
ProtocolShim6 = 140 // Shim6 Protocol
ProtocolWESP = 141 // Wrapped Encapsulating Security Payload
ProtocolROHC = 142 // Robust Header Compression
ProtocolReserved = 255 // Reserved
)
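A minimal sketch of how these constants are typically consumed (the helper below is hypothetical, and since the package is internal to golang.org/x/net it can only be imported by code inside that module): compare the protocol byte of an IPv4 header against the IANA-assigned number for TCP.

package example

import "golang.org/x/net/internal/iana"

// isTCP reports whether the IPv4 header h (at least 20 bytes long) carries TCP.
// Byte 9 of an IPv4 header is the protocol number.
func isTCP(h []byte) bool {
	if len(h) < 20 {
		return false
	}
	return int(h[9]) == iana.ProtocolTCP
}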

293
vendor/golang.org/x/net/internal/iana/gen.go generated vendored Normal file
View File

@ -0,0 +1,293 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
//go:generate go run gen.go
// This program generates internet protocol constants and tables by
// reading IANA protocol registries.
package main
import (
"bytes"
"encoding/xml"
"fmt"
"go/format"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
)
var registries = []struct {
url string
parse func(io.Writer, io.Reader) error
}{
{
"http://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
parseDSCPRegistry,
},
{
"http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml",
parseTOSTCByte,
},
{
"http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
parseProtocolNumbers,
},
}
func main() {
var bb bytes.Buffer
fmt.Fprintf(&bb, "// go generate gen.go\n")
fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n")
fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n")
for _, r := range registries {
resp, err := http.Get(r.url)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url)
os.Exit(1)
}
if err := r.parse(&bb, resp.Body); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
fmt.Fprintf(&bb, "\n")
}
b, err := format.Source(bb.Bytes())
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
if err := ioutil.WriteFile("const.go", b, 0644); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func parseDSCPRegistry(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var dr dscpRegistry
if err := dec.Decode(&dr); err != nil {
return err
}
drs := dr.escape()
fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated)
fmt.Fprintf(w, "const (\n")
for _, dr := range drs {
fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value)
fmt.Fprintf(w, "// %s\n", dr.OrigName)
}
fmt.Fprintf(w, ")\n")
return nil
}
type dscpRegistry struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
Note string `xml:"note"`
RegTitle string `xml:"registry>title"`
PoolRecords []struct {
Name string `xml:"name"`
Space string `xml:"space"`
} `xml:"registry>record"`
Records []struct {
Name string `xml:"name"`
Space string `xml:"space"`
} `xml:"registry>registry>record"`
}
type canonDSCPRecord struct {
OrigName string
Name string
Value int
}
func (drr *dscpRegistry) escape() []canonDSCPRecord {
drs := make([]canonDSCPRecord, len(drr.Records))
sr := strings.NewReplacer(
"+", "",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, dr := range drr.Records {
s := strings.TrimSpace(dr.Name)
drs[i].OrigName = s
drs[i].Name = sr.Replace(s)
n, err := strconv.ParseUint(dr.Space, 2, 8)
if err != nil {
continue
}
drs[i].Value = int(n) << 2
}
return drs
}
func parseTOSTCByte(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var ttb tosTCByte
if err := dec.Decode(&ttb); err != nil {
return err
}
trs := ttb.escape()
fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated)
fmt.Fprintf(w, "const (\n")
for _, tr := range trs {
fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value)
fmt.Fprintf(w, "// %s\n", tr.OrigKeyword)
}
fmt.Fprintf(w, ")\n")
return nil
}
type tosTCByte struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
Note string `xml:"note"`
RegTitle string `xml:"registry>title"`
Records []struct {
Binary string `xml:"binary"`
Keyword string `xml:"keyword"`
} `xml:"registry>record"`
}
type canonTOSTCByteRecord struct {
OrigKeyword string
Keyword string
Value int
}
func (ttb *tosTCByte) escape() []canonTOSTCByteRecord {
trs := make([]canonTOSTCByteRecord, len(ttb.Records))
sr := strings.NewReplacer(
"Capable", "",
"(", "",
")", "",
"+", "",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, tr := range ttb.Records {
s := strings.TrimSpace(tr.Keyword)
trs[i].OrigKeyword = s
ss := strings.Split(s, " ")
if len(ss) > 1 {
trs[i].Keyword = strings.Join(ss[1:], " ")
} else {
trs[i].Keyword = ss[0]
}
trs[i].Keyword = sr.Replace(trs[i].Keyword)
n, err := strconv.ParseUint(tr.Binary, 2, 8)
if err != nil {
continue
}
trs[i].Value = int(n)
}
return trs
}
func parseProtocolNumbers(w io.Writer, r io.Reader) error {
dec := xml.NewDecoder(r)
var pn protocolNumbers
if err := dec.Decode(&pn); err != nil {
return err
}
prs := pn.escape()
prs = append([]canonProtocolRecord{{
Name: "IP",
Descr: "IPv4 encapsulation, pseudo protocol number",
Value: 0,
}}, prs...)
fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated)
fmt.Fprintf(w, "const (\n")
for _, pr := range prs {
if pr.Name == "" {
continue
}
fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value)
s := pr.Descr
if s == "" {
s = pr.OrigName
}
fmt.Fprintf(w, "// %s\n", s)
}
fmt.Fprintf(w, ")\n")
return nil
}
type protocolNumbers struct {
XMLName xml.Name `xml:"registry"`
Title string `xml:"title"`
Updated string `xml:"updated"`
RegTitle string `xml:"registry>title"`
Note string `xml:"registry>note"`
Records []struct {
Value string `xml:"value"`
Name string `xml:"name"`
Descr string `xml:"description"`
} `xml:"registry>record"`
}
type canonProtocolRecord struct {
OrigName string
Name string
Descr string
Value int
}
func (pn *protocolNumbers) escape() []canonProtocolRecord {
prs := make([]canonProtocolRecord, len(pn.Records))
sr := strings.NewReplacer(
"-in-", "in",
"-within-", "within",
"-over-", "over",
"+", "P",
"-", "",
"/", "",
".", "",
" ", "",
)
for i, pr := range pn.Records {
if strings.Contains(pr.Name, "Deprecated") ||
strings.Contains(pr.Name, "deprecated") {
continue
}
prs[i].OrigName = pr.Name
s := strings.TrimSpace(pr.Name)
switch pr.Name {
case "ISIS over IPv4":
prs[i].Name = "ISIS"
case "manet":
prs[i].Name = "MANET"
default:
prs[i].Name = sr.Replace(s)
}
ss := strings.Split(pr.Descr, "\n")
for i := range ss {
ss[i] = strings.TrimSpace(ss[i])
}
if len(ss) > 1 {
prs[i].Descr = strings.Join(ss, " ")
} else {
prs[i].Descr = ss[0]
}
prs[i].Value, _ = strconv.Atoi(pr.Value)
}
return prs
}

11
vendor/golang.org/x/net/internal/socket/cmsghdr.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package socket
func (h *cmsghdr) len() int { return int(h.Len) }
func (h *cmsghdr) lvl() int { return int(h.Level) }
func (h *cmsghdr) typ() int { return int(h.Type) }

13
vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd netbsd openbsd
package socket
func (h *cmsghdr) set(l, lvl, typ int) {
h.Len = uint32(l)
h.Level = int32(lvl)
h.Type = int32(typ)
}

View File

@ -0,0 +1,14 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm mips mipsle 386
// +build linux
package socket
func (h *cmsghdr) set(l, lvl, typ int) {
h.Len = uint32(l)
h.Level = int32(lvl)
h.Type = int32(typ)
}

Some files were not shown because too many files have changed in this diff.