Merge branch 'pr_182' into dev

commit b1d1a7a20a by fatedier, 2016-12-24 08:51:26 -06:00 (committed via GitHub)
43 changed files with 5540 additions and 163 deletions

.travis.yml

@@ -3,8 +3,8 @@ language: go
 go:
   - 1.5.4
-  - 1.6.3
-  - 1.7
+  - 1.6.4
+  - 1.7.4
 install:
   - make

Godeps/Godeps.json (generated)

@@ -1,9 +1,9 @@
 {
 	"ImportPath": "github.com/fatedier/frp",
-	"GoVersion": "go1.6",
-	"GodepVersion": "v74",
+	"GoVersion": "go1.7",
+	"GodepVersion": "v75",
 	"Packages": [
-		"./src/..."
+		"./..."
 	],
 	"Deps": [
 		{
@@ -11,16 +11,31 @@
 			"Comment": "v1.7.0-7-gefbde1e",
 			"Rev": "efbde1ee77517486eac03e814e01d724ddad18e6"
 		},
+		{
+			"ImportPath": "github.com/davecgh/go-spew/spew",
+			"Comment": "v1.1.0",
+			"Rev": "346938d642f2ec3594ed81d874461961cd0faa76"
+		},
 		{
 			"ImportPath": "github.com/docopt/docopt-go",
 			"Comment": "0.6.2",
 			"Rev": "784ddc588536785e7299f7272f39101f7faccc3f"
 		},
+		{
+			"ImportPath": "github.com/pmezard/go-difflib/difflib",
+			"Comment": "v1.0.0",
+			"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
+		},
 		{
 			"ImportPath": "github.com/rakyll/statik/fs",
 			"Comment": "v0.1.0",
 			"Rev": "274df120e9065bdd08eb1120e0375e3dc1ae8465"
 		},
+		{
+			"ImportPath": "github.com/stretchr/testify/assert",
+			"Comment": "v1.1.4-25-g2402e8e",
+			"Rev": "2402e8e7a02fc811447d11f881aa9746cdc57983"
+		},
 		{
 			"ImportPath": "github.com/vaughan0/go-ini",
 			"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"

README.md

@@ -6,30 +6,39 @@
 ## What is frp?
-frp is a fast reverse proxy to help you expose a local server behind a NAT or firewall to the internet. Now, it supports tcp, http and https protocol when requests can be forwarded by domains to backward web services.
+frp is a fast reverse proxy to help you expose a local server behind a NAT or firewall to the internet. Now, it supports tcp, udp, http and https protocols, and requests can be forwarded to backend web services by domain name.
 ## Catalog
+<!-- vim-markdown-toc GFM -->
 * [What can I do with frp?](#what-can-i-do-with-frp)
 * [Status](#status)
 * [Architecture](#architecture)
 * [Example Usage](#example-usage)
 * [Communicate with your computer in LAN by SSH](#communicate-with-your-computer-in-lan-by-ssh)
 * [Visit your web service in LAN by custom domains](#visit-your-web-service-in-lan-by-custom-domains)
+* [Forward DNS query request](#forward-dns-query-request)
 * [Features](#features)
 * [Dashboard](#dashboard)
 * [Authentication](#authentication)
 * [Encryption and Compression](#encryption-and-compression)
 * [Reload configures without frps stopped](#reload-configures-without-frps-stopped)
 * [Privilege Mode](#privilege-mode)
 * [Port White List](#port-white-list)
 * [Connection Pool](#connection-pool)
 * [Rewriting the Host Header](#rewriting-the-host-header)
+* [Password protecting your web service](#password-protecting-your-web-service)
+* [Custom subdomain names](#custom-subdomain-names)
+* [Connect frps by HTTP PROXY](#connect-frps-by-http-proxy)
 * [Development Plan](#development-plan)
 * [Contributing](#contributing)
 * [Donation](#donation)
+* [AliPay](#alipay)
+* [Paypal](#paypal)
 * [Contributors](#contributors)
+<!-- vim-markdown-toc -->
 ## What can I do with frp?
 * Expose any http and https service behind a NAT or firewall to the internet by a server with public IP address(Name-based Virtual Host Support).
@@ -82,6 +91,8 @@ Put **frpc** and **frpc.ini** to your server in LAN.
 auth_token = 123
 [ssh]
+type = tcp
+local_ip = 127.0.0.1
 local_port = 22
 ```
@@ -97,7 +108,7 @@ Put **frpc** and **frpc.ini** to your server in LAN.
 Sometimes we want to expose a local web service behind a NAT network to others for testing with your own domain name and unfortunately we can't resolve a domain name to a local ip.
-Howerver, we can expose a http or https service using frp.
+However, we can expose a http or https service using frp.
 1. Modify frps.ini, configure a http reverse proxy named [web] and set http port as 8080, custom domain as `www.yourdomain.com`:
@@ -139,6 +150,48 @@ Howerver, we can expose a http or https service using frp.
 6. Now visit your local web service using url `http://www.yourdomain.com:8080`.
+### Forward DNS query request
+1. Modify frps.ini, configure a reverse proxy named [dns]:
+```ini
+# frps.ini
+[common]
+bind_port = 7000
+[dns]
+type = udp
+listen_port = 6000
+auth_token = 123
+```
+2. Start frps:
+`./frps -c ./frps.ini`
+3. Modify frpc.ini, set remote frps's server IP as x.x.x.x, forward dns query requests to the google dns server `8.8.8.8:53`:
+```ini
+# frpc.ini
+[common]
+server_addr = x.x.x.x
+server_port = 7000
+auth_token = 123
+[dns]
+type = udp
+local_ip = 8.8.8.8
+local_port = 53
+```
+4. Start frpc:
+`./frpc -c ./frpc.ini`
+5. Send a dns query request with dig:
+`dig @x.x.x.x -p 6000 www.google.com`
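For readers without dig at hand, the same check can be done with a few lines of Go. This is an illustrative sketch only (it is not part of this commit): it hand-builds a single A-record query and sends it to the frps address and `listen_port` from the example above; `x.x.x.x:6000` stays a placeholder exactly as in the README.

```go
package main

import (
	"fmt"
	"net"
	"strings"
	"time"
)

// appendUint16 appends v in network (big-endian) byte order.
func appendUint16(b []byte, v uint16) []byte {
	return append(b, byte(v>>8), byte(v))
}

// buildQuery assembles a minimal DNS query: a 12-byte header with the
// recursion-desired flag and one A-record question for name.
func buildQuery(name string) []byte {
	msg := appendUint16(nil, 0x1234) // transaction ID (arbitrary)
	msg = appendUint16(msg, 0x0100)  // flags: recursion desired
	msg = appendUint16(msg, 1)       // QDCOUNT: one question
	msg = appendUint16(msg, 0)       // ANCOUNT
	msg = appendUint16(msg, 0)       // NSCOUNT
	msg = appendUint16(msg, 0)       // ARCOUNT
	for _, label := range strings.Split(name, ".") {
		msg = append(msg, byte(len(label)))
		msg = append(msg, label...)
	}
	msg = append(msg, 0)        // root label ends the name
	msg = appendUint16(msg, 1)  // QTYPE = A
	return appendUint16(msg, 1) // QCLASS = IN
}

func main() {
	// Replace x.x.x.x:6000 with your frps address and the [dns] listen_port.
	conn, err := net.Dial("udp", "x.x.x.x:6000")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	if _, err := conn.Write(buildQuery("www.google.com")); err != nil {
		panic(err)
	}
	conn.SetReadDeadline(time.Now().Add(5 * time.Second))
	buf := make([]byte, 2048)
	n, err := conn.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("received %d bytes of DNS response through the frp udp proxy\n", n)
}
```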
 ## Features
 ### Dashboard
@@ -151,8 +204,8 @@ Configure a port for dashboard to enable this feature:
 [common]
 dashboard_port = 7500
 # dashboard's username and password are both optional, if not set, default is admin.
-dashboard_username = abc
-dashboard_password = abc
+dashboard_user = admin
+dashboard_pwd = admin
 ```
 Then visit `http://[server_addr]:7500` to see dashboard, default username and password are both `admin`.
@@ -167,6 +220,8 @@ Client that wants to register must set a global `auth_token` equal to frps.ini
 Note that time duration between frpc and frps mustn't exceed 15 minutes because timestamp is used for authentication.
+However, this timeout duration can be modified by setting `authentication_timeout` in frps's configure file. Its default value is 900, which means 15 minutes. If it is set to 0, frps will not check authentication timeout.
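The client code later in this diff builds its credential as `GetAuthKey(name + auth_token + timestamp)` and frps enforces the window described above. The sketch below shows the general shape of such a timestamp-bound check; the MD5 choice and the helper names are assumptions for illustration, not code from this commit.

```go
package main

import (
	"crypto/md5"
	"fmt"
	"time"
)

// getAuthKey hashes its input; MD5 is assumed here purely for illustration.
func getAuthKey(data string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(data)))
}

// verify recomputes the key server-side and enforces the
// authentication_timeout window (seconds, default 900, 0 disables the check).
func verify(name, token, key string, timestamp, timeout int64) error {
	diff := time.Now().Unix() - timestamp
	if diff < 0 {
		diff = -diff
	}
	if timeout != 0 && diff > timeout {
		return fmt.Errorf("authentication timeout")
	}
	if key != getAuthKey(name+token+fmt.Sprintf("%d", timestamp)) {
		return fmt.Errorf("auth key mismatch")
	}
	return nil
}

func main() {
	name, token := "ssh", "123"
	now := time.Now().Unix()
	key := getAuthKey(name + token + fmt.Sprintf("%d", now))
	fmt.Println(verify(name, token, key, now, 900)) // <nil>
}
```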
 ### Encryption and Compression
 Default value is false, you could decide if the proxy will use encryption or compression whatever its type is:
@@ -183,7 +238,7 @@ use_gzip = true
 ### Reload configures without frps stopped
-If your want to add a new reverse proxy and avoid restarting frps, you can use this function:
+If you want to add a new reverse proxy and avoid restarting frps, you can use this function:
 1. `dashboard_port` should be set in frps.ini:
@@ -314,11 +369,67 @@ host_header_rewrite = dev.yourdomain.com
 If `host_header_rewrite` is specified, the Host header will be rewritten to match the hostname portion of the forwarding address.
+### Password protecting your web service
+Anyone who can guess your tunnel URL can access your local web server unless you protect it with a password.
+This enforces HTTP Basic Auth on all requests with the username and password you specify in frpc's configure file.
+It can only be enabled when the proxy type is http.
+```ini
+# frpc.ini
+[web]
+privilege_mode = true
+type = http
+local_port = 80
+custom_domains = test.yourdomain.com
+http_user = abc
+http_pwd = abc
+```
+Visit `test.yourdomain.com` and now you need to input the username and password.
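What frps enforces here is plain HTTP Basic Auth: each request's `Authorization` header is checked against `http_user`/`http_pwd` before it is proxied to the local service, and the dashboard change later in this diff (`use(apiReload, basicAuth)`) uses the same idea. A minimal, self-contained sketch of such a wrapper; the handler names and the 401 handling are illustrative, not this commit's code.

```go
package main

import (
	"crypto/subtle"
	"net/http"
)

// basicAuth wraps a handler and rejects requests whose Authorization header
// does not carry the expected username and password.
func basicAuth(next http.HandlerFunc, user, pwd string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		u, p, ok := r.BasicAuth()
		if !ok ||
			subtle.ConstantTimeCompare([]byte(u), []byte(user)) != 1 ||
			subtle.ConstantTimeCompare([]byte(p), []byte(pwd)) != 1 {
			w.Header().Set("WWW-Authenticate", `Basic realm="frp"`)
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		next(w, r)
	}
}

func main() {
	hello := func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok\n")) }
	http.HandleFunc("/", basicAuth(hello, "abc", "abc"))
	http.ListenAndServe("127.0.0.1:8080", nil)
}
```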
+### Custom subdomain names
+It is convenient to use the `subdomain` configuration for http and https types when many people share one frps server.
+```ini
+# frps.ini
+subdomain_host = frps.com
+```
+Resolve `*.frps.com` to the frps server's IP.
+```ini
+# frpc.ini
+[web]
+privilege_mode = true
+type = http
+local_port = 80
+subdomain = test
+```
+Now you can visit your web service by host `test.frps.com`.
+Note that if `subdomain_host` is not empty, `custom_domains` should not be a subdomain of `subdomain_host`.
+### Connect frps by HTTP PROXY
+frpc can connect to frps through an HTTP PROXY if you set the `HTTP_PROXY` OS environment variable or the `http_proxy` parameter in the frpc.ini file.
+```ini
+# frpc.ini
+server_addr = x.x.x.x
+server_port = 7000
+http_proxy = http://user:pwd@192.168.1.128:8080
+```
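Connecting through an HTTP proxy like this is normally a CONNECT handshake: frpc asks the proxy to open a raw TCP tunnel to `server_addr:server_port` and then speaks the frp protocol through it. The sketch below shows that handshake in isolation; it is not the commit's `ConnectServerByHttpProxy`, and the proxy address and credentials are the placeholders from the example above.

```go
package main

import (
	"bufio"
	"encoding/base64"
	"fmt"
	"net"
	"strings"
)

// dialViaHttpProxy opens a TCP connection to target through an HTTP proxy
// using the CONNECT method, optionally sending Basic proxy credentials.
func dialViaHttpProxy(proxyAddr, user, pwd, target string) (net.Conn, error) {
	conn, err := net.Dial("tcp", proxyAddr)
	if err != nil {
		return nil, err
	}
	req := fmt.Sprintf("CONNECT %s HTTP/1.1\r\nHost: %s\r\n", target, target)
	if user != "" {
		cred := base64.StdEncoding.EncodeToString([]byte(user + ":" + pwd))
		req += "Proxy-Authorization: Basic " + cred + "\r\n"
	}
	req += "\r\n"
	if _, err := conn.Write([]byte(req)); err != nil {
		conn.Close()
		return nil, err
	}
	// A 200 status line means the proxy opened the tunnel; skip the remaining
	// response headers, then the connection carries raw bytes to the target.
	// (Fine here because the client speaks first on the tunnel.)
	br := bufio.NewReader(conn)
	status, err := br.ReadString('\n')
	if err != nil || !strings.Contains(status, " 200") {
		conn.Close()
		return nil, fmt.Errorf("proxy CONNECT failed: %q %v", status, err)
	}
	for {
		line, err := br.ReadString('\n')
		if err != nil {
			conn.Close()
			return nil, err
		}
		if line == "\r\n" || line == "\n" {
			break
		}
	}
	return conn, nil
}

func main() {
	c, err := dialViaHttpProxy("192.168.1.128:8080", "user", "pwd", "x.x.x.x:7000")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()
	fmt.Println("tunnel to frps established via HTTP proxy")
}
```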
 ## Development Plan
-* Support udp protocol.
-* Support wildcard domain name.
 * Url router.
+* Log http request information in frps.
+* Direct reverse proxy, like haproxy.
 * Load balance to different service in frpc.
 * Debug mode for frpc, present proxy status in terminal.
 * Inspect all http requests/responses that are transmitted over the tunnel.

README_zh.md

@ -4,30 +4,39 @@
[README](README.md) | [中文文档](README_zh.md) [README](README.md) | [中文文档](README_zh.md)
frp 是一个高性能的反向代理应用,可以帮助您轻松地进行内网穿透,对外网提供服务,支持 tcp, http, https 等协议类型,并且 web 服务支持根据域名进行路由转发。 frp 是一个高性能的反向代理应用,可以帮助您轻松地进行内网穿透,对外网提供服务,支持 tcp, udp, http, https 等协议类型,并且 web 服务支持根据域名进行路由转发。
## 目录 ## 目录
<!-- vim-markdown-toc GFM -->
* [frp 的作用](#frp-的作用) * [frp 的作用](#frp-的作用)
* [开发状态](#开发状态) * [开发状态](#开发状态)
* [架构](#架构) * [架构](#架构)
* [使用示例](#使用示例) * [使用示例](#使用示例)
* [通过 ssh 访问公司内网机器](#通过-ssh-访问公司内网机器) * [通过 ssh 访问公司内网机器](#通过-ssh-访问公司内网机器)
* [通过自定义域名访问部署于内网的 web 服务](#通过自定义域名访问部署于内网的-web-服务) * [通过自定义域名访问部署于内网的 web 服务](#通过自定义域名访问部署于内网的-web-服务)
* [转发 DNS 查询请求](#转发-dns-查询请求)
* [功能说明](#功能说明) * [功能说明](#功能说明)
* [Dashboard](#dashboard) * [Dashboard](#dashboard)
* [身份验证](#身份验证) * [身份验证](#身份验证)
* [加密与压缩](#加密与压缩) * [加密与压缩](#加密与压缩)
* [服务器端热加载配置文件](#服务器端热加载配置文件) * [服务器端热加载配置文件](#服务器端热加载配置文件)
* [特权模式](#特权模式) * [特权模式](#特权模式)
* [端口白名单](#端口白名单) * [端口白名单](#端口白名单)
* [连接池](#连接池) * [连接池](#连接池)
* [修改 Host Header](#修改-host-header) * [修改 Host Header](#修改-host-header)
* [通过密码保护你的 web 服务](#通过密码保护你的-web-服务)
* [自定义二级域名](#自定义二级域名)
* [通过 HTTP PROXY 连接 frps](#通过-http-proxy-连接-frps)
* [开发计划](#开发计划) * [开发计划](#开发计划)
* [为 frp 做贡献](#为-frp-做贡献) * [为 frp 做贡献](#为-frp-做贡献)
* [捐助](#捐助) * [捐助](#捐助)
* [支付宝扫码捐赠](#支付宝扫码捐赠)
* [Paypal 捐赠](#paypal-捐赠)
* [贡献者](#贡献者) * [贡献者](#贡献者)
<!-- vim-markdown-toc -->
## frp 的作用 ## frp 的作用
* 利用处于内网或防火墙后的机器,对外网环境提供 http 或 https 服务。 * 利用处于内网或防火墙后的机器,对外网环境提供 http 或 https 服务。
@ -81,6 +90,7 @@ frp 目前正在前期开发阶段master 分支用于发布稳定版本dev
auth_token = 123 auth_token = 123
[ssh] [ssh]
local_ip = 127.0.0.1
local_port = 22 local_port = 22
``` ```
@ -136,6 +146,50 @@ frp 目前正在前期开发阶段master 分支用于发布稳定版本dev
6. 通过浏览器访问 `http://www.yourdomain.com:8080` 即可访问到处于内网机器上的 web 服务。 6. 通过浏览器访问 `http://www.yourdomain.com:8080` 即可访问到处于内网机器上的 web 服务。
### 转发 DNS 查询请求
DNS 查询请求通常使用 UDP 协议frp 支持对内网 UDP 服务的穿透,配置方式和 TCP 基本一致。
1. 修改 frps.ini 文件,配置一个名为 dns 的反向代理:
```ini
# frps.ini
[common]
bind_port = 7000
[dns]
type = udp
listen_port = 6000
auth_token = 123
```
2. 启动 frps
`./frps -c ./frps.ini`
3. 修改 frpc.ini 文件,设置 frps 所在服务器的 IP 为 x.x.x.x转发到 Google 的 DNS 查询服务器 `8.8.8.8` 的 udp 53 端口:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
auth_token = 123
[dns]
type = udp
local_ip = 8.8.8.8
local_port = 53
```
4. 启动 frpc
`./frpc -c ./frpc.ini`
5. 通过 dig 测试 UDP 包转发是否成功,预期会返回 `www.google.com` 域名的解析结果:
`dig @x.x.x.x -p 6000 www.google.com`
## 功能说明 ## 功能说明
### Dashboard ### Dashboard
@ -148,8 +202,8 @@ frp 目前正在前期开发阶段master 分支用于发布稳定版本dev
[common] [common]
dashboard_port = 7500 dashboard_port = 7500
# dashboard 用户名密码可选,默认都为 admin # dashboard 用户名密码可选,默认都为 admin
dashboard_username = abc dashboard_user = admin
dashboard_password = abc dashboard_pwd = admin
``` ```
打开浏览器通过 `http://[server_addr]:7500` 访问 dashboard 界面,用户名密码默认为 `admin` 打开浏览器通过 `http://[server_addr]:7500` 访问 dashboard 界面,用户名密码默认为 `admin`
@ -164,6 +218,8 @@ dashboard_password = abc
需要注意的是 frpc 所在机器和 frps 所在机器的时间相差不能超过 15 分钟,因为时间戳会被用于加密验证中,防止报文被劫持后被其他人利用。 需要注意的是 frpc 所在机器和 frps 所在机器的时间相差不能超过 15 分钟,因为时间戳会被用于加密验证中,防止报文被劫持后被其他人利用。
这个超时时间可以在配置文件中通过 `authentication_timeout` 这个参数来修改,单位为秒,默认值为 900即 15 分钟。如果修改为 0则 frps 将不对身份验证报文的时间戳进行超时校验。
### 加密与压缩 ### 加密与压缩
这两个功能默认是不开启的,需要在 frpc.ini 中通过配置来为指定的代理启用加密与压缩的功能,无论类型是 tcp, http 还是 https 这两个功能默认是不开启的,需要在 frpc.ini 中通过配置来为指定的代理启用加密与压缩的功能,无论类型是 tcp, http 还是 https
@ -319,13 +375,77 @@ host_header_rewrite = dev.yourdomain.com
原来 http 请求中的 host 字段 `test.yourdomain.com` 转发到后端服务时会被替换为 `dev.yourdomain.com` 原来 http 请求中的 host 字段 `test.yourdomain.com` 转发到后端服务时会被替换为 `dev.yourdomain.com`
### 通过密码保护你的 web 服务
由于所有客户端共用一个 frps 的 http 服务端口,任何知道你的域名和 url 的人都能访问到你部署在内网的 web 服务,但是在某些场景下需要确保只有限定的用户才能访问。
frp 支持通过 HTTP Basic Auth 来保护你的 web 服务,使用户需要通过用户名和密码才能访问到你的服务。
该功能目前仅限于 http 类型的代理,需要在 frpc 的代理配置中添加用户名和密码的设置。
```ini
# frpc.ini
[web]
privilege_mode = true
type = http
local_port = 80
custom_domains = test.yourdomain.com
http_user = abc
http_pwd = abc
```
通过浏览器访问 `test.yourdomain.com`,需要输入配置的用户名和密码才能访问。
### 自定义二级域名
在多人同时使用一个 frps 时,通过自定义二级域名的方式来使用会更加方便。
通过在 frps 的配置文件中配置 `subdomain_host`,就可以启用该特性。之后在 frpc 的 http、https 类型的代理中可以不配置 `custom_domains`,而是配置一个 `subdomain` 参数。
只需要将 `*.subdomain_host` 解析到 frps 所在服务器。之后用户可以通过 `subdomain` 自行指定自己的 web 服务所需要使用的二级域名,通过 `{subdomain}.{subdomain_host}` 来访问自己的 web 服务。
```ini
# frps.ini
subdomain_host = frps.com
```
将泛域名 `*.frps.com` 解析到 frps 所在服务器的 IP 地址。
```ini
# frpc.ini
[web]
privilege_mode = true
type = http
local_port = 80
subdomain = test
```
frps 和 fprc 都启动成功后,通过 `test.frps.com` 就可以访问到内网的 web 服务。
需要注意的是如果 frps 配置了 `subdomain_host`,则 `custom_domains` 中不能是属于 `subdomain_host` 的子域名或者泛域名。
同一个 http 或 https 类型的代理中 `custom_domains``subdomain` 可以同时配置。
### 通过 HTTP PROXY 连接 frps
在只能通过代理访问外网的环境内frpc 支持通过 HTTP PROXY 和 frps 进行通信。
可以通过设置 `HTTP_PROXY` 系统环境变量或者通过在 frpc 的配置文件中设置 `http_proxy` 参数来使用此功能。
```ini
# frpc.ini
server_addr = x.x.x.x
server_port = 7000
http_proxy = http://user:pwd@192.168.1.128:8080
```
## 开发计划 ## 开发计划
计划在后续版本中加入的功能与优化,排名不分先后,如果有其他功能建议欢迎在 [issues](https://github.com/fatedier/frp/issues) 中反馈。 计划在后续版本中加入的功能与优化,排名不分先后,如果有其他功能建议欢迎在 [issues](https://github.com/fatedier/frp/issues) 中反馈。
* 支持 udp 协议。
* 支持泛域名。
* 支持 url 路由转发。 * 支持 url 路由转发。
* frps 记录 http 请求日志。
* frps 支持直接反向代理,类似 haproxy。
* frpc 支持负载均衡到后端不同服务。 * frpc 支持负载均衡到后端不同服务。
* frpc debug 模式,控制台显示代理状态,类似 ngrok 启动后的界面。 * frpc debug 模式,控制台显示代理状态,类似 ngrok 启动后的界面。
* frpc http 请求及响应信息展示。 * frpc http 请求及响应信息展示。


@@ -4,18 +4,25 @@
 # in square brackets, as in "[::1]:80", "[ipv6-host]:http" or "[ipv6-host%zone]:80"
 server_addr = 0.0.0.0
 server_port = 7000
 # if you want to connect frps by http proxy, you can set http_proxy here or in global environment variables
 # http_proxy = http://user:pwd@192.168.1.128:8080
 # console or real logFile path like ./frpc.log
 log_file = ./frpc.log
 # debug, info, warn, error
 log_level = info
 log_max_days = 3
 # for authentication
 auth_token = 123
 # for privilege mode
 privilege_token = 12345678
 # ssh is the proxy name same as server's configuration
 [ssh]
 # tcp | http, default is tcp
@@ -29,6 +36,11 @@ use_gzip = false
 # connections will be established in advance, default value is zero
 pool_count = 10
+[dns]
+type = udp
+local_ip = 114.114.114.114
+local_port = 53
 # Resolve your domain names to [server_addr] so you can use http://web01.yourdomain.com to browse web01 and http://web02.yourdomain.com to browse web02, the domains are set in frps.ini
 [web01]
 type = http
@@ -36,10 +48,10 @@ local_ip = 127.0.0.1
 local_port = 80
 use_gzip = true
 pool_count = 20
-# http username and password are safety certification for http protoc
+# http username and password are safety certification for http protocol
 # if not set, you can access this custom_domains without certification
-http_username = admin
-http_password = admin
+http_user = admin
+http_pwd = admin
 # if domain for frps is frps.com, then you can access [web01] proxy by URL http://test.frps.com
 subdomain = test


@@ -4,33 +4,45 @@
 # in square brackets, as in "[::1]:80", "[ipv6-host]:http" or "[ipv6-host%zone]:80"
 bind_addr = 0.0.0.0
 bind_port = 7000
 # if you want to support virtual host, you must set the http port for listening (optional)
 vhost_http_port = 80
 vhost_https_port = 443
 # if you want to configure or reload frps by dashboard, dashboard_port must be set
 dashboard_port = 7500
-# dashboard username and password for basic protect, if not set, both default value is admin
-dashboard_username = abc
-dashboard_password = abc
+# dashboard user and pwd for basic auth protect, if not set, both default value is admin
+dashboard_user = admin
+dashboard_pwd = admin
 # dashboard assets directory(only for debug mode)
 # assets_dir = ./static
 # console or real logFile path like ./frps.log
 log_file = ./frps.log
 # debug, info, warn, error
 log_level = info
 log_max_days = 3
 # if you enable privilege mode, frpc can create a proxy without pre-configure in frps when privilege_token is correct
 privilege_mode = true
 privilege_token = 12345678
 # only allow frpc to bind ports you list, if you set nothing, there won't be any limit
 privilege_allow_ports = 2000-3000,3001,3003,4000-50000
 # pool_count in each proxy will change to max_pool_count if they exceed the maximum value
 max_pool_count = 100
-# authentication_timeout means the timeout interval (minute units) when the frpc connects frps
-# if authentication_timeout set zero, the time is not verified
-authentication_timeout = 15
-# domain for frps
-domain = frps.com
+# authentication_timeout means the timeout interval (seconds) when the frpc connects frps
+# if authentication_timeout is zero, the time is not verified, default is 900s
+authentication_timeout = 900
+# if subdomain_host is not empty, you can set subdomain when type is http or https in frpc's configure file
+# when subdomain is test, the host used by routing is test.frps.com
+subdomain_host = frps.com
 # ssh is the proxy name, client will use this name and auth_token to connect to server
 [ssh]
@@ -39,6 +51,12 @@ auth_token = 123
 bind_addr = 0.0.0.0
 listen_port = 6000
+[dns]
+type = udp
+auth_token = 123
+bind_addr = 0.0.0.0
+listen_port = 5353
 [web01]
 # if type equals http, vhost_http_port must be set
 type = http
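For reference, the `subdomain_host` comment above boils down to a Host-header match: a request for `test.frps.com` is routed by the name `test`, anything else by its full custom domain. A tiny illustrative helper (not the commit's vhost router) that captures that rule:

```go
package main

import (
	"fmt"
	"strings"
)

// routingName returns the name used to pick a proxy for a given Host header:
// the subdomain part when host falls under subdomainHost, otherwise the full
// custom domain.
func routingName(host, subdomainHost string) string {
	host = strings.ToLower(strings.TrimSpace(host))
	if subdomainHost != "" && strings.HasSuffix(host, "."+subdomainHost) {
		return strings.TrimSuffix(host, "."+subdomainHost) // e.g. "test"
	}
	return host // a custom_domains entry such as www.yourdomain.com
}

func main() {
	fmt.Println(routingName("test.frps.com", "frps.com"))      // test
	fmt.Println(routingName("www.yourdomain.com", "frps.com")) // www.yourdomain.com
}
```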


@ -120,7 +120,7 @@ func msgSender(cli *client.ProxyClient, c *conn.Conn, msgSendChan chan interface
} }
buf, _ := json.Marshal(msg) buf, _ := json.Marshal(msg)
err := c.Write(string(buf) + "\n") err := c.WriteString(string(buf) + "\n")
if err != nil { if err != nil {
log.Warn("ProxyName [%s], write to server error, proxy exit", cli.Name) log.Warn("ProxyName [%s], write to server error, proxy exit", cli.Name)
c.Close() c.Close()
@ -166,7 +166,7 @@ func loginToServer(cli *client.ProxyClient) (c *conn.Conn, err error) {
} }
buf, _ := json.Marshal(req) buf, _ := json.Marshal(req)
err = c.Write(string(buf) + "\n") err = c.WriteString(string(buf) + "\n")
if err != nil { if err != nil {
log.Error("ProxyName [%s], write to server error, %v", cli.Name, err) log.Error("ProxyName [%s], write to server error, %v", cli.Name, err)
return return
@ -191,6 +191,12 @@ func loginToServer(cli *client.ProxyClient) (c *conn.Conn, err error) {
} }
log.Info("ProxyName [%s], connect to server [%s:%d] success!", cli.Name, client.ServerAddr, client.ServerPort) log.Info("ProxyName [%s], connect to server [%s:%d] success!", cli.Name, client.ServerAddr, client.ServerPort)
if cli.Type == "udp" {
// we only need one udp work connection
// all udp messages will be forwarded through this connection
go cli.StartUdpTunnelOnce(client.ServerAddr, client.ServerPort)
}
return return
} }


@ -72,14 +72,14 @@ func controlWorker(c *conn.Conn) {
// login when type is NewCtlConn or NewWorkConn // login when type is NewCtlConn or NewWorkConn
ret, info := doLogin(cliReq, c) ret, info := doLogin(cliReq, c)
// if login type is NewWorkConn, nothing will be send to frpc // if login type is NewWorkConn, nothing will be send to frpc
if cliReq.Type != consts.NewWorkConn { if cliReq.Type == consts.NewCtlConn {
cliRes := &msg.ControlRes{ cliRes := &msg.ControlRes{
Type: consts.NewCtlConnRes, Type: consts.NewCtlConnRes,
Code: ret, Code: ret,
Msg: info, Msg: info,
} }
byteBuf, _ := json.Marshal(cliRes) byteBuf, _ := json.Marshal(cliRes)
err = c.Write(string(byteBuf) + "\n") err = c.WriteString(string(byteBuf) + "\n")
if err != nil { if err != nil {
log.Warn("ProxyName [%s], write to client error, proxy exit", cliReq.ProxyName) log.Warn("ProxyName [%s], write to client error, proxy exit", cliReq.ProxyName)
return return
@ -94,7 +94,7 @@ func controlWorker(c *conn.Conn) {
return return
} }
s, ok := server.ProxyServers[cliReq.ProxyName] s, ok := server.GetProxyServer(cliReq.ProxyName)
if !ok { if !ok {
log.Warn("ProxyName [%s] does not exist now", cliReq.ProxyName) log.Warn("ProxyName [%s] does not exist now", cliReq.ProxyName)
return return
@ -145,9 +145,11 @@ func msgReader(s *server.ProxyServer, c *conn.Conn, msgSendChan chan interface{}
if err != nil { if err != nil {
if err == io.EOF { if err == io.EOF {
log.Warn("ProxyName [%s], client is dead!", s.Name) log.Warn("ProxyName [%s], client is dead!", s.Name)
s.Close()
return err return err
} else if c == nil || c.IsClosed() { } else if c == nil || c.IsClosed() {
log.Warn("ProxyName [%s], client connection is closed", s.Name) log.Warn("ProxyName [%s], client connection is closed", s.Name)
s.Close()
return err return err
} }
log.Warn("ProxyName [%s], read error: %v", s.Name, err) log.Warn("ProxyName [%s], read error: %v", s.Name, err)
@ -184,7 +186,7 @@ func msgSender(s *server.ProxyServer, c *conn.Conn, msgSendChan chan interface{}
} }
buf, _ := json.Marshal(msg) buf, _ := json.Marshal(msg)
err := c.Write(string(buf) + "\n") err := c.WriteString(string(buf) + "\n")
if err != nil { if err != nil {
log.Warn("ProxyName [%s], write to client error, proxy exit", s.Name) log.Warn("ProxyName [%s], write to client error, proxy exit", s.Name)
s.Close() s.Close()
@ -194,6 +196,9 @@ func msgSender(s *server.ProxyServer, c *conn.Conn, msgSendChan chan interface{}
} }
// if success, ret equals 0, otherwise greater than 0 // if success, ret equals 0, otherwise greater than 0
// NewCtlConn
// NewWorkConn
// NewWorkConnUdp
func doLogin(req *msg.ControlReq, c *conn.Conn) (ret int64, info string) { func doLogin(req *msg.ControlReq, c *conn.Conn) (ret int64, info string) {
ret = 1 ret = 1
// check if PrivilegeMode is enabled // check if PrivilegeMode is enabled
@ -207,7 +212,7 @@ func doLogin(req *msg.ControlReq, c *conn.Conn) (ret int64, info string) {
s *server.ProxyServer s *server.ProxyServer
ok bool ok bool
) )
s, ok = server.ProxyServers[req.ProxyName] s, ok = server.GetProxyServer(req.ProxyName)
if req.PrivilegeMode && req.Type == consts.NewCtlConn { if req.PrivilegeMode && req.Type == consts.NewCtlConn {
log.Debug("ProxyName [%s], doLogin and privilege mode is enabled", req.ProxyName) log.Debug("ProxyName [%s], doLogin and privilege mode is enabled", req.ProxyName)
} else { } else {
@ -262,6 +267,14 @@ func doLogin(req *msg.ControlReq, c *conn.Conn) (ret int64, info string) {
return return
} }
} }
} else if s.Type == "http" || s.Type == "https" {
for _, domain := range s.CustomDomains {
if server.SubDomainHost != "" && strings.Contains(domain, server.SubDomainHost) {
info = fmt.Sprintf("ProxyName [%s], custom domain [%s] should not belong to subdomain_host [%s]", req.ProxyName, domain, server.SubDomainHost)
log.Warn(info)
return
}
}
} }
err := server.CreateProxy(s) err := server.CreateProxy(s)
if err != nil { if err != nil {
@ -289,14 +302,20 @@ func doLogin(req *msg.ControlReq, c *conn.Conn) (ret int64, info string) {
s.HostHeaderRewrite = req.HostHeaderRewrite s.HostHeaderRewrite = req.HostHeaderRewrite
s.HttpUserName = req.HttpUserName s.HttpUserName = req.HttpUserName
s.HttpPassWord = req.HttpPassWord s.HttpPassWord = req.HttpPassWord
// package URL // package URL
if req.SubDomain != "" { if req.SubDomain != "" {
if strings.Contains(req.SubDomain, ".") || strings.Contains(req.SubDomain, "*") { if strings.Contains(req.SubDomain, ".") || strings.Contains(req.SubDomain, "*") {
info = fmt.Sprintf("ProxyName [%s], type [%s] not support when subdomain is not set", req.ProxyName, req.Type) info = fmt.Sprintf("ProxyName [%s], '.' or '*' is not supported in subdomain", req.ProxyName)
log.Warn(info) log.Warn(info)
return return
} }
s.SubDomain = req.SubDomain + "." + server.Domain if server.SubDomainHost == "" {
info = fmt.Sprintf("ProxyName [%s], subdomain in not supported because this feature is not enabled by remote server", req.ProxyName)
log.Warn(info)
return
}
s.SubDomain = req.SubDomain + "." + server.SubDomainHost
} }
if req.PoolCount > server.MaxPoolCount { if req.PoolCount > server.MaxPoolCount {
s.PoolCount = server.MaxPoolCount s.PoolCount = server.MaxPoolCount
@ -335,6 +354,13 @@ func doLogin(req *msg.ControlReq, c *conn.Conn) (ret int64, info string) {
} }
// the connection will close after join over // the connection will close after join over
s.RegisterNewWorkConn(c) s.RegisterNewWorkConn(c)
} else if req.Type == consts.NewWorkConnUdp {
// work conn for udp
if s.Status != consts.Working {
log.Warn("ProxyName [%s], is not working when it gets one new work connnection for udp", req.ProxyName)
return
}
s.RegisterNewWorkConnUdp(c)
} else { } else {
info = fmt.Sprintf("Unsupport login message type [%d]", req.Type) info = fmt.Sprintf("Unsupport login message type [%d]", req.Type)
log.Warn("Unsupport login message type [%d]", req.Type) log.Warn("Unsupport login message type [%d]", req.Type)


@ -15,6 +15,7 @@
package main package main
import ( import (
"encoding/base64"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -68,7 +69,18 @@ func main() {
// reload check // reload check
if args["--reload"] != nil { if args["--reload"] != nil {
if args["--reload"].(bool) { if args["--reload"].(bool) {
resp, err := http.Get("http://" + server.BindAddr + ":" + fmt.Sprintf("%d", server.DashboardPort) + "/api/reload") req, err := http.NewRequest("GET", "http://"+server.BindAddr+":"+fmt.Sprintf("%d", server.DashboardPort)+"/api/reload", nil)
if err != nil {
fmt.Printf("frps reload error: %v\n", err)
os.Exit(1)
}
authStr := "Basic " + base64.StdEncoding.EncodeToString([]byte(server.DashboardUsername+":"+server.DashboardPassword))
req.Header.Add("Authorization", authStr)
defaultClient := &http.Client{}
resp, err := defaultClient.Do(req)
if err != nil { if err != nil {
fmt.Printf("frps reload error: %v\n", err) fmt.Printf("frps reload error: %v\n", err)
os.Exit(1) os.Exit(1)
@ -82,7 +94,7 @@ func main() {
res := &server.GeneralResponse{} res := &server.GeneralResponse{}
err = json.Unmarshal(body, &res) err = json.Unmarshal(body, &res)
if err != nil { if err != nil {
fmt.Printf("http response error: %v\n", err) fmt.Printf("http response error: %s\n", strings.TrimSpace(string(body)))
os.Exit(1) os.Exit(1)
} else if res.Code != 0 { } else if res.Code != 0 {
fmt.Printf("reload error: %s\n", res.Msg) fmt.Printf("reload error: %s\n", res.Msg)


@ -17,6 +17,7 @@ package client
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"sync"
"time" "time"
"github.com/fatedier/frp/src/models/config" "github.com/fatedier/frp/src/models/config"
@ -34,19 +35,72 @@ type ProxyClient struct {
RemotePort int64 RemotePort int64
CustomDomains []string CustomDomains []string
udpTunnel *conn.Conn
once sync.Once
} }
func (p *ProxyClient) GetLocalConn() (c *conn.Conn, err error) { // if proxy type is udp, keep a tcp connection for transferring udp packages
c, err = conn.ConnectServer(fmt.Sprintf("%s:%d", p.LocalIp, p.LocalPort)) func (pc *ProxyClient) StartUdpTunnelOnce(addr string, port int64) {
pc.once.Do(func() {
var err error
var c *conn.Conn
udpProcessor := NewUdpProcesser(nil, pc.LocalIp, pc.LocalPort)
for {
if pc.udpTunnel == nil || pc.udpTunnel.IsClosed() {
if HttpProxy == "" {
c, err = conn.ConnectServer(fmt.Sprintf("%s:%d", addr, port))
} else {
c, err = conn.ConnectServerByHttpProxy(HttpProxy, fmt.Sprintf("%s:%d", addr, port))
}
if err != nil {
log.Error("ProxyName [%s], udp tunnel connect to server [%s:%d] error, %v", pc.Name, addr, port, err)
time.Sleep(10 * time.Second)
continue
}
log.Info("ProxyName [%s], udp tunnel reconnect to server [%s:%d] success", pc.Name, addr, port)
nowTime := time.Now().Unix()
req := &msg.ControlReq{
Type: consts.NewWorkConnUdp,
ProxyName: pc.Name,
PrivilegeMode: pc.PrivilegeMode,
Timestamp: nowTime,
}
if pc.PrivilegeMode == true {
req.PrivilegeKey = pcrypto.GetAuthKey(pc.Name + PrivilegeToken + fmt.Sprintf("%d", nowTime))
} else {
req.AuthKey = pcrypto.GetAuthKey(pc.Name + pc.AuthToken + fmt.Sprintf("%d", nowTime))
}
buf, _ := json.Marshal(req)
err = c.WriteString(string(buf) + "\n")
if err != nil {
log.Error("ProxyName [%s], udp tunnel write to server error, %v", pc.Name, err)
c.Close()
time.Sleep(1 * time.Second)
continue
}
pc.udpTunnel = c
udpProcessor.UpdateTcpConn(pc.udpTunnel)
udpProcessor.Run()
}
time.Sleep(1 * time.Second)
}
})
}
func (pc *ProxyClient) GetLocalConn() (c *conn.Conn, err error) {
c, err = conn.ConnectServer(fmt.Sprintf("%s:%d", pc.LocalIp, pc.LocalPort))
if err != nil { if err != nil {
log.Error("ProxyName [%s], connect to local port error, %v", p.Name, err) log.Error("ProxyName [%s], connect to local port error, %v", pc.Name, err)
} }
return return
} }
func (p *ProxyClient) GetRemoteConn(addr string, port int64) (c *conn.Conn, err error) { func (pc *ProxyClient) GetRemoteConn(addr string, port int64) (c *conn.Conn, err error) {
defer func() { defer func() {
if err != nil { if err != nil && c != nil {
c.Close() c.Close()
} }
}() }()
@ -57,29 +111,27 @@ func (p *ProxyClient) GetRemoteConn(addr string, port int64) (c *conn.Conn, err
c, err = conn.ConnectServerByHttpProxy(HttpProxy, fmt.Sprintf("%s:%d", addr, port)) c, err = conn.ConnectServerByHttpProxy(HttpProxy, fmt.Sprintf("%s:%d", addr, port))
} }
if err != nil { if err != nil {
log.Error("ProxyName [%s], connect to server [%s:%d] error, %v", p.Name, addr, port, err) log.Error("ProxyName [%s], connect to server [%s:%d] error, %v", pc.Name, addr, port, err)
return return
} }
nowTime := time.Now().Unix() nowTime := time.Now().Unix()
req := &msg.ControlReq{ req := &msg.ControlReq{
Type: consts.NewWorkConn, Type: consts.NewWorkConn,
ProxyName: p.Name, ProxyName: pc.Name,
PrivilegeMode: p.PrivilegeMode, PrivilegeMode: pc.PrivilegeMode,
Timestamp: nowTime, Timestamp: nowTime,
} }
if p.PrivilegeMode == true { if pc.PrivilegeMode == true {
privilegeKey := pcrypto.GetAuthKey(p.Name + PrivilegeToken + fmt.Sprintf("%d", nowTime)) req.PrivilegeKey = pcrypto.GetAuthKey(pc.Name + PrivilegeToken + fmt.Sprintf("%d", nowTime))
req.PrivilegeKey = privilegeKey
} else { } else {
authKey := pcrypto.GetAuthKey(p.Name + p.AuthToken + fmt.Sprintf("%d", nowTime)) req.AuthKey = pcrypto.GetAuthKey(pc.Name + pc.AuthToken + fmt.Sprintf("%d", nowTime))
req.AuthKey = authKey
} }
buf, _ := json.Marshal(req) buf, _ := json.Marshal(req)
err = c.Write(string(buf) + "\n") err = c.WriteString(string(buf) + "\n")
if err != nil { if err != nil {
log.Error("ProxyName [%s], write to server error, %v", p.Name, err) log.Error("ProxyName [%s], write to server error, %v", pc.Name, err)
return return
} }
@ -87,12 +139,12 @@ func (p *ProxyClient) GetRemoteConn(addr string, port int64) (c *conn.Conn, err
return return
} }
func (p *ProxyClient) StartTunnel(serverAddr string, serverPort int64) (err error) { func (pc *ProxyClient) StartTunnel(serverAddr string, serverPort int64) (err error) {
localConn, err := p.GetLocalConn() localConn, err := pc.GetLocalConn()
if err != nil { if err != nil {
return return
} }
remoteConn, err := p.GetRemoteConn(serverAddr, serverPort) remoteConn, err := pc.GetRemoteConn(serverAddr, serverPort)
if err != nil { if err != nil {
return return
} }
@ -101,7 +153,7 @@ func (p *ProxyClient) StartTunnel(serverAddr string, serverPort int64) (err erro
log.Debug("Join two connections, (l[%s] r[%s]) (l[%s] r[%s])", localConn.GetLocalAddr(), localConn.GetRemoteAddr(), log.Debug("Join two connections, (l[%s] r[%s]) (l[%s] r[%s])", localConn.GetLocalAddr(), localConn.GetRemoteAddr(),
remoteConn.GetLocalAddr(), remoteConn.GetRemoteAddr()) remoteConn.GetLocalAddr(), remoteConn.GetRemoteAddr())
needRecord := false needRecord := false
go msg.JoinMore(localConn, remoteConn, p.BaseConf, needRecord) go msg.JoinMore(localConn, remoteConn, pc.BaseConf, needRecord)
return nil return nil
} }


@ -130,7 +130,7 @@ func LoadConf(confFile string) (err error) {
proxyClient.Type = "tcp" proxyClient.Type = "tcp"
tmpStr, ok = section["type"] tmpStr, ok = section["type"]
if ok { if ok {
if tmpStr != "tcp" && tmpStr != "http" && tmpStr != "https" { if tmpStr != "tcp" && tmpStr != "http" && tmpStr != "https" && tmpStr != "udp" {
return fmt.Errorf("Parse conf error: proxy [%s] type error", proxyClient.Name) return fmt.Errorf("Parse conf error: proxy [%s] type error", proxyClient.Name)
} }
proxyClient.Type = tmpStr proxyClient.Type = tmpStr
@ -156,13 +156,13 @@ func LoadConf(confFile string) (err error) {
if ok { if ok {
proxyClient.HostHeaderRewrite = tmpStr proxyClient.HostHeaderRewrite = tmpStr
} }
// http_username // http_user
tmpStr, ok = section["http_username"] tmpStr, ok = section["http_user"]
if ok { if ok {
proxyClient.HttpUserName = tmpStr proxyClient.HttpUserName = tmpStr
} }
// http_password // http_pwd
tmpStr, ok = section["http_password"] tmpStr, ok = section["http_pwd"]
if ok { if ok {
proxyClient.HttpPassWord = tmpStr proxyClient.HttpPassWord = tmpStr
} }
@ -199,7 +199,7 @@ func LoadConf(confFile string) (err error) {
proxyClient.PrivilegeToken = PrivilegeToken proxyClient.PrivilegeToken = PrivilegeToken
} }
if proxyClient.Type == "tcp" { if proxyClient.Type == "tcp" || proxyClient.Type == "udp" {
// remote_port // remote_port
tmpStr, ok = section["remote_port"] tmpStr, ok = section["remote_port"]
if ok { if ok {
@ -216,30 +216,33 @@ func LoadConf(confFile string) (err error) {
if ok { if ok {
proxyClient.CustomDomains = strings.Split(domainStr, ",") proxyClient.CustomDomains = strings.Split(domainStr, ",")
if len(proxyClient.CustomDomains) == 0 { if len(proxyClient.CustomDomains) == 0 {
return fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals http", proxyClient.Name) ok = false
} else {
for i, domain := range proxyClient.CustomDomains {
proxyClient.CustomDomains[i] = strings.ToLower(strings.TrimSpace(domain))
}
} }
for i, domain := range proxyClient.CustomDomains {
proxyClient.CustomDomains[i] = strings.ToLower(strings.TrimSpace(domain))
}
} else {
return fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals http", proxyClient.Name)
} }
// subdomain if !ok && proxyClient.SubDomain == "" {
proxyClient.SubDomain, ok = section["subdomain"] return fmt.Errorf("Parse conf error: proxy [%s] custom_domains and subdomain should set at least one of them when type is http", proxyClient.Name)
}
} else if proxyClient.Type == "https" { } else if proxyClient.Type == "https" {
// custom_domains // custom_domains
domainStr, ok := section["custom_domains"] domainStr, ok := section["custom_domains"]
if ok { if ok {
proxyClient.CustomDomains = strings.Split(domainStr, ",") proxyClient.CustomDomains = strings.Split(domainStr, ",")
if len(proxyClient.CustomDomains) == 0 { if len(proxyClient.CustomDomains) == 0 {
return fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals https", proxyClient.Name) ok = false
} else {
for i, domain := range proxyClient.CustomDomains {
proxyClient.CustomDomains[i] = strings.ToLower(strings.TrimSpace(domain))
}
} }
for i, domain := range proxyClient.CustomDomains { }
proxyClient.CustomDomains[i] = strings.ToLower(strings.TrimSpace(domain))
} if !ok && proxyClient.SubDomain == "" {
} else { return fmt.Errorf("Parse conf error: proxy [%s] custom_domains and subdomain should set at least one of them when type is https", proxyClient.Name)
return fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals http", proxyClient.Name)
} }
} }
} }


@ -0,0 +1,153 @@
// Copyright 2016 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"fmt"
"io"
"net"
"sync"
"time"
"github.com/fatedier/frp/src/models/msg"
"github.com/fatedier/frp/src/utils/conn"
"github.com/fatedier/frp/src/utils/pool"
)
type UdpProcesser struct {
tcpConn *conn.Conn
closeCh chan struct{}
localAddr string
// cache local udp connections
// key is remoteAddr
localUdpConns map[string]*net.UDPConn
mutex sync.RWMutex
tcpConnMutex sync.RWMutex
}
func NewUdpProcesser(c *conn.Conn, localIp string, localPort int64) *UdpProcesser {
return &UdpProcesser{
tcpConn: c,
closeCh: make(chan struct{}),
localAddr: fmt.Sprintf("%s:%d", localIp, localPort),
localUdpConns: make(map[string]*net.UDPConn),
}
}
func (up *UdpProcesser) UpdateTcpConn(c *conn.Conn) {
up.tcpConnMutex.Lock()
defer up.tcpConnMutex.Unlock()
up.tcpConn = c
}
func (up *UdpProcesser) Run() {
go up.ReadLoop()
}
func (up *UdpProcesser) ReadLoop() {
var (
buf string
err error
)
for {
udpPacket := &msg.UdpPacket{}
// read udp package from frps
buf, err = up.tcpConn.ReadLine()
if err != nil {
if err == io.EOF {
return
} else {
continue
}
}
err = udpPacket.UnPack([]byte(buf))
if err != nil {
continue
}
// write to local udp port
sendConn, ok := up.GetUdpConn(udpPacket.SrcStr)
if !ok {
dstAddr, err := net.ResolveUDPAddr("udp", up.localAddr)
if err != nil {
continue
}
sendConn, err = net.DialUDP("udp", nil, dstAddr)
if err != nil {
continue
}
up.SetUdpConn(udpPacket.SrcStr, sendConn)
}
_, err = sendConn.Write(udpPacket.Content)
if err != nil {
sendConn.Close()
continue
}
if !ok {
go up.Forward(udpPacket, sendConn)
}
}
}
func (up *UdpProcesser) Forward(udpPacket *msg.UdpPacket, singleConn *net.UDPConn) {
addr := udpPacket.SrcStr
defer up.RemoveUdpConn(addr)
buf := pool.GetBuf(2048)
for {
singleConn.SetReadDeadline(time.Now().Add(120 * time.Second))
n, remoteAddr, err := singleConn.ReadFromUDP(buf)
if err != nil {
return
}
// forward to frps
forwardPacket := msg.NewUdpPacket(buf[0:n], remoteAddr, udpPacket.Src)
up.tcpConnMutex.RLock()
err = up.tcpConn.WriteString(string(forwardPacket.Pack()) + "\n")
up.tcpConnMutex.RUnlock()
if err != nil {
return
}
}
}
func (up *UdpProcesser) GetUdpConn(addr string) (singleConn *net.UDPConn, ok bool) {
up.mutex.RLock()
defer up.mutex.RUnlock()
singleConn, ok = up.localUdpConns[addr]
return
}
func (up *UdpProcesser) SetUdpConn(addr string, conn *net.UDPConn) {
up.mutex.Lock()
defer up.mutex.Unlock()
up.localUdpConns[addr] = conn
}
func (up *UdpProcesser) RemoveUdpConn(addr string) {
up.mutex.Lock()
defer up.mutex.Unlock()
if c, ok := up.localUdpConns[addr]; ok {
c.Close()
}
delete(up.localUdpConns, addr)
}
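The tunnel side of this file frames every datagram as one JSON line on the TCP work connection (`WriteString(packet.Pack() + "\n")` on write, `ReadLine` plus `UnPack` on read). A short round trip of that framing, assuming the `msg.UdpPacket` API added in src/models/msg/udp.go later in this diff:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"net"

	"github.com/fatedier/frp/src/models/msg"
)

func main() {
	src, _ := net.ResolveUDPAddr("udp", "1.1.1.1:1000")
	dst, _ := net.ResolveUDPAddr("udp", "8.8.8.8:53")
	p := msg.NewUdpPacket([]byte("payload"), src, dst)

	// Sender side: one JSON object (content base64-encoded) per line.
	var wire bytes.Buffer
	wire.Write(p.Pack())
	wire.WriteByte('\n')

	// Receiver side: read a line, then unpack it back into a UdpPacket.
	line, err := bufio.NewReader(&wire).ReadString('\n')
	if err != nil {
		panic(err)
	}
	q := &msg.UdpPacket{}
	if err := q.UnPack([]byte(line)); err != nil {
		panic(err)
	}
	fmt.Println(string(q.Content), q.Src, q.Dst)
}
```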


@ -37,4 +37,5 @@ const (
NewCtlConnRes NewCtlConnRes
HeartbeatReq HeartbeatReq
HeartbeatRes HeartbeatRes
NewWorkConnUdp
) )


@ -53,9 +53,9 @@ func Join(c1 *conn.Conn, c2 *conn.Conn) {
} }
// join two connections and do some operations // join two connections and do some operations
func JoinMore(c1 *conn.Conn, c2 *conn.Conn, conf config.BaseConf, needRecord bool) { func JoinMore(c1 io.ReadWriteCloser, c2 io.ReadWriteCloser, conf config.BaseConf, needRecord bool) {
var wait sync.WaitGroup var wait sync.WaitGroup
encryptPipe := func(from *conn.Conn, to *conn.Conn) { encryptPipe := func(from io.ReadCloser, to io.WriteCloser) {
defer from.Close() defer from.Close()
defer to.Close() defer to.Close()
defer wait.Done() defer wait.Done()
@ -64,7 +64,7 @@ func JoinMore(c1 *conn.Conn, c2 *conn.Conn, conf config.BaseConf, needRecord boo
pipeEncrypt(from, to, conf, needRecord) pipeEncrypt(from, to, conf, needRecord)
} }
decryptPipe := func(to *conn.Conn, from *conn.Conn) { decryptPipe := func(to io.ReadCloser, from io.WriteCloser) {
defer from.Close() defer from.Close()
defer to.Close() defer to.Close()
defer wait.Done() defer wait.Done()
@ -109,7 +109,7 @@ func unpkgMsg(data []byte) (int, []byte, []byte) {
} }
// decrypt msg from reader, then write into writer // decrypt msg from reader, then write into writer
func pipeDecrypt(r *conn.Conn, w *conn.Conn, conf config.BaseConf, needRecord bool) (err error) { func pipeDecrypt(r io.Reader, w io.Writer, conf config.BaseConf, needRecord bool) (err error) {
laes := new(pcrypto.Pcrypto) laes := new(pcrypto.Pcrypto)
key := conf.AuthToken key := conf.AuthToken
if conf.PrivilegeMode { if conf.PrivilegeMode {
@ -175,7 +175,7 @@ func pipeDecrypt(r *conn.Conn, w *conn.Conn, conf config.BaseConf, needRecord bo
} }
} }
_, err = w.WriteBytes(res) _, err = w.Write(res)
if err != nil { if err != nil {
return err return err
} }
@ -192,7 +192,7 @@ func pipeDecrypt(r *conn.Conn, w *conn.Conn, conf config.BaseConf, needRecord bo
} }
// recvive msg from reader, then encrypt msg into writer // recvive msg from reader, then encrypt msg into writer
func pipeEncrypt(r *conn.Conn, w *conn.Conn, conf config.BaseConf, needRecord bool) (err error) { func pipeEncrypt(r io.Reader, w io.Writer, conf config.BaseConf, needRecord bool) (err error) {
laes := new(pcrypto.Pcrypto) laes := new(pcrypto.Pcrypto)
key := conf.AuthToken key := conf.AuthToken
if conf.PrivilegeMode { if conf.PrivilegeMode {
@ -247,7 +247,7 @@ func pipeEncrypt(r *conn.Conn, w *conn.Conn, conf config.BaseConf, needRecord bo
} }
res = pkgMsg(res) res = pkgMsg(res)
_, err = w.WriteBytes(res) _, err = w.Write(res)
if err != nil { if err != nil {
return err return err
} }

src/models/msg/udp.go (new file)

@ -0,0 +1,72 @@
// Copyright 2016 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package msg
import (
"encoding/base64"
"encoding/json"
"net"
)
type UdpPacket struct {
Content []byte `json:"-"`
Src *net.UDPAddr `json:"-"`
Dst *net.UDPAddr `json:"-"`
EncodeContent string `json:"content"`
SrcStr string `json:"src"`
DstStr string `json:"dst"`
}
func NewUdpPacket(content []byte, src, dst *net.UDPAddr) *UdpPacket {
up := &UdpPacket{
Src: src,
Dst: dst,
EncodeContent: base64.StdEncoding.EncodeToString(content),
SrcStr: src.String(),
DstStr: dst.String(),
}
return up
}
// parse one udp packet struct to bytes
func (up *UdpPacket) Pack() []byte {
b, _ := json.Marshal(up)
return b
}
// parse from bytes to UdpPacket struct
func (up *UdpPacket) UnPack(packet []byte) error {
err := json.Unmarshal(packet, &up)
if err != nil {
return err
}
up.Content, err = base64.StdEncoding.DecodeString(up.EncodeContent)
if err != nil {
return err
}
up.Src, err = net.ResolveUDPAddr("udp", up.SrcStr)
if err != nil {
return err
}
up.Dst, err = net.ResolveUDPAddr("udp", up.DstStr)
if err != nil {
return err
}
return nil
}


@ -0,0 +1,50 @@
// Copyright 2016 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package msg
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
var (
content string = "udp packet test"
src string = "1.1.1.1:1000"
dst string = "2.2.2.2:2000"
udpMsg *UdpPacket
)
func init() {
srcAddr, _ := net.ResolveUDPAddr("udp", src)
dstAddr, _ := net.ResolveUDPAddr("udp", dst)
udpMsg = NewUdpPacket([]byte(content), srcAddr, dstAddr)
}
func TestPack(t *testing.T) {
assert := assert.New(t)
msg := udpMsg.Pack()
assert.Equal(string(msg), `{"content":"dWRwIHBhY2tldCB0ZXN0","src":"1.1.1.1:1000","dst":"2.2.2.2:2000"}`)
}
func TestUnpack(t *testing.T) {
assert := assert.New(t)
udpMsg.UnPack([]byte(`{"content":"dWRwIHBhY2tldCB0ZXN0","src":"1.1.1.1:1000","dst":"2.2.2.2:2000"}`))
assert.Equal(content, string(udpMsg.Content))
assert.Equal(src, udpMsg.Src.String())
assert.Equal(dst, udpMsg.Dst.String())
}


@ -45,8 +45,8 @@ var (
LogMaxDays int64 = 3 LogMaxDays int64 = 3
PrivilegeMode bool = false PrivilegeMode bool = false
PrivilegeToken string = "" PrivilegeToken string = ""
AuthTimeout int64 = 15 AuthTimeout int64 = 900
Domain string = "" SubDomainHost string = ""
// if PrivilegeAllowPorts is not nil, tcp proxies which remote port exist in this map can be connected // if PrivilegeAllowPorts is not nil, tcp proxies which remote port exist in this map can be connected
PrivilegeAllowPorts map[int64]struct{} PrivilegeAllowPorts map[int64]struct{}
@ -123,12 +123,12 @@ func loadCommonConf(confFile string) error {
DashboardPort = 0 DashboardPort = 0
} }
tmpStr, ok = conf.Get("common", "dashboard_username") tmpStr, ok = conf.Get("common", "dashboard_user")
if ok { if ok {
DashboardUsername = tmpStr DashboardUsername = tmpStr
} }
tmpStr, ok = conf.Get("common", "dashboard_password") tmpStr, ok = conf.Get("common", "dashboard_pwd")
if ok { if ok {
DashboardPassword = tmpStr DashboardPassword = tmpStr
} }
@ -233,7 +233,10 @@ func loadCommonConf(confFile string) error {
AuthTimeout = v AuthTimeout = v
} }
} }
Domain, ok = conf.Get("common", "domain") SubDomainHost, ok = conf.Get("common", "subdomain_host")
if ok {
SubDomainHost = strings.ToLower(strings.TrimSpace(SubDomainHost))
}
return nil return nil
} }
@ -252,7 +255,7 @@ func loadProxyConf(confFile string) (proxyServers map[string]*ProxyServer, err e
proxyServer.Type, ok = section["type"] proxyServer.Type, ok = section["type"]
if ok { if ok {
if proxyServer.Type != "tcp" && proxyServer.Type != "http" && proxyServer.Type != "https" { if proxyServer.Type != "tcp" && proxyServer.Type != "http" && proxyServer.Type != "https" && proxyServer.Type != "udp" {
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] type error", proxyServer.Name) return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] type error", proxyServer.Name)
} }
} else { } else {
@ -264,8 +267,8 @@ func loadProxyConf(confFile string) (proxyServers map[string]*ProxyServer, err e
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] no auth_token found", proxyServer.Name) return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] no auth_token found", proxyServer.Name)
} }
// for tcp // for tcp and udp
if proxyServer.Type == "tcp" { if proxyServer.Type == "tcp" || proxyServer.Type == "udp" {
proxyServer.BindAddr, ok = section["bind_addr"] proxyServer.BindAddr, ok = section["bind_addr"]
if !ok { if !ok {
proxyServer.BindAddr = "0.0.0.0" proxyServer.BindAddr = "0.0.0.0"
@ -288,13 +291,18 @@ func loadProxyConf(confFile string) (proxyServers map[string]*ProxyServer, err e
if ok { if ok {
proxyServer.CustomDomains = strings.Split(domainStr, ",") proxyServer.CustomDomains = strings.Split(domainStr, ",")
if len(proxyServer.CustomDomains) == 0 { if len(proxyServer.CustomDomains) == 0 {
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals http", proxyServer.Name) return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type is http", proxyServer.Name)
} }
for i, domain := range proxyServer.CustomDomains { for i, domain := range proxyServer.CustomDomains {
proxyServer.CustomDomains[i] = strings.ToLower(strings.TrimSpace(domain)) domain = strings.ToLower(strings.TrimSpace(domain))
// custom domain should not belong to subdomain_host
if SubDomainHost != "" && strings.Contains(domain, SubDomainHost) {
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom domain should not belong to subdomain_host", proxyServer.Name)
}
proxyServer.CustomDomains[i] = domain
} }
} else { } else {
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals http", proxyServer.Name) return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type is http", proxyServer.Name)
} }
//location //location
@ -312,13 +320,17 @@ func loadProxyConf(confFile string) (proxyServers map[string]*ProxyServer, err e
if ok { if ok {
proxyServer.CustomDomains = strings.Split(domainStr, ",") proxyServer.CustomDomains = strings.Split(domainStr, ",")
if len(proxyServer.CustomDomains) == 0 { if len(proxyServer.CustomDomains) == 0 {
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals https", proxyServer.Name) return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type is https", proxyServer.Name)
} }
for i, domain := range proxyServer.CustomDomains { for i, domain := range proxyServer.CustomDomains {
proxyServer.CustomDomains[i] = strings.ToLower(strings.TrimSpace(domain)) domain = strings.ToLower(strings.TrimSpace(domain))
if SubDomainHost != "" && strings.Contains(domain, SubDomainHost) {
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom domain should not belong to subdomain_host", proxyServer.Name)
}
proxyServer.CustomDomains[i] = domain
} }
} else { } else {
return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type equals https", proxyServer.Name) return proxyServers, fmt.Errorf("Parse conf error: proxy [%s] custom_domains must be set when type is https", proxyServer.Name)
} }
} }
proxyServers[proxyServer.Name] = proxyServer proxyServers[proxyServer.Name] = proxyServer
@ -398,3 +410,10 @@ func DeleteProxy(proxyName string) {
defer ProxyServersMutex.Unlock() defer ProxyServersMutex.Unlock()
delete(ProxyServers, proxyName) delete(ProxyServers, proxyName)
} }
func GetProxyServer(proxyName string) (p *ProxyServer, ok bool) {
ProxyServersMutex.RLock()
defer ProxyServersMutex.RUnlock()
p, ok = ProxyServers[proxyName]
return
}


@ -34,7 +34,7 @@ func RunDashboardServer(addr string, port int64) (err error) {
// url router // url router
mux := http.NewServeMux() mux := http.NewServeMux()
// api, see dashboard_api.go // api, see dashboard_api.go
mux.HandleFunc("/api/reload", apiReload) mux.HandleFunc("/api/reload", use(apiReload, basicAuth))
mux.HandleFunc("/api/proxies", apiProxies) mux.HandleFunc("/api/proxies", apiProxies)
// view, see dashboard_view.go // view, see dashboard_view.go


@ -16,6 +16,7 @@ package server
import (
"fmt"
"net"
"sync"
"time"
@ -25,6 +26,7 @@ import (
"github.com/fatedier/frp/src/models/msg"
"github.com/fatedier/frp/src/utils/conn"
"github.com/fatedier/frp/src/utils/log"
"github.com/fatedier/frp/src/utils/pool"
)
type Listener interface {
@ -35,12 +37,17 @@ type Listener interface {
type ProxyServer struct {
*config.ProxyServerConf
Status int64
CtlConn *conn.Conn // control connection with frpc
WorkConnUdp *conn.Conn // work connection for udp
udpConn *net.UDPConn
listeners []Listener // accept new connection from remote users
ctlMsgChan chan int64 // every time accept a new user conn, put "1" to the channel
workConnChan chan *conn.Conn // get new work conns from control goroutine
udpSenderChan chan *msg.UdpPacket
mutex sync.RWMutex
closeChan chan struct{} // close this channel for notifying other goroutines that the proxy is closed
}
func NewProxyServer() (p *ProxyServer) {
@ -60,7 +67,7 @@ func NewProxyServerFromCtlMsg(req *msg.ControlReq) (p *ProxyServer) {
p.PrivilegeMode = req.PrivilegeMode
p.PrivilegeToken = PrivilegeToken
p.BindAddr = BindAddr
if p.Type == "tcp" || p.Type == "udp" {
p.ListenPort = req.RemotePort
} else if p.Type == "http" {
p.ListenPort = VhostHttpPort
@ -80,6 +87,7 @@ func (p *ProxyServer) Init() {
metric.SetStatus(p.Name, p.Status)
p.workConnChan = make(chan *conn.Conn, p.PoolCount+10)
p.ctlMsgChan = make(chan int64, p.PoolCount+10)
p.udpSenderChan = make(chan *msg.UdpPacket, 1024)
p.listeners = make([]Listener, 0)
p.closeChan = make(chan struct{})
p.Unlock()
@ -145,46 +153,73 @@ func (p *ProxyServer) Start(c *conn.Conn) (err error) {
p.Unlock()
metric.SetStatus(p.Name, p.Status)
if p.Type == "udp" {
// udp is special
p.udpConn, err = conn.ListenUDP(p.BindAddr, p.ListenPort)
if err != nil {
log.Warn("ProxyName [%s], listen udp port error: %v", p.Name, err)
return err
}
go func() {
for {
buf := pool.GetBuf(2048)
n, remoteAddr, err := p.udpConn.ReadFromUDP(buf)
if err != nil {
log.Info("ProxyName [%s], udp listener is closed", p.Name)
return
}
localAddr, _ := net.ResolveUDPAddr("udp", p.udpConn.LocalAddr().String())
udpPacket := msg.NewUdpPacket(buf[0:n], remoteAddr, localAddr)
select {
case p.udpSenderChan <- udpPacket:
default:
log.Warn("ProxyName [%s], udp sender channel is full", p.Name)
}
pool.PutBuf(buf)
}
}()
} else {
// create connection pool if needed
if p.PoolCount > 0 {
go p.connectionPoolManager(p.closeChan)
}
// start a goroutine for every listener to accept user connection
for _, listener := range p.listeners {
go func(l Listener) {
for {
// block
// if listener is closed, err returned
c, err := l.Accept()
if err != nil {
log.Info("ProxyName [%s], listener is closed", p.Name)
return
}
log.Debug("ProxyName [%s], get one new user conn [%s]", p.Name, c.GetRemoteAddr())
if p.Status != consts.Working {
log.Debug("ProxyName [%s] is not working, new user conn close", p.Name)
c.Close()
return
}
go func(userConn *conn.Conn) {
workConn, err := p.getWorkConn()
if err != nil {
return
}
// message will be transferred to another without modifying
// l means local, r means remote
log.Debug("Join two connections, (l[%s] r[%s]) (l[%s] r[%s])", workConn.GetLocalAddr(), workConn.GetRemoteAddr(),
userConn.GetLocalAddr(), userConn.GetRemoteAddr())
needRecord := true
go msg.JoinMore(userConn, workConn, p.BaseConf, needRecord)
}(c)
}
}(listener)
}
}
return nil
}
@ -200,10 +235,18 @@ func (p *ProxyServer) Close() {
}
close(p.ctlMsgChan)
close(p.workConnChan)
close(p.udpSenderChan)
close(p.closeChan)
if p.CtlConn != nil {
p.CtlConn.Close()
}
if p.WorkConnUdp != nil {
p.WorkConnUdp.Close()
}
if p.udpConn != nil {
p.udpConn.Close()
p.udpConn = nil
}
}
metric.SetStatus(p.Name, p.Status)
// if the proxy was created by PrivilegeMode, delete it when closed
@ -228,9 +271,60 @@ func (p *ProxyServer) RegisterNewWorkConn(c *conn.Conn) {
case p.workConnChan <- c:
default:
log.Debug("ProxyName [%s], workConnChan is full, so close this work connection", p.Name)
c.Close()
}
}
// create a tcp connection for forwarding udp packets
func (p *ProxyServer) RegisterNewWorkConnUdp(c *conn.Conn) {
if p.WorkConnUdp != nil && !p.WorkConnUdp.IsClosed() {
p.WorkConnUdp.Close()
}
p.WorkConnUdp = c
// read
go func() {
var (
buf string
err error
)
for {
buf, err = c.ReadLine()
if err != nil {
log.Warn("ProxyName [%s], work connection for udp closed", p.Name)
return
}
udpPacket := &msg.UdpPacket{}
err = udpPacket.UnPack([]byte(buf))
if err != nil {
log.Warn("ProxyName [%s], unpack udp packet error: %v", p.Name, err)
continue
}
// send to user
_, err = p.udpConn.WriteToUDP(udpPacket.Content, udpPacket.Dst)
if err != nil {
continue
}
}
}()
// write
go func() {
for {
udpPacket, ok := <-p.udpSenderChan
if !ok {
return
}
err := c.WriteString(string(udpPacket.Pack()) + "\n")
if err != nil {
log.Debug("ProxyName [%s], write to work connection for udp error: %v", p.Name, err)
return
}
}
}()
}
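
Both goroutines above treat the TCP work connection as a line-oriented channel: each UDP datagram is packed into a single line with WriteString(string(udpPacket.Pack()) + "\n") and recovered on the other side with ReadLine() plus UnPack(). The real wire format lives in frp's msg package and is not shown in this diff; the sketch below is only one way such a line-safe packet could be encoded (base64 payload inside JSON is an assumption, not frp's actual format):

package msgsketch

import (
    "encoding/base64"
    "encoding/json"
    "net"
)

// UdpPacket is an illustrative line-safe wrapper for one UDP datagram.
type UdpPacket struct {
    Content []byte       `json:"-"`
    Src     *net.UDPAddr `json:"src"`
    Dst     *net.UDPAddr `json:"dst"`

    EncodedContent string `json:"content"` // base64 so the payload fits on one text line
}

// NewUdpPacket mirrors the constructor used in the read goroutine above.
func NewUdpPacket(content []byte, src, dst *net.UDPAddr) *UdpPacket {
    return &UdpPacket{Content: content, Src: src, Dst: dst}
}

// Pack serializes the packet to a single JSON line (json.Marshal emits no newlines).
func (p *UdpPacket) Pack() []byte {
    p.EncodedContent = base64.StdEncoding.EncodeToString(p.Content)
    b, _ := json.Marshal(p) // marshaling these fields cannot fail
    return b
}

// UnPack restores a packet from one line previously produced by Pack.
func (p *UdpPacket) UnPack(line []byte) error {
    if err := json.Unmarshal(line, p); err != nil {
        return err
    }
    content, err := base64.StdEncoding.DecodeString(p.EncodedContent)
    if err != nil {
        return err
    }
    p.Content = content
    return nil
}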
// When frps gets one user connection, we get one work connection from the pool and return it.
// If no workConn is available in the pool, send a message to frpc to get one or more
// and wait until one is available.


@ -202,12 +202,12 @@ func (c *Conn) ReadLine() (buff string, err error) {
return buff, err
}
func (c *Conn) Write(content []byte) (n int, err error) {
n, err = c.TcpConn.Write(content)
return
}
func (c *Conn) WriteString(content string) (err error) {
_, err = c.TcpConn.Write([]byte(content))
return err
}
@ -220,13 +220,14 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
return c.TcpConn.SetReadDeadline(t)
}
func (c *Conn) Close() error {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.TcpConn != nil && c.closeFlag == false {
c.closeFlag = true
c.TcpConn.Close()
}
return nil
}
func (c *Conn) IsClosed() (closeFlag bool) {
@ -245,7 +246,6 @@ func (c *Conn) CheckClosed() bool {
}
c.mutex.RUnlock()
err := c.TcpConn.SetReadDeadline(time.Now().Add(time.Millisecond))
if err != nil {
c.Close()
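
With Write([]byte) (n int, err error) and Close() error in place, *Conn now matches the standard io.Writer and io.Closer signatures (the previous Write(string) and Close() with no return value did not). A compile-time check, assuming the utils/conn import path shown in the hunks above:

package conncheck

import (
    "io"

    "github.com/fatedier/frp/src/utils/conn"
)

// These blank assignments compile only if *conn.Conn satisfies the interfaces.
var (
    _ io.Writer = (*conn.Conn)(nil)
    _ io.Closer = (*conn.Conn)(nil)
)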


@ -0,0 +1,29 @@
// Copyright 2016 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package conn
import (
"fmt"
"net"
)
func ListenUDP(bindAddr string, bindPort int64) (conn *net.UDPConn, err error) {
udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", bindAddr, bindPort))
if err != nil {
return conn, err
}
conn, err = net.ListenUDP("udp", udpAddr)
return
}
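
For reference, a minimal sketch of how this helper might be called (bind address and port are illustrative values):

package main

import (
    "log"

    "github.com/fatedier/frp/src/utils/conn"
)

func main() {
    // ListenUDP resolves bindAddr:bindPort and returns a *net.UDPConn.
    udpConn, err := conn.ListenUDP("0.0.0.0", 7000)
    if err != nil {
        log.Fatalf("listen udp error: %v", err)
    }
    defer udpConn.Close()
    log.Printf("listening on %s", udpConn.LocalAddr())
}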


@ -40,6 +40,6 @@ func echoWorker(c *conn.Conn) {
return
}
c.WriteString(buff)
}
}


@ -26,7 +26,7 @@ func TestEchoServer(t *testing.T) {
timer := time.Now().Add(time.Duration(5) * time.Second)
c.SetDeadline(timer)
c.WriteString(ECHO_TEST_STR)
buff, err := c.ReadLine()
if err != nil {

vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file

@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file

@ -0,0 +1,152 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}

vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file

@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file

@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}
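
As the comments above note, map keys are only ordered when SortKeys is enabled (with SpewKeys as a last-resort fallback); a short usage sketch showing the difference:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
    m := map[string]int{"b": 2, "a": 1, "c": 3}

    // Default config: keys are printed in Go's unspecified map order.
    spew.Dump(m)

    // SortKeys makes the output deterministic and diff-friendly.
    cs := spew.ConfigState{Indent: " ", SortKeys: true}
    cs.Dump(m)
}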

vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file

@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Print.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
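
A brief sketch of using the ConfigState API defined above (field values are illustrative):

package main

import (
    "os"

    "github.com/davecgh/go-spew/spew"
)

type server struct {
    Name string
    Port int
}

func main() {
    cfg := spew.NewDefaultConfig()
    cfg.Indent = "\t" // tab indentation instead of the default single space
    cfg.MaxDepth = 3  // stop descending after three levels

    s := &server{Name: "frps", Port: 7000}
    cfg.Dump(s)             // dump to stdout
    cfg.Fdump(os.Stderr, s) // or to any io.Writer
    _ = cfg.Sdump(s)        // or capture the output as a string
}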

vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file

@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Print. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew

vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file

@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound == true:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}
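As a usage aside (editorial, not part of the vendored file above): the Dump/Fdump/Sdump trio documented here is easiest to grasp from a small sketch. The server struct and its values below are invented for illustration; the spew calls are the ones defined in this file.

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

// server is a throwaway type used only to show typical Dump output.
type server struct {
	Name  string
	Ports []int
	raw   []byte // unexported; spew can still walk it (via unsafe where available)
}

func main() {
	s := &server{Name: "frps", Ports: []int{7000, 7500}, raw: []byte("ok")}

	// Dump writes to stdout with full type information and follows the
	// pointer; byte slices are rendered hexdump -C style.
	spew.Dump(s)

	// Fdump targets any io.Writer; Sdump returns the same output as a string.
	spew.Fdump(os.Stderr, s)
	_ = spew.Sdump(s)
}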

vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file

@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
// combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}
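A brief illustration (editorial, not part of the file) of how the formatter described above plugs into the standard fmt verbs; the config struct is made up for the example.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := struct {
		Addr string
		Port int
	}{Addr: "127.0.0.1", Port: 7000}

	f := spew.NewFormatter(cfg)

	fmt.Printf("%v\n", f)   // most compact form
	fmt.Printf("%+v\n", f)  // adds pointer addresses where applicable
	fmt.Printf("%#v\n", f)  // adds type information
	fmt.Printf("%#+v\n", f) // types and pointer addresses
	// Any verb other than v (e.g. %q, %x) is handed back to the standard
	// fmt package unchanged.
}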

vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file

@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}
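For completeness, a minimal sketch (not part of the vendored file) of the convenience wrappers defined above; the map of ports is invented.

package main

import (
	"github.com/davecgh/go-spew/spew"
)

func main() {
	conf := map[string]int{"bind_port": 7000, "dashboard_port": 7500}

	// Each argument is run through NewFormatter before reaching fmt, so %v
	// here behaves like the spew formatter rather than plain fmt.
	spew.Printf("loaded config: %v\n", conf)

	// Errorf embeds the dumped state in an error value.
	err := spew.Errorf("unexpected config: %+v", conf)
	spew.Println(err)
}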

vendor/github.com/pmezard/go-difflib/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2013, Patrick Mezard
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/pmezard/go-difflib/difflib/difflib.go generated vendored Normal file

@ -0,0 +1,772 @@
// Package difflib is a partial port of the Python difflib module.
//
// It provides tools to compare sequences of strings and generate textual diffs.
//
// The following class and functions have been ported:
//
// - SequenceMatcher
//
// - unified_diff
//
// - context_diff
//
// Getting unified diffs was the main goal of the port. Keep in mind this code
// is mostly suitable for presenting text differences in a human-friendly way; there
// are no guarantees that generated diffs are consumable by patch(1).
package difflib
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
)
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func calculateRatio(matches, length int) float64 {
if length > 0 {
return 2.0 * float64(matches) / float64(length)
}
return 1.0
}
type Match struct {
A int
B int
Size int
}
type OpCode struct {
Tag byte
I1 int
I2 int
J1 int
J2 int
}
// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching". The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk). The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence. This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence. That's what
// catches people's eyes. The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff. This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing: Basic R-O is cubic time worst case and quadratic time expected
// case. SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
a []string
b []string
b2j map[string][]int
IsJunk func(string) bool
autoJunk bool
bJunk map[string]struct{}
matchingBlocks []Match
fullBCount map[string]int
bPopular map[string]struct{}
opCodes []OpCode
}
func NewMatcher(a, b []string) *SequenceMatcher {
m := SequenceMatcher{autoJunk: true}
m.SetSeqs(a, b)
return &m
}
func NewMatcherWithJunk(a, b []string, autoJunk bool,
isJunk func(string) bool) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
m.SetSeqs(a, b)
return &m
}
// Set two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
m.SetSeq1(a)
m.SetSeq2(b)
}
// Set the first sequence to be compared. The second sequence to be compared is
// not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
if &a == &m.a {
return
}
m.a = a
m.matchingBlocks = nil
m.opCodes = nil
}
// Set the second sequence to be compared. The first sequence to be compared is
// not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
if &b == &m.b {
return
}
m.b = b
m.matchingBlocks = nil
m.opCodes = nil
m.fullBCount = nil
m.chainB()
}
func (m *SequenceMatcher) chainB() {
// Populate line -> index mapping
b2j := map[string][]int{}
for i, s := range m.b {
indices := b2j[s]
indices = append(indices, i)
b2j[s] = indices
}
// Purge junk elements
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
for s, _ := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
for s, _ := range junk {
delete(b2j, s)
}
}
// Purge remaining popular elements
popular := map[string]struct{}{}
n := len(m.b)
if m.autoJunk && n >= 200 {
ntest := n/100 + 1
for s, indices := range b2j {
if len(indices) > ntest {
popular[s] = struct{}{}
}
}
for s, _ := range popular {
delete(b2j, s)
}
}
m.bPopular = popular
m.b2j = b2j
}
func (m *SequenceMatcher) isBJunk(s string) bool {
_, ok := m.bJunk[s]
return ok
}
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
//
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
// and for all (i',j',k') meeting those conditions,
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
// start earliest in a, return the one that starts earliest in b.
//
// If IsJunk is defined, first the longest matching block is
// determined as above, but with the additional restriction that no
// junk element appears in the block. Then that block is extended as
// far as possible by matching (only) junk elements on both sides. So
// the resulting block never matches on junk except as identical junk
// happens to be adjacent to an "interesting" match.
//
// If no blocks match, return (alo, blo, 0).
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
// CAUTION: stripping common prefix or suffix would be incorrect.
// E.g.,
// ab
// acab
// Longest matching block is "ab", but if common prefix is
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
// strip, so ends up claiming that ab is changed to acab by
// inserting "ca" in the middle. That's minimal but unintuitive:
// "it's obvious" that someone inserted "ac" at the front.
// Windiff ends up at the same place as diff, but by pairing up
// the unique 'b's and then matching the first two 'a's.
besti, bestj, bestsize := alo, blo, 0
// find longest junk-free match
// during an iteration of the loop, j2len[j] = length of longest
// junk-free match ending with a[i-1] and b[j]
j2len := map[int]int{}
for i := alo; i != ahi; i++ {
// look at all instances of a[i] in b; note that because
// b2j has no junk keys, the loop is skipped if a[i] is junk
newj2len := map[int]int{}
for _, j := range m.b2j[m.a[i]] {
// a[i] matches b[j]
if j < blo {
continue
}
if j >= bhi {
break
}
k := j2len[j-1] + 1
newj2len[j] = k
if k > bestsize {
besti, bestj, bestsize = i-k+1, j-k+1, k
}
}
j2len = newj2len
}
// Extend the best by non-junk elements on each end. In particular,
// "popular" non-junk elements aren't in b2j, which greatly speeds
// the inner loop above, but also means "the best" match so far
// doesn't contain any junk *or* popular non-junk elements.
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
!m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
// Now that we have a wholly interesting match (albeit possibly
// empty!), we may as well suck up the matching junk on each
// side of it too. Can't think of a good reason not to, and it
// saves post-processing the (possibly considerable) expense of
// figuring out what to do with it. In the case of an empty
// interesting match, this is clearly the right thing to do,
// because no other kind of match is possible in the regions.
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
return Match{A: besti, B: bestj, Size: bestsize}
}
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
if m.matchingBlocks != nil {
return m.matchingBlocks
}
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
match := m.findLongestMatch(alo, ahi, blo, bhi)
i, j, k := match.A, match.B, match.Size
if match.Size > 0 {
if alo < i && blo < j {
matched = matchBlocks(alo, i, blo, j, matched)
}
matched = append(matched, match)
if i+k < ahi && j+k < bhi {
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
}
}
return matched
}
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
// It's possible that we have adjacent equal blocks in the
// matching_blocks list now.
nonAdjacent := []Match{}
i1, j1, k1 := 0, 0, 0
for _, b := range matched {
// Is this block adjacent to i1, j1, k1?
i2, j2, k2 := b.A, b.B, b.Size
if i1+k1 == i2 && j1+k1 == j2 {
// Yes, so collapse them -- this just increases the length of
// the first block by the length of the second, and the first
// block so lengthened remains the block to compare against.
k1 += k2
} else {
// Not adjacent. Remember the first block (k1==0 means it's
// the dummy we started with), and make the second block the
// new block to compare against.
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
i1, j1, k1 = i2, j2, k2
}
}
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
m.matchingBlocks = nonAdjacent
return m.matchingBlocks
}
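A small, illustrative check of the matching-block contract described above; the input slices are arbitrary.

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	m := difflib.NewMatcher(
		[]string{"one", "two", "three"},
		[]string{"one", "three"},
	)

	// The triples increase monotonically and the final entry is always the
	// (len(a), len(b), 0) sentinel.
	for _, blk := range m.GetMatchingBlocks() {
		fmt.Printf("a[%d:%d] == b[%d:%d] (size %d)\n",
			blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size, blk.Size)
	}
}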
// Return list of 5-tuples describing how to turn a into b.
//
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
// tuple preceding it, and likewise for j1 == the previous j2.
//
// The tags are characters, with these meanings:
//
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
//
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
//
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
//
// 'e' (equal): a[i1:i2] == b[j1:j2]
func (m *SequenceMatcher) GetOpCodes() []OpCode {
if m.opCodes != nil {
return m.opCodes
}
i, j := 0, 0
matching := m.GetMatchingBlocks()
opCodes := make([]OpCode, 0, len(matching))
for _, m := range matching {
// invariant: we've pumped out correct diffs to change
// a[:i] into b[:j], and the next matching block is
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
// out a diff to change a[i:ai] into b[j:bj], pump out
// the matching block, and move (i,j) beyond the match
ai, bj, size := m.A, m.B, m.Size
tag := byte(0)
if i < ai && j < bj {
tag = 'r'
} else if i < ai {
tag = 'd'
} else if j < bj {
tag = 'i'
}
if tag > 0 {
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
}
i, j = ai+size, bj+size
// the list of matching blocks is terminated by a
// sentinel with size 0
if size > 0 {
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
}
}
m.opCodes = opCodes
return m.opCodes
}
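To make the tag description above concrete, here is a minimal loop over GetOpCodes (an editorial sketch, with arbitrary inputs).

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	a := []string{"one", "two", "three"}
	b := []string{"one", "three", "four"}

	m := difflib.NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		// Tag is 'r' (replace), 'd' (delete), 'i' (insert) or 'e' (equal);
		// the indices describe the half-open ranges a[I1:I2] and b[J1:J2].
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}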
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
if n < 0 {
n = 3
}
codes := m.GetOpCodes()
if len(codes) == 0 {
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
}
// Fixup leading and trailing groups if they show no changes.
if codes[0].Tag == 'e' {
c := codes[0]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
}
if codes[len(codes)-1].Tag == 'e' {
c := codes[len(codes)-1]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
}
nn := n + n
groups := [][]OpCode{}
group := []OpCode{}
for _, c := range codes {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
// End the current group and start a new one whenever
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n)})
groups = append(groups, group)
group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n)
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
groups = append(groups, group)
}
return groups
}
// Return a measure of the sequences' similarity (float in [0,1]).
//
// Where T is the total number of elements in both sequences, and
// M is the number of matches, this is 2.0*M / T.
// Note that this is 1 if the sequences are identical, and 0 if
// they have nothing in common.
//
// .Ratio() is expensive to compute if you haven't already computed
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
// want to try .QuickRatio() or .RealQuickRatio() first to get an
// upper bound.
func (m *SequenceMatcher) Ratio() float64 {
matches := 0
for _, m := range m.GetMatchingBlocks() {
matches += m.Size
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() relatively quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute.
func (m *SequenceMatcher) QuickRatio() float64 {
// viewing a and b as multisets, set matches to the cardinality
// of their intersection; this counts the number of matches
// without regard to order, so is clearly an upper bound
if m.fullBCount == nil {
m.fullBCount = map[string]int{}
for _, s := range m.b {
m.fullBCount[s] = m.fullBCount[s] + 1
}
}
// avail[x] is the number of times x appears in 'b' less the
// number of times we've seen it in 'a' so far ... kinda
avail := map[string]int{}
matches := 0
for _, s := range m.a {
n, ok := avail[s]
if !ok {
n = m.fullBCount[s]
}
avail[s] = n - 1
if n > 0 {
matches += 1
}
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() very quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
la, lb := len(m.a), len(m.b)
return calculateRatio(min(la, lb), la+lb)
}
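A quick numeric illustration of the ratio family documented above (editorial sketch; inputs chosen so the arithmetic is easy to follow).

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	m := difflib.NewMatcher(
		[]string{"a", "b", "c"},
		[]string{"a", "c"},
	)

	// T = 5 elements in total, M = 2 matching elements ("a" and "c"),
	// so Ratio = 2*M/T = 0.8. QuickRatio and RealQuickRatio are cheaper
	// upper bounds on the same quantity.
	fmt.Println(m.Ratio())          // 0.8
	fmt.Println(m.QuickRatio())     // >= Ratio()
	fmt.Println(m.RealQuickRatio()) // >= QuickRatio()
}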
// Convert range to the "ed" format
func formatRangeUnified(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
return fmt.Sprintf("%d", beginning)
}
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
return fmt.Sprintf("%d,%d", beginning, length)
}
// Unified diff parameters
type UnifiedDiff struct {
A []string // First sequence lines
FromFile string // First file name
FromDate string // First file time
B []string // Second sequence lines
ToFile string // Second file name
ToDate string // Second file time
Eol string // Headers end of line, defaults to LF
Context int // Number of context lines
}
// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by 'n' which
// defaults to three.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the lineterm
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
return err
}
ws := func(s string) error {
_, err := buf.WriteString(s)
return err
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
if err != nil {
return err
}
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
if err != nil {
return err
}
}
}
first, last := g[0], g[len(g)-1]
range1 := formatRangeUnified(first.I1, last.I2)
range2 := formatRangeUnified(first.J1, last.J2)
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
return err
}
for _, c := range g {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
if c.Tag == 'e' {
for _, line := range diff.A[i1:i2] {
if err := ws(" " + line); err != nil {
return err
}
}
continue
}
if c.Tag == 'r' || c.Tag == 'd' {
for _, line := range diff.A[i1:i2] {
if err := ws("-" + line); err != nil {
return err
}
}
}
if c.Tag == 'r' || c.Tag == 'i' {
for _, line := range diff.B[j1:j2] {
if err := ws("+" + line); err != nil {
return err
}
}
}
}
}
return nil
}
// Like WriteUnifiedDiff but returns the diff as a string.
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteUnifiedDiff(w, diff)
return string(w.Bytes()), err
}
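The parameters described above are easiest to see end to end; the file names and contents in this sketch are invented.

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("bind_port = 7000\ntoken = old\n"),
		B:        difflib.SplitLines("bind_port = 7000\ntoken = new\n"),
		FromFile: "frps.ini.orig",
		ToFile:   "frps.ini",
		Context:  3,
	}

	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}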
// Convert range to the "ed" format.
func formatRangeContext(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
if length <= 1 {
return fmt.Sprintf("%d", beginning)
}
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
}
type ContextDiff UnifiedDiff
// Compare two sequences of lines; generate the delta as a context diff.
//
// Context diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context
// which defaults to three.
//
// By default, the diff control lines (those with *** or ---) are
// created with a trailing newline.
//
// For inputs that do not have trailing newlines, set the diff.Eol
// argument to "" so that the output will be uniformly newline free.
//
// The context diff format normally has a header for filenames and
// modification times. Any or all of these may be specified using
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
// If not specified, the strings default to blanks.
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
var diffErr error
wf := func(format string, args ...interface{}) {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
if diffErr == nil && err != nil {
diffErr = err
}
}
ws := func(s string) {
_, err := buf.WriteString(s)
if diffErr == nil && err != nil {
diffErr = err
}
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
prefix := map[byte]string{
'i': "+ ",
'd': "- ",
'r': "! ",
'e': " ",
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
if diff.FromFile != "" || diff.ToFile != "" {
wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
}
}
first, last := g[0], g[len(g)-1]
ws("***************" + diff.Eol)
range1 := formatRangeContext(first.I1, last.I2)
wf("*** %s ****%s", range1, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'd' {
for _, cc := range g {
if cc.Tag == 'i' {
continue
}
for _, line := range diff.A[cc.I1:cc.I2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
range2 := formatRangeContext(first.J1, last.J2)
wf("--- %s ----%s", range2, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'i' {
for _, cc := range g {
if cc.Tag == 'd' {
continue
}
for _, line := range diff.B[cc.J1:cc.J2] {
ws(prefix[cc.Tag] + line)
}
}
break
}
}
}
return diffErr
}
// Like WriteContextDiff but returns the diff as a string.
func GetContextDiffString(diff ContextDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteContextDiff(w, diff)
return string(w.Bytes()), err
}
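And the context-diff variant, for symmetry (same kind of invented inputs as the unified sketch above).

package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	diff := difflib.ContextDiff{
		A:        difflib.SplitLines("a\nb\n"),
		B:        difflib.SplitLines("a\nc\n"),
		FromFile: "before.txt",
		ToFile:   "after.txt",
		Context:  3,
	}

	text, err := difflib.GetContextDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}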
// Split a string on "\n" while preserving them. The output can be used
// as input for UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
lines := strings.SplitAfter(s, "\n")
lines[len(lines)-1] += "\n"
return lines
}

vendor/github.com/stretchr/testify/LICENCE.txt generated vendored Normal file

@ -0,0 +1,22 @@
Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
Please consider promoting this project if you find it useful.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/stretchr/testify/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
Please consider promoting this project if you find it useful.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -0,0 +1,352 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package assert
import (
http "net/http"
url "net/url"
time "time"
)
// Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
return Condition(a.t, comp, msgAndArgs...)
}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return Contains(a.t, s, contains, msgAndArgs...)
}
// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or
// a slice or a channel with len == 0.
//
// a.Empty(obj)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
// Equal asserts that two objects are equal.
//
// a.Equal(123, 123, "123 and 123 should be equal")
//
// Returns whether the assertion was successful (true) or not (false).
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Equal(a.t, expected, actual, msgAndArgs...)
}
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// a.EqualError(err, expectedErrorString, "An error was expected")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
return EqualError(a.t, theError, errString, msgAndArgs...)
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
// if a.Error(err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
// Exactly asserts that two objects are equal in value and type.
//
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Exactly(a.t, expected, actual, msgAndArgs...)
}
// Fail reports a failure without stopping the test.
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
return Fail(a.t, failureMessage, msgAndArgs...)
}
// FailNow fails the test immediately.
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
return FailNow(a.t, failureMessage, msgAndArgs...)
}
// False asserts that the specified value is false.
//
// a.False(myBool, "myBool should be false")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
return False(a.t, value, msgAndArgs...)
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyContains(a.t, handler, method, url, values, str)
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyNotContains(a.t, handler, method, url, values, str)
}
// HTTPError asserts that a specified handler returns an error status code.
//
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPError(a.t, handler, method, url, values)
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPRedirect(a.t, handler, method, url, values)
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPSuccess(a.t, handler, method, url, values)
}
// Implements asserts that the object implements the specified interface.
//
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return Implements(a.t, interfaceObject, object, msgAndArgs...)
}
// InDelta asserts that the two numerals are within delta of each other.
//
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
}
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return IsType(a.t, expectedType, object, msgAndArgs...)
}
// JSONEq asserts that two JSON strings are equivalent.
//
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
return JSONEq(a.t, expected, actual, msgAndArgs...)
}
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
// a.Len(mySlice, 3, "The size of slice is not 3")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
return Len(a.t, object, length, msgAndArgs...)
}
// Nil asserts that the specified object is nil.
//
// a.Nil(err, "err should be nothing")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
return Nil(a.t, object, msgAndArgs...)
}
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if a.NoError(err) {
// assert.Equal(t, actualObj, expectedObj)
// }
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
return NoError(a.t, err, msgAndArgs...)
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return NotContains(a.t, s, contains, msgAndArgs...)
}
// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, nor
// a slice or a channel with len == 0.
//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
// }
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
return NotEmpty(a.t, object, msgAndArgs...)
}
// NotEqual asserts that the specified values are NOT equal.
//
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
//
// Returns whether the assertion was successful (true) or not (false).
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err, "err should be something")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
return NotNil(a.t, object, msgAndArgs...)
}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
// a.NotPanics(func(){
// RemainCalm()
// }, "Calling RemainCalm() should NOT panic")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return NotPanics(a.t, f, msgAndArgs...)
}
// NotRegexp asserts that a specified regexp does not match a string.
//
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
// a.NotRegexp("^start", "it's not starting")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return NotRegexp(a.t, rx, str, msgAndArgs...)
}
// NotZero asserts that i is not the zero value for its type, returning whether the assertion was successful.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
return NotZero(a.t, i, msgAndArgs...)
}
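As a point of reference, "zero value" here follows Go's definition: 0 for numeric types, "" for strings, nil for pointers, slices and maps, and a struct with all fields zero. A short sketch in a hypothetical test (the companion Zero wrapper appears further below):

package example

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestZeroValues is a hypothetical test showing what counts as a zero value.
func TestZeroValues(t *testing.T) {
	a := assert.New(t)

	a.Zero(0)                        // zero value of int
	a.Zero("")                       // zero value of string
	a.Zero((*bytes.Buffer)(nil))     // a nil pointer is the zero value of its type
	a.NotZero(42)                    // non-zero int
	a.NotZero(struct{ N int }{N: 1}) // struct with a non-zero field
}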
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
// a.Panics(func(){
// GoCrazy()
// }, "Calling GoCrazy() should panic")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
// a.Regexp("start...$", "it's not starting")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return Regexp(a.t, rx, str, msgAndArgs...)
}
// True asserts that the specified value is true.
//
// a.True(myBool, "myBool should be true")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
return True(a.t, value, msgAndArgs...)
}
// WithinDuration asserts that the two times are within duration delta of each other.
//
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
// Zero asserts that i is the zero value for its type, returning whether the assertion was successful.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
return Zero(a.t, i, msgAndArgs...)
}
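Every method above simply forwards to the package-level assertion of the same name, passing along the stored TestingT, so the two call styles are interchangeable. A minimal sketch, assuming a hypothetical test file:

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestForwarding is a hypothetical test; both calls perform the same check.
func TestForwarding(t *testing.T) {
	a := assert.New(t)

	start := time.Now()
	assert.WithinDuration(t, start, time.Now(), 10*time.Second) // package-level form
	a.WithinDuration(start, time.Now(), 10*time.Second)         // forwarded method form
}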

View File

@ -0,0 +1,4 @@
{{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}

1069
vendor/github.com/stretchr/testify/assert/assertions.go generated vendored Normal file

File diff suppressed because it is too large

45
vendor/github.com/stretchr/testify/assert/doc.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
// Example Usage
//
// The following is a complete example using assert in a standard test function:
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
//
// var a string = "Hello"
// var b string = "Hello"
//
// assert.Equal(t, a, b, "The two words should be the same.")
//
// }
//
// If you assert many times, use the format below:
//
// import (
// "testing"
// "github.com/stretchr/testify/assert"
// )
//
// func TestSomething(t *testing.T) {
// assert := assert.New(t)
//
// var a string = "Hello"
// var b string = "Hello"
//
// assert.Equal(a, b, "The two words should be the same.")
// }
//
// Assertions
//
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
// All assertion functions take, as the first argument, the `*testing.T` object provided by the
// testing framework. This allows the assertion funcs to write the failings and other details to
// the correct place.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
package assert

10
vendor/github.com/stretchr/testify/assert/errors.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
package assert
import (
"errors"
)
// AnError is an error instance useful for testing. If the code does not care
// about error specifics and only needs some error to return, this error
// should be used to make the test code more readable.
var AnError = errors.New("assert.AnError general error for testing")
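A short sketch of the intended use, assuming a hypothetical loadConfig function whose concrete error value is irrelevant to the test:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// loadConfig is a hypothetical function under test that propagates errors
// from the reader it is given.
func loadConfig(read func() ([]byte, error)) ([]byte, error) {
	return read()
}

func TestLoadConfigPropagatesError(t *testing.T) {
	// The test only cares that an error comes back at all, so assert.AnError
	// saves inventing a throwaway error value.
	_, err := loadConfig(func() ([]byte, error) { return nil, assert.AnError })
	assert.Equal(t, assert.AnError, err)
}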

View File

@ -0,0 +1,16 @@
package assert
// Assertions provides assertion methods around the
// TestingT interface.
type Assertions struct {
t TestingT
}
// New makes a new Assertions object for the specified TestingT.
func New(t TestingT) *Assertions {
return &Assertions{
t: t,
}
}
//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl
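Because New only needs something satisfying TestingT, a test double can stand in for *testing.T, which is handy when testing helpers built on top of this package. A minimal sketch, assuming TestingT requires only an Errorf method (as it appears to in this vendored copy) and using a hypothetical recorder type:

package example

import (
	"fmt"

	"github.com/stretchr/testify/assert"
)

// recorder is a hypothetical TestingT implementation that captures failure
// messages instead of failing a real test.
type recorder struct {
	failures []string
}

func (r *recorder) Errorf(format string, args ...interface{}) {
	r.failures = append(r.failures, fmt.Sprintf(format, args...))
}

func Example() {
	r := &recorder{}
	a := assert.New(r)

	a.Equal(1, 2) // fails, so one failure message is recorded
	fmt.Println(len(r.failures))
	// Output: 1
}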

View File

@ -0,0 +1,106 @@
package assert
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
)
// httpCode is a helper that returns the HTTP status code of the response. It
// returns -1 if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
return -1
}
handler(w, req)
return w.Code
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
code := httpCode(handler, method, url, values)
if code == -1 {
return false
}
return code >= http.StatusOK && code <= http.StatusPartialContent
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
code := httpCode(handler, method, url, values)
if code == -1 {
return false
}
return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
}
// HTTPError asserts that a specified handler returns an error status code.
//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
code := httpCode(handler, method, url, values)
if code == -1 {
return false
}
return code >= http.StatusBadRequest
}
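These helpers drive the handler in-process through httptest, so no network listener is needed. A minimal usage sketch with hypothetical handlers:

package example

import (
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// healthHandler is a hypothetical handler that always reports success.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

// brokenHandler is a hypothetical handler that always fails.
func brokenHandler(w http.ResponseWriter, r *http.Request) {
	http.Error(w, "boom", http.StatusInternalServerError)
}

func TestStatusCodes(t *testing.T) {
	assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)
	assert.HTTPError(t, brokenHandler, "GET", "/broken", nil)
}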
// HTTPBody is a helper that returns the HTTP body of the response. It returns
// an empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
return ""
}
handler(w, req)
return w.Body.String()
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return contains
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
body := HTTPBody(handler, method, url, values)
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
}
return !contains
}
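And a matching sketch for the body assertions, again with a hypothetical handler:

package example

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// greetHandler is a hypothetical handler returning a fixed body.
func greetHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "hello, frp")
}

func TestGreetingBody(t *testing.T) {
	assert.HTTPBodyContains(t, greetHandler, "GET", "/greet", nil, "hello")
	assert.HTTPBodyNotContains(t, greetHandler, "GET", "/greet", nil, "goodbye")
}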