release v0.8.0

fatedier 2016-08-12 14:00:54 +08:00 committed by GitHub
commit a0903d4121
174 changed files with 3220 additions and 28022 deletions


@@ -2,8 +2,9 @@ sudo: false
language: go
go:
- 1.4.2
- 1.5.3
- 1.5.4
- 1.6.3
- 1.7rc6
install:
- make

Godeps/Godeps.json (generated), 49 changed lines

@@ -1,15 +1,15 @@
{
"ImportPath": "frp",
"GoVersion": "go1.4",
"GodepVersion": "v62",
"ImportPath": "github.com/fatedier/frp",
"GoVersion": "go1.6",
"GodepVersion": "v74",
"Packages": [
"./..."
"./src/..."
],
"Deps": [
{
"ImportPath": "github.com/astaxie/beego/logs",
"Comment": "v1.5.0-9-gfb7314f",
"Rev": "fb7314f8ac86b83ccd34386518d97cf2363e2ae5"
"Comment": "v1.6.1-5-g88c5dfa",
"Rev": "88c5dfa6ead42e624c2e7d9e04eab6cb2d07412a"
},
{
"ImportPath": "github.com/docopt/docopt-go",
@@ -17,44 +17,13 @@
"Rev": "784ddc588536785e7299f7272f39101f7faccc3f"
},
{
"ImportPath": "github.com/gin-gonic/gin",
"Comment": "v1.0rc1-262-g5caaac4",
"Rev": "5caaac4c5c712a9e7a7de29e6c24ef46c753017f"
},
{
"ImportPath": "github.com/gin-gonic/gin/binding",
"Comment": "v1.0rc1-262-g5caaac4",
"Rev": "5caaac4c5c712a9e7a7de29e6c24ef46c753017f"
},
{
"ImportPath": "github.com/gin-gonic/gin/render",
"Comment": "v1.0rc1-262-g5caaac4",
"Rev": "5caaac4c5c712a9e7a7de29e6c24ef46c753017f"
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "7cc19b78d562895b13596ddce7aafb59dd789318"
},
{
"ImportPath": "github.com/manucorporat/sse",
"Rev": "ee05b128a739a0fb76c7ebd3ae4810c1de808d6d"
"ImportPath": "github.com/rakyll/statik/fs",
"Comment": "v0.1.0",
"Rev": "274df120e9065bdd08eb1120e0375e3dc1ae8465"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "3b993948b6f0e651ffb58ba135d8538a68b1cddf"
},
{
"ImportPath": "gopkg.in/go-playground/validator.v8",
"Comment": "v8.17.1",
"Rev": "014792cf3e266caff1e916876be12282b33059e0"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "a83829b6f1293c91addabc89d0571c246397bbf4"
}
]
}

Godeps/_workspace/.gitignore (generated, vendored), 2 changed lines

@@ -1,2 +0,0 @@
/pkg
/bin


@@ -1,95 +0,0 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logs
import (
"encoding/json"
"log"
"os"
"runtime"
)
type Brush func(string) string
func NewBrush(color string) Brush {
pre := "\033["
reset := "\033[0m"
return func(text string) string {
return pre + color + "m" + text + reset
}
}
var colors = []Brush{
NewBrush("1;37"), // Emergency white
NewBrush("1;36"), // Alert cyan
NewBrush("1;35"), // Critical magenta
NewBrush("1;31"), // Error red
NewBrush("1;33"), // Warning yellow
NewBrush("1;32"), // Notice green
NewBrush("1;34"), // Informational blue
NewBrush("1;34"), // Debug blue
}
// ConsoleWriter implements LoggerInterface and writes messages to terminal.
type ConsoleWriter struct {
lg *log.Logger
Level int `json:"level"`
}
// NewConsole creates a ConsoleWriter and returns it as a LoggerInterface.
func NewConsole() LoggerInterface {
cw := &ConsoleWriter{
lg: log.New(os.Stdout, "", log.Ldate|log.Ltime),
Level: LevelDebug,
}
return cw
}
// init console logger.
// jsonconfig like '{"level":LevelTrace}'.
func (c *ConsoleWriter) Init(jsonconfig string) error {
if len(jsonconfig) == 0 {
return nil
}
return json.Unmarshal([]byte(jsonconfig), c)
}
// write message in console.
func (c *ConsoleWriter) WriteMsg(msg string, level int) error {
if level > c.Level {
return nil
}
if goos := runtime.GOOS; goos == "windows" {
c.lg.Println(msg)
return nil
}
c.lg.Println(colors[level](msg))
return nil
}
// implementing method. empty.
func (c *ConsoleWriter) Destroy() {
}
// implementing method. empty.
func (c *ConsoleWriter) Flush() {
}
func init() {
Register("console", NewConsole)
}
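
For context, the `Brush` values above are just closures over ANSI SGR escape codes; the short standalone sketch below illustrates the same technique (the color code and sample text are illustrative, not taken from this package):

```go
package main

import "fmt"

// brush mirrors NewBrush above: it returns a closure that wraps text in an
// ANSI escape sequence for the given SGR color code and then resets the color.
func brush(color string) func(string) string {
    return func(text string) string {
        return "\033[" + color + "m" + text + "\033[0m"
    }
}

func main() {
    red := brush("1;31") // the same code the writer maps to Error above
    fmt.Println(red("this prints in bold red on an ANSI terminal"))
}
```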


@@ -1,76 +0,0 @@
package es
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/url"
"time"
"github.com/astaxie/beego/logs"
"github.com/belogik/goes"
)
func NewES() logs.LoggerInterface {
cw := &esLogger{
Level: logs.LevelDebug,
}
return cw
}
type esLogger struct {
*goes.Connection
DSN string `json:"dsn"`
Level int `json:"level"`
}
// {"dsn":"http://localhost:9200/","level":1}
func (el *esLogger) Init(jsonconfig string) error {
err := json.Unmarshal([]byte(jsonconfig), el)
if err != nil {
return err
}
if el.DSN == "" {
return errors.New("empty dsn")
} else if u, err := url.Parse(el.DSN); err != nil {
return err
} else if u.Path == "" {
return errors.New("missing prefix")
} else if host, port, err := net.SplitHostPort(u.Host); err != nil {
return err
} else {
conn := goes.NewConnection(host, port)
el.Connection = conn
}
return nil
}
func (el *esLogger) WriteMsg(msg string, level int) error {
if level > el.Level {
return nil
}
t := time.Now()
vals := make(map[string]interface{})
vals["@timestamp"] = t.Format(time.RFC3339)
vals["@msg"] = msg
d := goes.Document{
Index: fmt.Sprintf("%04d.%02d.%02d", t.Year(), t.Month(), t.Day()),
Type: "logs",
Fields: vals,
}
_, err := el.Index(d, nil)
return err
}
func (el *esLogger) Destroy() {
}
func (el *esLogger) Flush() {
}
func init() {
logs.Register("es", NewES)
}


@@ -1,283 +0,0 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logs
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
// FileLogWriter implements LoggerInterface.
// It writes messages by lines limit, file size limit, or time frequency.
type FileLogWriter struct {
*log.Logger
mw *MuxWriter
// The opened file
Filename string `json:"filename"`
Maxlines int `json:"maxlines"`
maxlines_curlines int
// Rotate at size
Maxsize int `json:"maxsize"`
maxsize_cursize int
// Rotate daily
Daily bool `json:"daily"`
Maxdays int64 `json:"maxdays"`
daily_opendate int
Rotate bool `json:"rotate"`
startLock sync.Mutex // Only one log can write to the file
Level int `json:"level"`
}
// an *os.File writer with locker.
type MuxWriter struct {
sync.Mutex
fd *os.File
}
// write to os.File.
func (l *MuxWriter) Write(b []byte) (int, error) {
l.Lock()
defer l.Unlock()
return l.fd.Write(b)
}
// set os.File in writer.
func (l *MuxWriter) SetFd(fd *os.File) {
if l.fd != nil {
l.fd.Close()
}
l.fd = fd
}
// NewFileWriter creates a FileLogWriter and returns it as a LoggerInterface.
func NewFileWriter() LoggerInterface {
w := &FileLogWriter{
Filename: "",
Maxlines: 1000000,
Maxsize: 1 << 28, //256 MB
Daily: true,
Maxdays: 7,
Rotate: true,
Level: LevelTrace,
}
// use MuxWriter instead of os.File directly so writes are locked during rotation
w.mw = new(MuxWriter)
// set MuxWriter as Logger's io.Writer
w.Logger = log.New(w.mw, "", log.Ldate|log.Ltime)
return w
}
// Init file logger with json config.
// jsonconfig like:
// {
// "filename":"logs/beego.log",
// "maxlines":10000,
// "maxsize":1<<30,
// "daily":true,
// "maxdays":15,
// "rotate":true
// }
func (w *FileLogWriter) Init(jsonconfig string) error {
err := json.Unmarshal([]byte(jsonconfig), w)
if err != nil {
return err
}
if len(w.Filename) == 0 {
return errors.New("jsonconfig must have filename")
}
err = w.startLogger()
return err
}
// start file logger. create log file and set to locker-inside file writer.
func (w *FileLogWriter) startLogger() error {
fd, err := w.createLogFile()
if err != nil {
return err
}
w.mw.SetFd(fd)
return w.initFd()
}
func (w *FileLogWriter) docheck(size int) {
w.startLock.Lock()
defer w.startLock.Unlock()
if w.Rotate && ((w.Maxlines > 0 && w.maxlines_curlines >= w.Maxlines) ||
(w.Maxsize > 0 && w.maxsize_cursize >= w.Maxsize) ||
(w.Daily && time.Now().Day() != w.daily_opendate)) {
if err := w.DoRotate(); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
return
}
}
w.maxlines_curlines++
w.maxsize_cursize += size
}
// write logger message into file.
func (w *FileLogWriter) WriteMsg(msg string, level int) error {
if level > w.Level {
return nil
}
n := 24 + len(msg) // 24 stands for the length of "2013/06/23 21:00:22 [T] "
w.docheck(n)
w.Logger.Println(msg)
return nil
}
func (w *FileLogWriter) createLogFile() (*os.File, error) {
// Open the log file
fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
return fd, err
}
func (w *FileLogWriter) initFd() error {
fd := w.mw.fd
finfo, err := fd.Stat()
if err != nil {
return fmt.Errorf("get stat err: %s\n", err)
}
w.maxsize_cursize = int(finfo.Size())
w.daily_opendate = time.Now().Day()
w.maxlines_curlines = 0
if finfo.Size() > 0 {
count, err := w.lines()
if err != nil {
return err
}
w.maxlines_curlines = count
}
return nil
}
func (w *FileLogWriter) lines() (int, error) {
fd, err := os.Open(w.Filename)
if err != nil {
return 0, err
}
defer fd.Close()
buf := make([]byte, 32768) // 32k
count := 0
lineSep := []byte{'\n'}
for {
c, err := fd.Read(buf)
if err != nil && err != io.EOF {
return count, err
}
count += bytes.Count(buf[:c], lineSep)
if err == io.EOF {
break
}
}
return count, nil
}
// DoRotate renames the current file and starts writing to a fresh one.
// new file name like xx.log.2013-01-01.2
func (w *FileLogWriter) DoRotate() error {
_, err := os.Lstat(w.Filename)
if err == nil { // file exists
// Find the next available number
num := 1
fname := ""
for ; err == nil && num <= 999; num++ {
fname = w.Filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num)
_, err = os.Lstat(fname)
}
// return error if the last file checked still existed
if err == nil {
return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.Filename)
}
// block Logger's io.Writer
w.mw.Lock()
defer w.mw.Unlock()
fd := w.mw.fd
fd.Close()
// close fd before rename
// Rename the file to its newfound home
err = os.Rename(w.Filename, fname)
if err != nil {
return fmt.Errorf("Rotate: %s\n", err)
}
// re-start logger
err = w.startLogger()
if err != nil {
return fmt.Errorf("Rotate StartLogger: %s\n", err)
}
go w.deleteOldLog()
}
return nil
}
func (w *FileLogWriter) deleteOldLog() {
dir := filepath.Dir(w.Filename)
filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
defer func() {
if r := recover(); r != nil {
returnErr = fmt.Errorf("Unable to delete old log '%s', error: %+v", path, r)
fmt.Println(returnErr)
}
}()
if !info.IsDir() && info.ModTime().Unix() < (time.Now().Unix()-60*60*24*w.Maxdays) {
if strings.HasPrefix(filepath.Base(path), filepath.Base(w.Filename)) {
os.Remove(path)
}
}
return
})
}
// destroy file logger, close file writer.
func (w *FileLogWriter) Destroy() {
w.mw.fd.Close()
}
// flush file logger.
// there are no buffering messages in file logger in memory.
// flush file means sync file from disk.
func (w *FileLogWriter) Flush() {
w.mw.fd.Sync()
}
func init() {
Register("file", NewFileWriter)
}
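
For context, adapters registered through `Register` are normally driven by beego's logs front end, which hands the JSON config string to the adapter's `Init`; below is a minimal caller-side sketch (the file name and settings are illustrative), assuming the package's `NewLogger`/`SetLogger` API:

```go
package main

import "github.com/astaxie/beego/logs"

func main() {
    // NewLogger takes the channel length used for buffered log messages.
    log := logs.NewLogger(10000)
    // SetLogger selects the "file" adapter registered above; the JSON keys
    // correspond to FileLogWriter's struct tags (filename, daily, maxdays, ...).
    log.SetLogger("file", `{"filename":"test.log","daily":true,"maxdays":7,"rotate":true}`)
    log.Info("file logger with daily rotation is ready")
}
```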


@@ -1,19 +0,0 @@
Copyright (c) 2011-2014 Dmitry Chestnykh <dmitry@codingrobots.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -1,4 +0,0 @@
Godeps/*
!Godeps/Godeps.json
coverage.out
count.out


@@ -1,23 +0,0 @@
language: go
sudo: false
go:
- 1.4
- 1.5
- 1.6
- tip
script:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- go test -v -covermode=count -coverprofile=coverage.out
after_success:
- goveralls -coverprofile=coverage.out -service=travis-ci -repotoken yFj7FrCeddvBzUaaCyG33jCLfWXeb93eA
notifications:
webhooks:
urls:
- https://webhooks.gitter.im/e/acc2c57482e94b44f557
on_success: change # options: [always|never|change] default: always
on_failure: always # options: [always|never|change] default: always
on_start: false # default: false


@@ -1,229 +0,0 @@
List of all the awesome people working to make Gin the best Web Framework in Go.
##gin 0.x series authors
**Maintainer:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho)
People and companies, who have contributed, in alphabetical order.
**@858806258 (杰哥)**
- Fix typo in example
**@achedeuzot (Klemen Sever)**
- Fix newline debug printing
**@adammck (Adam Mckaig)**
- Add MIT license
**@AlexanderChen1989 (Alexander)**
- Typos in README
**@alexanderdidenko (Aleksandr Didenko)**
- Add support multipart/form-data
**@alexandernyquist (Alexander Nyquist)**
- Using template.Must to fix multiple return issue
- ★ Added support for OPTIONS verb
- ★ Setting response headers before calling WriteHeader
- Improved documentation for model binding
- ★ Added Content.Redirect()
- ★ Added tons of Unit tests
**@austinheap (Austin Heap)**
- Added travis CI integration
**@andredublin (Andre Dublin)**
- Fix typo in comment
**@bredov (Ludwig Valda Vasquez)**
- Fix html templating in debug mode
**@bluele (Jun Kimura)**
- Fixes code examples in README
**@chad-russell**
- ★ Support for serializing gin.H into XML
**@dickeyxxx (Jeff Dickey)**
- Typos in README
- Add example about serving static files
**@donileo (Adonis)**
- Add NoMethod handler
**@dutchcoders (DutchCoders)**
- ★ Fix security bug that allows client to spoof ip
- Fix typo. r.HTMLTemplates -> SetHTMLTemplate
**@el3ctro- (Joshua Loper)**
- Fix typo in example
**@ethankan (Ethan Kan)**
- Unsigned integers in binding
**(Evgeny Persienko)**
- Validate sub structures
**@frankbille (Frank Bille)**
- Add support for HTTP Realm Auth
**@fmd (Fareed Dudhia)**
- Fix typo. SetHTTPTemplate -> SetHTMLTemplate
**@ironiridis (Christopher Harrington)**
- Remove old reference
**@jammie-stackhouse (Jamie Stackhouse)**
- Add more shortcuts for router methods
**@jasonrhansen**
- Fix spelling and grammar errors in documentation
**@JasonSoft (Jason Lee)**
- Fix typo in comment
**@joiggama (Ignacio Galindo)**
- Add utf-8 charset header on renders
**@julienschmidt (Julien Schmidt)**
- gofmt the code examples
**@kelcecil (Kel Cecil)**
- Fix readme typo
**@kyledinh (Kyle Dinh)**
- Adds RunTLS()
**@LinusU (Linus Unnebäck)**
- Small fixes in README
**@loongmxbt (Saint Asky)**
- Fix typo in example
**@lucas-clemente (Lucas Clemente)**
- ★ work around path.Join removing trailing slashes from routes
**@mattn (Yasuhiro Matsumoto)**
- Improve color logger
**@mdigger (Dmitry Sedykh)**
- Fixes Form binding when content-type is x-www-form-urlencoded
- No repeat call c.Writer.Status() in gin.Logger
- Fixes Content-Type for json render
**@mirzac (Mirza Ceric)**
- Fix debug printing
**@mopemope (Yutaka Matsubara)**
- ★ Adds Godep support (Dependencies Manager)
- Fix variadic parameter in the flexible render API
- Fix Corrupted plain render
- Add Pluggable View Renderer Example
**@msemenistyi (Mykyta Semenistyi)**
- update Readme.md. Add code to String method
**@msoedov (Sasha Myasoedov)**
- ★ Adds tons of unit tests.
**@ngerakines (Nick Gerakines)**
- ★ Improves API, c.GET() doesn't panic
- Adds MustGet() method
**@r8k (Rajiv Kilaparti)**
- Fix Port usage in README.
**@rayrod2030 (Ray Rodriguez)**
- Fix typo in example
**@rns**
- Fix typo in example
**@RobAWilkinson (Robert Wilkinson)**
- Add example of forms and params
**@rogierlommers (Rogier Lommers)**
- Add updated static serve example
**@se77en (Damon Zhao)**
- Improve color logging
**@silasb (Silas Baronda)**
- Fixing quotes in README
**@SkuliOskarsson (Skuli Oskarsson)**
- Fixes some texts in README II
**@slimmy (Jimmy Pettersson)**
- Added messages for required bindings
**@smira (Andrey Smirnov)**
- Add support for ignored/unexported fields in binding
**@superalsrk (SRK.Lyu)**
- Update httprouter godeps
**@tebeka (Miki Tebeka)**
- Use net/http constants instead of numeric values
**@techjanitor**
- Update context.go reserved IPs
**@yosssi (Keiji Yoshida)**
- Fix link in README
**@yuyabee**
- Fixed README


@@ -1,298 +0,0 @@
**Machine:** intel i7 ivy bridge quad-core. 8GB RAM.
**Date:** June 4th, 2015
[https://github.com/gin-gonic/go-http-routing-benchmark](https://github.com/gin-gonic/go-http-routing-benchmark)
```
BenchmarkAce_Param 5000000 372 ns/op 32 B/op 1 allocs/op
BenchmarkBear_Param 1000000 1165 ns/op 424 B/op 5 allocs/op
BenchmarkBeego_Param 1000000 2440 ns/op 720 B/op 10 allocs/op
BenchmarkBone_Param 1000000 1067 ns/op 384 B/op 3 allocs/op
BenchmarkDenco_Param 5000000 240 ns/op 32 B/op 1 allocs/op
BenchmarkEcho_Param 10000000 130 ns/op 0 B/op 0 allocs/op
BenchmarkGin_Param 10000000 133 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_Param 1000000 1826 ns/op 656 B/op 9 allocs/op
BenchmarkGoji_Param 2000000 957 ns/op 336 B/op 2 allocs/op
BenchmarkGoJsonRest_Param 1000000 2021 ns/op 657 B/op 14 allocs/op
BenchmarkGoRestful_Param 200000 8825 ns/op 2496 B/op 31 allocs/op
BenchmarkGorillaMux_Param 500000 3340 ns/op 784 B/op 9 allocs/op
BenchmarkHttpRouter_Param 10000000 152 ns/op 32 B/op 1 allocs/op
BenchmarkHttpTreeMux_Param 2000000 717 ns/op 336 B/op 2 allocs/op
BenchmarkKocha_Param 3000000 423 ns/op 56 B/op 3 allocs/op
BenchmarkMacaron_Param 1000000 3410 ns/op 1104 B/op 11 allocs/op
BenchmarkMartini_Param 200000 7101 ns/op 1152 B/op 12 allocs/op
BenchmarkPat_Param 1000000 2040 ns/op 656 B/op 14 allocs/op
BenchmarkPossum_Param 1000000 2048 ns/op 624 B/op 7 allocs/op
BenchmarkR2router_Param 1000000 1144 ns/op 432 B/op 6 allocs/op
BenchmarkRevel_Param 200000 6725 ns/op 1672 B/op 28 allocs/op
BenchmarkRivet_Param 1000000 1121 ns/op 464 B/op 5 allocs/op
BenchmarkTango_Param 1000000 1479 ns/op 256 B/op 10 allocs/op
BenchmarkTigerTonic_Param 1000000 3393 ns/op 992 B/op 19 allocs/op
BenchmarkTraffic_Param 300000 5525 ns/op 1984 B/op 23 allocs/op
BenchmarkVulcan_Param 2000000 924 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_Param 1000000 1084 ns/op 368 B/op 3 allocs/op
BenchmarkAce_Param5 3000000 614 ns/op 160 B/op 1 allocs/op
BenchmarkBear_Param5 1000000 1617 ns/op 469 B/op 5 allocs/op
BenchmarkBeego_Param5 1000000 3373 ns/op 992 B/op 13 allocs/op
BenchmarkBone_Param5 1000000 1478 ns/op 432 B/op 3 allocs/op
BenchmarkDenco_Param5 3000000 570 ns/op 160 B/op 1 allocs/op
BenchmarkEcho_Param5 5000000 256 ns/op 0 B/op 0 allocs/op
BenchmarkGin_Param5 10000000 222 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_Param5 1000000 2789 ns/op 928 B/op 12 allocs/op
BenchmarkGoji_Param5 1000000 1287 ns/op 336 B/op 2 allocs/op
BenchmarkGoJsonRest_Param5 1000000 3670 ns/op 1105 B/op 17 allocs/op
BenchmarkGoRestful_Param5 200000 10756 ns/op 2672 B/op 31 allocs/op
BenchmarkGorillaMux_Param5 300000 5543 ns/op 912 B/op 9 allocs/op
BenchmarkHttpRouter_Param5 5000000 403 ns/op 160 B/op 1 allocs/op
BenchmarkHttpTreeMux_Param5 1000000 1089 ns/op 336 B/op 2 allocs/op
BenchmarkKocha_Param5 1000000 1682 ns/op 440 B/op 10 allocs/op
BenchmarkMacaron_Param5 300000 4596 ns/op 1376 B/op 14 allocs/op
BenchmarkMartini_Param5 100000 15703 ns/op 1280 B/op 12 allocs/op
BenchmarkPat_Param5 300000 5320 ns/op 1008 B/op 42 allocs/op
BenchmarkPossum_Param5 1000000 2155 ns/op 624 B/op 7 allocs/op
BenchmarkR2router_Param5 1000000 1559 ns/op 432 B/op 6 allocs/op
BenchmarkRevel_Param5 200000 8184 ns/op 2024 B/op 35 allocs/op
BenchmarkRivet_Param5 1000000 1914 ns/op 528 B/op 9 allocs/op
BenchmarkTango_Param5 1000000 3280 ns/op 944 B/op 18 allocs/op
BenchmarkTigerTonic_Param5 200000 11638 ns/op 2519 B/op 53 allocs/op
BenchmarkTraffic_Param5 200000 8941 ns/op 2280 B/op 31 allocs/op
BenchmarkVulcan_Param5 1000000 1279 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_Param5 1000000 1574 ns/op 416 B/op 3 allocs/op
BenchmarkAce_Param20 1000000 1528 ns/op 640 B/op 1 allocs/op
BenchmarkBear_Param20 300000 4906 ns/op 1633 B/op 5 allocs/op
BenchmarkBeego_Param20 200000 10529 ns/op 3868 B/op 17 allocs/op
BenchmarkBone_Param20 300000 7362 ns/op 2539 B/op 5 allocs/op
BenchmarkDenco_Param20 1000000 1884 ns/op 640 B/op 1 allocs/op
BenchmarkEcho_Param20 2000000 689 ns/op 0 B/op 0 allocs/op
BenchmarkGin_Param20 3000000 545 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_Param20 200000 9437 ns/op 3804 B/op 16 allocs/op
BenchmarkGoji_Param20 500000 3987 ns/op 1246 B/op 2 allocs/op
BenchmarkGoJsonRest_Param20 100000 12799 ns/op 4492 B/op 21 allocs/op
BenchmarkGoRestful_Param20 100000 19451 ns/op 5244 B/op 33 allocs/op
BenchmarkGorillaMux_Param20 100000 12456 ns/op 3275 B/op 11 allocs/op
BenchmarkHttpRouter_Param20 1000000 1333 ns/op 640 B/op 1 allocs/op
BenchmarkHttpTreeMux_Param20 300000 6490 ns/op 2187 B/op 4 allocs/op
BenchmarkKocha_Param20 300000 5335 ns/op 1808 B/op 27 allocs/op
BenchmarkMacaron_Param20 200000 11325 ns/op 4252 B/op 18 allocs/op
BenchmarkMartini_Param20 20000 64419 ns/op 3644 B/op 14 allocs/op
BenchmarkPat_Param20 50000 24672 ns/op 4888 B/op 151 allocs/op
BenchmarkPossum_Param20 1000000 2085 ns/op 624 B/op 7 allocs/op
BenchmarkR2router_Param20 300000 6809 ns/op 2283 B/op 8 allocs/op
BenchmarkRevel_Param20 100000 16600 ns/op 5551 B/op 54 allocs/op
BenchmarkRivet_Param20 200000 8428 ns/op 2620 B/op 26 allocs/op
BenchmarkTango_Param20 100000 16302 ns/op 8224 B/op 48 allocs/op
BenchmarkTigerTonic_Param20 30000 46828 ns/op 10538 B/op 178 allocs/op
BenchmarkTraffic_Param20 50000 28871 ns/op 7998 B/op 66 allocs/op
BenchmarkVulcan_Param20 1000000 2267 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_Param20 300000 6828 ns/op 2507 B/op 5 allocs/op
BenchmarkAce_ParamWrite 3000000 502 ns/op 40 B/op 2 allocs/op
BenchmarkBear_ParamWrite 1000000 1303 ns/op 424 B/op 5 allocs/op
BenchmarkBeego_ParamWrite 1000000 2489 ns/op 728 B/op 11 allocs/op
BenchmarkBone_ParamWrite 1000000 1181 ns/op 384 B/op 3 allocs/op
BenchmarkDenco_ParamWrite 5000000 315 ns/op 32 B/op 1 allocs/op
BenchmarkEcho_ParamWrite 10000000 237 ns/op 8 B/op 1 allocs/op
BenchmarkGin_ParamWrite 5000000 336 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_ParamWrite 1000000 2079 ns/op 664 B/op 10 allocs/op
BenchmarkGoji_ParamWrite 1000000 1092 ns/op 336 B/op 2 allocs/op
BenchmarkGoJsonRest_ParamWrite 1000000 3329 ns/op 1136 B/op 19 allocs/op
BenchmarkGoRestful_ParamWrite 200000 9273 ns/op 2504 B/op 32 allocs/op
BenchmarkGorillaMux_ParamWrite 500000 3919 ns/op 792 B/op 10 allocs/op
BenchmarkHttpRouter_ParamWrite 10000000 223 ns/op 32 B/op 1 allocs/op
BenchmarkHttpTreeMux_ParamWrite 2000000 788 ns/op 336 B/op 2 allocs/op
BenchmarkKocha_ParamWrite 3000000 549 ns/op 56 B/op 3 allocs/op
BenchmarkMacaron_ParamWrite 500000 4558 ns/op 1216 B/op 16 allocs/op
BenchmarkMartini_ParamWrite 200000 8850 ns/op 1256 B/op 16 allocs/op
BenchmarkPat_ParamWrite 500000 3679 ns/op 1088 B/op 19 allocs/op
BenchmarkPossum_ParamWrite 1000000 2114 ns/op 624 B/op 7 allocs/op
BenchmarkR2router_ParamWrite 1000000 1320 ns/op 432 B/op 6 allocs/op
BenchmarkRevel_ParamWrite 200000 8048 ns/op 2128 B/op 33 allocs/op
BenchmarkRivet_ParamWrite 1000000 1393 ns/op 472 B/op 6 allocs/op
BenchmarkTango_ParamWrite 2000000 819 ns/op 136 B/op 5 allocs/op
BenchmarkTigerTonic_ParamWrite 300000 5860 ns/op 1440 B/op 25 allocs/op
BenchmarkTraffic_ParamWrite 200000 7429 ns/op 2400 B/op 27 allocs/op
BenchmarkVulcan_ParamWrite 2000000 972 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_ParamWrite 1000000 1226 ns/op 368 B/op 3 allocs/op
BenchmarkAce_GithubStatic 5000000 294 ns/op 0 B/op 0 allocs/op
BenchmarkBear_GithubStatic 3000000 575 ns/op 88 B/op 3 allocs/op
BenchmarkBeego_GithubStatic 1000000 1561 ns/op 368 B/op 7 allocs/op
BenchmarkBone_GithubStatic 200000 12301 ns/op 2880 B/op 60 allocs/op
BenchmarkDenco_GithubStatic 20000000 74.6 ns/op 0 B/op 0 allocs/op
BenchmarkEcho_GithubStatic 10000000 176 ns/op 0 B/op 0 allocs/op
BenchmarkGin_GithubStatic 10000000 159 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_GithubStatic 1000000 1116 ns/op 304 B/op 6 allocs/op
BenchmarkGoji_GithubStatic 5000000 413 ns/op 0 B/op 0 allocs/op
BenchmarkGoRestful_GithubStatic 30000 55200 ns/op 3520 B/op 36 allocs/op
BenchmarkGoJsonRest_GithubStatic 1000000 1504 ns/op 337 B/op 12 allocs/op
BenchmarkGorillaMux_GithubStatic 100000 23620 ns/op 464 B/op 8 allocs/op
BenchmarkHttpRouter_GithubStatic 20000000 78.3 ns/op 0 B/op 0 allocs/op
BenchmarkHttpTreeMux_GithubStatic 20000000 84.9 ns/op 0 B/op 0 allocs/op
BenchmarkKocha_GithubStatic 20000000 111 ns/op 0 B/op 0 allocs/op
BenchmarkMacaron_GithubStatic 1000000 2686 ns/op 752 B/op 8 allocs/op
BenchmarkMartini_GithubStatic 100000 22244 ns/op 832 B/op 11 allocs/op
BenchmarkPat_GithubStatic 100000 13278 ns/op 3648 B/op 76 allocs/op
BenchmarkPossum_GithubStatic 1000000 1429 ns/op 480 B/op 4 allocs/op
BenchmarkR2router_GithubStatic 2000000 726 ns/op 144 B/op 5 allocs/op
BenchmarkRevel_GithubStatic 300000 6271 ns/op 1288 B/op 25 allocs/op
BenchmarkRivet_GithubStatic 3000000 474 ns/op 112 B/op 2 allocs/op
BenchmarkTango_GithubStatic 1000000 1842 ns/op 256 B/op 10 allocs/op
BenchmarkTigerTonic_GithubStatic 5000000 361 ns/op 48 B/op 1 allocs/op
BenchmarkTraffic_GithubStatic 30000 47197 ns/op 18920 B/op 149 allocs/op
BenchmarkVulcan_GithubStatic 1000000 1415 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_GithubStatic 1000000 2522 ns/op 512 B/op 11 allocs/op
BenchmarkAce_GithubParam 3000000 578 ns/op 96 B/op 1 allocs/op
BenchmarkBear_GithubParam 1000000 1592 ns/op 464 B/op 5 allocs/op
BenchmarkBeego_GithubParam 1000000 2891 ns/op 784 B/op 11 allocs/op
BenchmarkBone_GithubParam 300000 6440 ns/op 1456 B/op 16 allocs/op
BenchmarkDenco_GithubParam 3000000 514 ns/op 128 B/op 1 allocs/op
BenchmarkEcho_GithubParam 5000000 292 ns/op 0 B/op 0 allocs/op
BenchmarkGin_GithubParam 10000000 242 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_GithubParam 1000000 2343 ns/op 720 B/op 10 allocs/op
BenchmarkGoji_GithubParam 1000000 1566 ns/op 336 B/op 2 allocs/op
BenchmarkGoJsonRest_GithubParam 1000000 2828 ns/op 721 B/op 15 allocs/op
BenchmarkGoRestful_GithubParam 10000 177711 ns/op 2816 B/op 35 allocs/op
BenchmarkGorillaMux_GithubParam 100000 13591 ns/op 816 B/op 9 allocs/op
BenchmarkHttpRouter_GithubParam 5000000 352 ns/op 96 B/op 1 allocs/op
BenchmarkHttpTreeMux_GithubParam 2000000 973 ns/op 336 B/op 2 allocs/op
BenchmarkKocha_GithubParam 2000000 889 ns/op 128 B/op 5 allocs/op
BenchmarkMacaron_GithubParam 500000 4047 ns/op 1168 B/op 12 allocs/op
BenchmarkMartini_GithubParam 50000 28982 ns/op 1184 B/op 12 allocs/op
BenchmarkPat_GithubParam 200000 8747 ns/op 2480 B/op 56 allocs/op
BenchmarkPossum_GithubParam 1000000 2158 ns/op 624 B/op 7 allocs/op
BenchmarkR2router_GithubParam 1000000 1352 ns/op 432 B/op 6 allocs/op
BenchmarkRevel_GithubParam 200000 7673 ns/op 1784 B/op 30 allocs/op
BenchmarkRivet_GithubParam 1000000 1573 ns/op 480 B/op 6 allocs/op
BenchmarkTango_GithubParam 1000000 2418 ns/op 480 B/op 13 allocs/op
BenchmarkTigerTonic_GithubParam 300000 6048 ns/op 1440 B/op 28 allocs/op
BenchmarkTraffic_GithubParam 100000 20143 ns/op 6024 B/op 55 allocs/op
BenchmarkVulcan_GithubParam 1000000 2224 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_GithubParam 500000 4156 ns/op 1312 B/op 12 allocs/op
BenchmarkAce_GithubAll 10000 109482 ns/op 13792 B/op 167 allocs/op
BenchmarkBear_GithubAll 10000 287490 ns/op 79952 B/op 943 allocs/op
BenchmarkBeego_GithubAll 3000 562184 ns/op 146272 B/op 2092 allocs/op
BenchmarkBone_GithubAll 500 2578716 ns/op 648016 B/op 8119 allocs/op
BenchmarkDenco_GithubAll 20000 94955 ns/op 20224 B/op 167 allocs/op
BenchmarkEcho_GithubAll 30000 58705 ns/op 0 B/op 0 allocs/op
BenchmarkGin_GithubAll 30000 50991 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_GithubAll 5000 449648 ns/op 133280 B/op 1889 allocs/op
BenchmarkGoji_GithubAll 2000 689748 ns/op 56113 B/op 334 allocs/op
BenchmarkGoJsonRest_GithubAll 5000 537769 ns/op 135995 B/op 2940 allocs/op
BenchmarkGoRestful_GithubAll 100 18410628 ns/op 797236 B/op 7725 allocs/op
BenchmarkGorillaMux_GithubAll 200 8036360 ns/op 153137 B/op 1791 allocs/op
BenchmarkHttpRouter_GithubAll 20000 63506 ns/op 13792 B/op 167 allocs/op
BenchmarkHttpTreeMux_GithubAll 10000 165927 ns/op 56112 B/op 334 allocs/op
BenchmarkKocha_GithubAll 10000 171362 ns/op 23304 B/op 843 allocs/op
BenchmarkMacaron_GithubAll 2000 817008 ns/op 224960 B/op 2315 allocs/op
BenchmarkMartini_GithubAll 100 12609209 ns/op 237952 B/op 2686 allocs/op
BenchmarkPat_GithubAll 300 4830398 ns/op 1504101 B/op 32222 allocs/op
BenchmarkPossum_GithubAll 10000 301716 ns/op 97440 B/op 812 allocs/op
BenchmarkR2router_GithubAll 10000 270691 ns/op 77328 B/op 1182 allocs/op
BenchmarkRevel_GithubAll 1000 1491919 ns/op 345553 B/op 5918 allocs/op
BenchmarkRivet_GithubAll 10000 283860 ns/op 84272 B/op 1079 allocs/op
BenchmarkTango_GithubAll 5000 473821 ns/op 87078 B/op 2470 allocs/op
BenchmarkTigerTonic_GithubAll 2000 1120131 ns/op 241088 B/op 6052 allocs/op
BenchmarkTraffic_GithubAll 200 8708979 ns/op 2664762 B/op 22390 allocs/op
BenchmarkVulcan_GithubAll 5000 353392 ns/op 19894 B/op 609 allocs/op
BenchmarkZeus_GithubAll 2000 944234 ns/op 300688 B/op 2648 allocs/op
BenchmarkAce_GPlusStatic 5000000 251 ns/op 0 B/op 0 allocs/op
BenchmarkBear_GPlusStatic 3000000 415 ns/op 72 B/op 3 allocs/op
BenchmarkBeego_GPlusStatic 1000000 1416 ns/op 352 B/op 7 allocs/op
BenchmarkBone_GPlusStatic 10000000 192 ns/op 32 B/op 1 allocs/op
BenchmarkDenco_GPlusStatic 30000000 47.6 ns/op 0 B/op 0 allocs/op
BenchmarkEcho_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op
BenchmarkGin_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_GPlusStatic 1000000 1035 ns/op 288 B/op 6 allocs/op
BenchmarkGoji_GPlusStatic 5000000 304 ns/op 0 B/op 0 allocs/op
BenchmarkGoJsonRest_GPlusStatic 1000000 1286 ns/op 337 B/op 12 allocs/op
BenchmarkGoRestful_GPlusStatic 200000 9649 ns/op 2160 B/op 30 allocs/op
BenchmarkGorillaMux_GPlusStatic 1000000 2346 ns/op 464 B/op 8 allocs/op
BenchmarkHttpRouter_GPlusStatic 30000000 42.7 ns/op 0 B/op 0 allocs/op
BenchmarkHttpTreeMux_GPlusStatic 30000000 49.5 ns/op 0 B/op 0 allocs/op
BenchmarkKocha_GPlusStatic 20000000 74.8 ns/op 0 B/op 0 allocs/op
BenchmarkMacaron_GPlusStatic 1000000 2520 ns/op 736 B/op 8 allocs/op
BenchmarkMartini_GPlusStatic 300000 5310 ns/op 832 B/op 11 allocs/op
BenchmarkPat_GPlusStatic 5000000 398 ns/op 96 B/op 2 allocs/op
BenchmarkPossum_GPlusStatic 1000000 1434 ns/op 480 B/op 4 allocs/op
BenchmarkR2router_GPlusStatic 2000000 646 ns/op 144 B/op 5 allocs/op
BenchmarkRevel_GPlusStatic 300000 6172 ns/op 1272 B/op 25 allocs/op
BenchmarkRivet_GPlusStatic 3000000 444 ns/op 112 B/op 2 allocs/op
BenchmarkTango_GPlusStatic 1000000 1400 ns/op 208 B/op 10 allocs/op
BenchmarkTigerTonic_GPlusStatic 10000000 213 ns/op 32 B/op 1 allocs/op
BenchmarkTraffic_GPlusStatic 1000000 3091 ns/op 1208 B/op 16 allocs/op
BenchmarkVulcan_GPlusStatic 2000000 863 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_GPlusStatic 10000000 237 ns/op 16 B/op 1 allocs/op
BenchmarkAce_GPlusParam 3000000 435 ns/op 64 B/op 1 allocs/op
BenchmarkBear_GPlusParam 1000000 1205 ns/op 448 B/op 5 allocs/op
BenchmarkBeego_GPlusParam 1000000 2494 ns/op 720 B/op 10 allocs/op
BenchmarkBone_GPlusParam 1000000 1126 ns/op 384 B/op 3 allocs/op
BenchmarkDenco_GPlusParam 5000000 325 ns/op 64 B/op 1 allocs/op
BenchmarkEcho_GPlusParam 10000000 168 ns/op 0 B/op 0 allocs/op
BenchmarkGin_GPlusParam 10000000 170 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_GPlusParam 1000000 1895 ns/op 656 B/op 9 allocs/op
BenchmarkGoji_GPlusParam 1000000 1071 ns/op 336 B/op 2 allocs/op
BenchmarkGoJsonRest_GPlusParam 1000000 2282 ns/op 657 B/op 14 allocs/op
BenchmarkGoRestful_GPlusParam 100000 19400 ns/op 2560 B/op 33 allocs/op
BenchmarkGorillaMux_GPlusParam 500000 5001 ns/op 784 B/op 9 allocs/op
BenchmarkHttpRouter_GPlusParam 10000000 240 ns/op 64 B/op 1 allocs/op
BenchmarkHttpTreeMux_GPlusParam 2000000 797 ns/op 336 B/op 2 allocs/op
BenchmarkKocha_GPlusParam 3000000 505 ns/op 56 B/op 3 allocs/op
BenchmarkMacaron_GPlusParam 1000000 3668 ns/op 1104 B/op 11 allocs/op
BenchmarkMartini_GPlusParam 200000 10672 ns/op 1152 B/op 12 allocs/op
BenchmarkPat_GPlusParam 1000000 2376 ns/op 704 B/op 14 allocs/op
BenchmarkPossum_GPlusParam 1000000 2090 ns/op 624 B/op 7 allocs/op
BenchmarkR2router_GPlusParam 1000000 1233 ns/op 432 B/op 6 allocs/op
BenchmarkRevel_GPlusParam 200000 6778 ns/op 1704 B/op 28 allocs/op
BenchmarkRivet_GPlusParam 1000000 1279 ns/op 464 B/op 5 allocs/op
BenchmarkTango_GPlusParam 1000000 1981 ns/op 272 B/op 10 allocs/op
BenchmarkTigerTonic_GPlusParam 500000 3893 ns/op 1064 B/op 19 allocs/op
BenchmarkTraffic_GPlusParam 200000 6585 ns/op 2000 B/op 23 allocs/op
BenchmarkVulcan_GPlusParam 1000000 1233 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_GPlusParam 1000000 1350 ns/op 368 B/op 3 allocs/op
BenchmarkAce_GPlus2Params 3000000 512 ns/op 64 B/op 1 allocs/op
BenchmarkBear_GPlus2Params 1000000 1564 ns/op 464 B/op 5 allocs/op
BenchmarkBeego_GPlus2Params 1000000 3043 ns/op 784 B/op 11 allocs/op
BenchmarkBone_GPlus2Params 1000000 3152 ns/op 736 B/op 7 allocs/op
BenchmarkDenco_GPlus2Params 3000000 431 ns/op 64 B/op 1 allocs/op
BenchmarkEcho_GPlus2Params 5000000 247 ns/op 0 B/op 0 allocs/op
BenchmarkGin_GPlus2Params 10000000 219 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_GPlus2Params 1000000 2363 ns/op 720 B/op 10 allocs/op
BenchmarkGoji_GPlus2Params 1000000 1540 ns/op 336 B/op 2 allocs/op
BenchmarkGoJsonRest_GPlus2Params 1000000 2872 ns/op 721 B/op 15 allocs/op
BenchmarkGoRestful_GPlus2Params 100000 23030 ns/op 2720 B/op 35 allocs/op
BenchmarkGorillaMux_GPlus2Params 200000 10516 ns/op 816 B/op 9 allocs/op
BenchmarkHttpRouter_GPlus2Params 5000000 273 ns/op 64 B/op 1 allocs/op
BenchmarkHttpTreeMux_GPlus2Params 2000000 939 ns/op 336 B/op 2 allocs/op
BenchmarkKocha_GPlus2Params 2000000 844 ns/op 128 B/op 5 allocs/op
BenchmarkMacaron_GPlus2Params 500000 3914 ns/op 1168 B/op 12 allocs/op
BenchmarkMartini_GPlus2Params 50000 35759 ns/op 1280 B/op 16 allocs/op
BenchmarkPat_GPlus2Params 200000 7089 ns/op 2304 B/op 41 allocs/op
BenchmarkPossum_GPlus2Params 1000000 2093 ns/op 624 B/op 7 allocs/op
BenchmarkR2router_GPlus2Params 1000000 1320 ns/op 432 B/op 6 allocs/op
BenchmarkRevel_GPlus2Params 200000 7351 ns/op 1800 B/op 30 allocs/op
BenchmarkRivet_GPlus2Params 1000000 1485 ns/op 480 B/op 6 allocs/op
BenchmarkTango_GPlus2Params 1000000 2111 ns/op 448 B/op 12 allocs/op
BenchmarkTigerTonic_GPlus2Params 300000 6271 ns/op 1528 B/op 28 allocs/op
BenchmarkTraffic_GPlus2Params 100000 14886 ns/op 3312 B/op 34 allocs/op
BenchmarkVulcan_GPlus2Params 1000000 1883 ns/op 98 B/op 3 allocs/op
BenchmarkZeus_GPlus2Params 1000000 2686 ns/op 784 B/op 6 allocs/op
BenchmarkAce_GPlusAll 300000 5912 ns/op 640 B/op 11 allocs/op
BenchmarkBear_GPlusAll 100000 16448 ns/op 5072 B/op 61 allocs/op
BenchmarkBeego_GPlusAll 50000 32916 ns/op 8976 B/op 129 allocs/op
BenchmarkBone_GPlusAll 50000 25836 ns/op 6992 B/op 76 allocs/op
BenchmarkDenco_GPlusAll 500000 4462 ns/op 672 B/op 11 allocs/op
BenchmarkEcho_GPlusAll 500000 2806 ns/op 0 B/op 0 allocs/op
BenchmarkGin_GPlusAll 500000 2579 ns/op 0 B/op 0 allocs/op
BenchmarkGocraftWeb_GPlusAll 50000 25223 ns/op 8144 B/op 116 allocs/op
BenchmarkGoji_GPlusAll 100000 14237 ns/op 3696 B/op 22 allocs/op
BenchmarkGoJsonRest_GPlusAll 50000 29227 ns/op 8221 B/op 183 allocs/op
BenchmarkGoRestful_GPlusAll 10000 203144 ns/op 36064 B/op 441 allocs/op
BenchmarkGorillaMux_GPlusAll 20000 80906 ns/op 9712 B/op 115 allocs/op
BenchmarkHttpRouter_GPlusAll 500000 3040 ns/op 640 B/op 11 allocs/op
BenchmarkHttpTreeMux_GPlusAll 200000 9627 ns/op 3696 B/op 22 allocs/op
BenchmarkKocha_GPlusAll 200000 8108 ns/op 976 B/op 43 allocs/op
BenchmarkMacaron_GPlusAll 30000 48083 ns/op 13968 B/op 142 allocs/op
BenchmarkMartini_GPlusAll 10000 196978 ns/op 15072 B/op 178 allocs/op
BenchmarkPat_GPlusAll 30000 58865 ns/op 16880 B/op 343 allocs/op
BenchmarkPossum_GPlusAll 100000 19685 ns/op 6240 B/op 52 allocs/op
BenchmarkR2router_GPlusAll 100000 16251 ns/op 5040 B/op 76 allocs/op
BenchmarkRevel_GPlusAll 20000 93489 ns/op 21656 B/op 368 allocs/op
BenchmarkRivet_GPlusAll 100000 16907 ns/op 5408 B/op 64 allocs/op
```


@@ -1,150 +0,0 @@
#CHANGELOG
###Gin 1.0rc2 (...)
- [PERFORMANCE] Fast path for writing Content-Type.
- [PERFORMANCE] Much faster 404 routing
- [PERFORMANCE] Allocation optimizations
- [PERFORMANCE] Faster root tree lookup
- [PERFORMANCE] Zero overhead, String() and JSON() rendering.
- [PERFORMANCE] Faster ClientIP parsing
- [PERFORMANCE] Much faster SSE implementation
- [NEW] Benchmarks suite
- [NEW] Bind validation can be disabled and replaced with custom validators.
- [NEW] More flexible HTML render
- [NEW] Multipart and PostForm bindings
- [NEW] Adds method to return all the registered routes
- [NEW] Context.HandlerName() returns the main handler's name
- [NEW] Adds Error.IsType() helper
- [FIX] Binding multipart form
- [FIX] Integration tests
- [FIX] Crash when binding non struct object in Context.
- [FIX] RunTLS() implementation
- [FIX] Logger() unit tests
- [FIX] Adds SetHTMLTemplate() warning
- [FIX] Context.IsAborted()
- [FIX] More unit tests
- [FIX] JSON, XML, HTML renders accept custom content-types
- [FIX] gin.AbortIndex is unexported
- [FIX] Better approach to avoid directory listing in StaticFS()
- [FIX] Context.ClientIP() always returns the IP with trimmed spaces.
- [FIX] Better warning when running in debug mode.
- [FIX] Google App Engine integration. debugPrint does not use os.Stdout
- [FIX] Fixes integer overflow in error type
- [FIX] Error implements the json.Marshaller interface
- [FIX] MIT license in every file
###Gin 1.0rc1 (May 22, 2015)
- [PERFORMANCE] Zero allocation router
- [PERFORMANCE] Faster JSON, XML and text rendering
- [PERFORMANCE] Custom hand optimized HttpRouter for Gin
- [PERFORMANCE] Misc code optimizations. Inlining, tail call optimizations
- [NEW] Built-in support for golang.org/x/net/context
- [NEW] Any(path, handler). Create a route that matches any path
- [NEW] Refactored rendering pipeline (faster and statically typed)
- [NEW] Refactored errors API
- [NEW] IndentedJSON() prints pretty JSON
- [NEW] Added gin.DefaultWriter
- [NEW] UNIX socket support
- [NEW] RouterGroup.BasePath is exposed
- [NEW] JSON validation using go-validate-yourself (very powerful options)
- [NEW] Completed suite of unit tests
- [NEW] HTTP streaming with c.Stream()
- [NEW] StaticFile() creates a router for serving just one file.
- [NEW] StaticFS() has an option to disable directory listing.
- [NEW] StaticFS() for serving static files through virtual filesystems
- [NEW] Server-Sent Events native support
- [NEW] WrapF() and WrapH() helpers for wrapping http.HandlerFunc and http.Handler
- [NEW] Added LoggerWithWriter() middleware
- [NEW] Added RecoveryWithWriter() middleware
- [NEW] Added DefaultPostFormValue()
- [NEW] Added DefaultFormValue()
- [NEW] Added DefaultParamValue()
- [FIX] BasicAuth() when using custom realm
- [FIX] Bug when serving static files in nested routing group
- [FIX] Redirect using built-in http.Redirect()
- [FIX] Logger when printing the requested path
- [FIX] Documentation typos
- [FIX] Context.Engine renamed to Context.engine
- [FIX] Better debugging messages
- [FIX] ErrorLogger
- [FIX] Debug HTTP render
- [FIX] Refactored binding and render modules
- [FIX] Refactored Context initialization
- [FIX] Refactored BasicAuth()
- [FIX] NoMethod/NoRoute handlers
- [FIX] Hijacking http
- [FIX] Better support for Google App Engine (using log instead of fmt)
###Gin 0.6 (Mar 9, 2015)
- [NEW] Support multipart/form-data
- [NEW] NoMethod handler
- [NEW] Validate sub structures
- [NEW] Support for HTTP Realm Auth
- [FIX] Unsigned integers in binding
- [FIX] Improve color logger
###Gin 0.5 (Feb 7, 2015)
- [NEW] Content Negotiation
- [FIX] Solved security bug that allows a client to spoof the IP
- [FIX] Fix unexported/ignored fields in binding
###Gin 0.4 (Aug 21, 2014)
- [NEW] Development mode
- [NEW] Unit tests
- [NEW] Add Content.Redirect()
- [FIX] Deferring WriteHeader()
- [FIX] Improved documentation for model binding
###Gin 0.3 (Jul 18, 2014)
- [PERFORMANCE] Normal log and error log are printed in the same call.
- [PERFORMANCE] Improve performance of NoRouter()
- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults.
- [NEW] Flexible rendering API
- [NEW] Add Context.File()
- [NEW] Add shortcut RunTLS() for http.ListenAndServeTLS
- [FIX] Rename NotFound404() to NoRoute()
- [FIX] Errors in context are purged
- [FIX] Adds HEAD method in Static file serving
- [FIX] Refactors Static() file serving
- [FIX] Using keyed initialization to fix app-engine integration
- [FIX] Can't unmarshal JSON array, #63
- [FIX] Renaming Context.Req to Context.Request
- [FIX] Check application/x-www-form-urlencoded when parsing form
###Gin 0.2b (Jul 08, 2014)
- [PERFORMANCE] Using sync.Pool to reduce allocation/GC overhead
- [NEW] Travis CI integration
- [NEW] Completely new logger
- [NEW] New API for serving static files. gin.Static()
- [NEW] gin.H() can be serialized into XML
- [NEW] Typed errors. Errors can be typed. Internal/external/custom.
- [NEW] Support for Godeps
- [NEW] Travis/Godocs badges in README
- [NEW] New Bind() and BindWith() methods for parsing request body.
- [NEW] Add Content.Copy()
- [NEW] Add context.LastError()
- [NEW] Add shortcut for OPTIONS HTTP method
- [FIX] Tons of README fixes
- [FIX] Header is written before body
- [FIX] BasicAuth() and changes API a little bit
- [FIX] Recovery() middleware only prints panics
- [FIX] Context.Get() does not panic anymore. Use MustGet() instead.
- [FIX] Multiple http.WriteHeader() in NotFound handlers
- [FIX] Engine.Run() panics if the http server can't be set up
- [FIX] Crash when route path doesn't start with '/'
- [FIX] Do not update header when status code is negative
- [FIX] Setting response headers before calling WriteHeader in context.String()
- [FIX] Add MIT license
- [FIX] Changes behaviour of ErrorLogger() and Logger()


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Manuel Martínez-Almeida
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -1,710 +0,0 @@
#Gin Web Framework
<img align="right" src="https://raw.githubusercontent.com/gin-gonic/gin/master/logo.jpg">
[![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin)
[![Coverage Status](https://coveralls.io/repos/gin-gonic/gin/badge.svg?branch=master)](https://coveralls.io/r/gin-gonic/gin?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin)
[![GoDoc](https://godoc.org/github.com/gin-gonic/gin?status.svg)](https://godoc.org/github.com/gin-gonic/gin)
[![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Gin is a web framework written in Go (Golang). It features a martini-like API with much better performance, up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin.
![Gin console logger](https://gin-gonic.github.io/gin/other/console.png)
```sh
$ cat test.go
```
```go
package main
import "github.com/gin-gonic/gin"
func main() {
r := gin.Default()
r.GET("/ping", func(c *gin.Context) {
c.JSON(200, gin.H{
"message": "pong",
})
})
r.Run() // listen and serve on 0.0.0.0:8080
}
```
## Benchmarks
Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter)
[See all benchmarks](/BENCHMARKS.md)
Benchmark name | (1) | (2) | (3) | (4)
--------------------------------|----------:|----------:|----------:|------:
BenchmarkAce_GithubAll | 10000 | 109482 | 13792 | 167
BenchmarkBear_GithubAll | 10000 | 287490 | 79952 | 943
BenchmarkBeego_GithubAll | 3000 | 562184 | 146272 | 2092
BenchmarkBone_GithubAll | 500 | 2578716 | 648016 | 8119
BenchmarkDenco_GithubAll | 20000 | 94955 | 20224 | 167
BenchmarkEcho_GithubAll | 30000 | 58705 | 0 | 0
**BenchmarkGin_GithubAll** | **30000** | **50991** | **0** | **0**
BenchmarkGocraftWeb_GithubAll | 5000 | 449648 | 133280 | 1889
BenchmarkGoji_GithubAll | 2000 | 689748 | 56113 | 334
BenchmarkGoJsonRest_GithubAll | 5000 | 537769 | 135995 | 2940
BenchmarkGoRestful_GithubAll | 100 | 18410628 | 797236 | 7725
BenchmarkGorillaMux_GithubAll | 200 | 8036360 | 153137 | 1791
BenchmarkHttpRouter_GithubAll | 20000 | 63506 | 13792 | 167
BenchmarkHttpTreeMux_GithubAll | 10000 | 165927 | 56112 | 334
BenchmarkKocha_GithubAll | 10000 | 171362 | 23304 | 843
BenchmarkMacaron_GithubAll | 2000 | 817008 | 224960 | 2315
BenchmarkMartini_GithubAll | 100 | 12609209 | 237952 | 2686
BenchmarkPat_GithubAll | 300 | 4830398 | 1504101 | 32222
BenchmarkPossum_GithubAll | 10000 | 301716 | 97440 | 812
BenchmarkR2router_GithubAll | 10000 | 270691 | 77328 | 1182
BenchmarkRevel_GithubAll | 1000 | 1491919 | 345553 | 5918
BenchmarkRivet_GithubAll | 10000 | 283860 | 84272 | 1079
BenchmarkTango_GithubAll | 5000 | 473821 | 87078 | 2470
BenchmarkTigerTonic_GithubAll | 2000 | 1120131 | 241088 | 6052
BenchmarkTraffic_GithubAll | 200 | 8708979 | 2664762 | 22390
BenchmarkVulcan_GithubAll | 5000 | 353392 | 19894 | 609
BenchmarkZeus_GithubAll | 2000 | 944234 | 300688 | 2648
(1): Total Repetitions
(2): Single Repetition Duration (ns/op)
(3): Heap Memory (B/op)
(4): Average Allocations per Repetition (allocs/op)
##Gin v1. stable
- [x] Zero allocation router.
- [x] Still the fastest http router and framework. From routing to writing.
- [x] Complete suite of unit tests
- [x] Battle tested
- [x] API frozen, new releases will not break your code.
## Start using it
1. Download and install it:
```sh
$ go get github.com/gin-gonic/gin
```
2. Import it in your code:
```go
import "github.com/gin-gonic/gin"
```
3. (Optional) Import `net/http`. This is required if, for example, you use constants such as `http.StatusOK`.
```go
import "net/http"
```
##API Examples
#### Using GET, POST, PUT, PATCH, DELETE and OPTIONS
```go
func main() {
// Creates a gin router with default middleware:
// logger and recovery (crash-free) middleware
router := gin.Default()
router.GET("/someGet", getting)
router.POST("/somePost", posting)
router.PUT("/somePut", putting)
router.DELETE("/someDelete", deleting)
router.PATCH("/somePatch", patching)
router.HEAD("/someHead", head)
router.OPTIONS("/someOptions", options)
// By default it serves on :8080 unless a
// PORT environment variable was defined.
router.Run()
// router.Run(":3000") for a hard coded port
}
```
#### Parameters in path
```go
func main() {
router := gin.Default()
// This handler will match /user/john but will not match /user/ or /user
router.GET("/user/:name", func(c *gin.Context) {
name := c.Param("name")
c.String(http.StatusOK, "Hello %s", name)
})
// However, this one will match /user/john/ and also /user/john/send
// If no other routes match /user/john, it will redirect to /user/john/
router.GET("/user/:name/*action", func(c *gin.Context) {
name := c.Param("name")
action := c.Param("action")
message := name + " is " + action
c.String(http.StatusOK, message)
})
router.Run(":8080")
}
```
#### Querystring parameters
```go
func main() {
router := gin.Default()
// Query string parameters are parsed using the existing underlying request object.
// The request responds to a url matching: /welcome?firstname=Jane&lastname=Doe
router.GET("/welcome", func(c *gin.Context) {
firstname := c.DefaultQuery("firstname", "Guest")
lastname := c.Query("lastname") // shortcut for c.Request.URL.Query().Get("lastname")
c.String(http.StatusOK, "Hello %s %s", firstname, lastname)
})
router.Run(":8080")
}
```
### Multipart/Urlencoded Form
```go
func main() {
router := gin.Default()
router.POST("/form_post", func(c *gin.Context) {
message := c.PostForm("message")
nick := c.DefaultPostForm("nick", "anonymous")
c.JSON(200, gin.H{
"status": "posted",
"message": message,
"nick": nick,
})
})
router.Run(":8080")
}
```
### Another example: query + post form
```
POST /post?id=1234&page=1 HTTP/1.1
Content-Type: application/x-www-form-urlencoded
name=manu&message=this_is_great
```
```go
func main() {
router := gin.Default()
router.POST("/post", func(c *gin.Context) {
id := c.Query("id")
page := c.DefaultQuery("page", "0")
name := c.PostForm("name")
message := c.PostForm("message")
fmt.Printf("id: %s; page: %s; name: %s; message: %s", id, page, name, message)
})
router.Run(":8080")
}
```
```
id: 1234; page: 1; name: manu; message: this_is_great
```
### Another example: upload file
References issue [#548](https://github.com/gin-gonic/gin/issues/548).
```go
func main() {
router := gin.Default()
router.POST("/upload", func(c *gin.Context) {
file, header, err := c.Request.FormFile("upload")
if err != nil {
log.Fatal(err)
}
filename := header.Filename
fmt.Println(header.Filename)
out, err := os.Create("./tmp/"+filename+".png")
if err != nil {
log.Fatal(err)
}
defer out.Close()
_, err = io.Copy(out, file)
if err != nil {
log.Fatal(err)
}
})
router.Run(":8080")
}
```
#### Grouping routes
```go
func main() {
router := gin.Default()
// Simple group: v1
v1 := router.Group("/v1")
{
v1.POST("/login", loginEndpoint)
v1.POST("/submit", submitEndpoint)
v1.POST("/read", readEndpoint)
}
// Simple group: v2
v2 := router.Group("/v2")
{
v2.POST("/login", loginEndpoint)
v2.POST("/submit", submitEndpoint)
v2.POST("/read", readEndpoint)
}
router.Run(":8080")
}
```
#### Blank Gin without middleware by default
Use
```go
r := gin.New()
```
instead of
```go
r := gin.Default()
```
#### Using middleware
```go
func main() {
// Creates a router without any middleware by default
r := gin.New()
// Global middleware
r.Use(gin.Logger())
r.Use(gin.Recovery())
// Per route middleware, you can add as many as you desire.
r.GET("/benchmark", MyBenchLogger(), benchEndpoint)
// Authorization group
// authorized := r.Group("/", AuthRequired())
// exactly the same as:
authorized := r.Group("/")
// per group middleware! in this case we use the custom created
// AuthRequired() middleware just in the "authorized" group.
authorized.Use(AuthRequired())
{
authorized.POST("/login", loginEndpoint)
authorized.POST("/submit", submitEndpoint)
authorized.POST("/read", readEndpoint)
// nested group
testing := authorized.Group("testing")
testing.GET("/analytics", analyticsEndpoint)
}
// Listen and serve on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Model binding and validation
To bind a request body into a type, use model binding. Gin currently supports binding of JSON, XML and standard form values (foo=bar&boo=baz).
Note that you need to set the corresponding binding tag on every field you want to bind. For example, when binding from JSON, set `json:"fieldname"`.
When you use the Bind method, Gin tries to infer the binder from the Content-Type header. If you already know what you are binding, you can use BindWith instead (a minimal sketch follows the example below).
You can also mark specific fields as required. If a field is decorated with `binding:"required"` and has an empty value when binding, the current request fails with an error.
```go
// Binding from JSON
type Login struct {
User string `form:"user" json:"user" binding:"required"`
Password string `form:"password" json:"password" binding:"required"`
}
func main() {
router := gin.Default()
// Example for binding JSON ({"user": "manu", "password": "123"})
router.POST("/loginJSON", func(c *gin.Context) {
var json Login
if c.BindJSON(&json) == nil {
if json.User == "manu" && json.Password == "123" {
c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
} else {
c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
}
}
})
// Example for binding an HTML form (user=manu&password=123)
router.POST("/loginForm", func(c *gin.Context) {
var form Login
// This will infer what binder to use depending on the content-type header.
if c.Bind(&form) == nil {
if form.User == "manu" && form.Password == "123" {
c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
} else {
c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
}
}
})
// Listen and serve on 0.0.0.0:8080
router.Run(":8080")
}
```
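If you already know the content type, the explicit-binder path mentioned above looks like the sketch below (the `/loginExplicit` route is illustrative, the `Login` struct mirrors the one above, and the extra `github.com/gin-gonic/gin/binding` import is required):

```go
package main

import (
    "net/http"

    "github.com/gin-gonic/gin"
    "github.com/gin-gonic/gin/binding"
)

type Login struct {
    User     string `json:"user" binding:"required"`
    Password string `json:"password" binding:"required"`
}

func main() {
    router := gin.Default()
    // BindWith skips Content-Type inference and uses the given binder directly.
    router.POST("/loginExplicit", func(c *gin.Context) {
        var json Login
        if c.BindWith(&json, binding.JSON) == nil {
            c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
        }
    })
    router.Run(":8080")
}
```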
###Multipart/Urlencoded binding
```go
package main
import (
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
)
type LoginForm struct {
User string `form:"user" binding:"required"`
Password string `form:"password" binding:"required"`
}
func main() {
router := gin.Default()
router.POST("/login", func(c *gin.Context) {
// you can bind multipart form with explicit binding declaration:
// c.BindWith(&form, binding.Form)
// or you can simply use autobinding with Bind method:
var form LoginForm
// in this case proper binding will be automatically selected
if c.Bind(&form) == nil {
if form.User == "user" && form.Password == "password" {
c.JSON(200, gin.H{"status": "you are logged in"})
} else {
c.JSON(401, gin.H{"status": "unauthorized"})
}
}
})
router.Run(":8080")
}
```
Test it with:
```sh
$ curl -v --form user=user --form password=password http://localhost:8080/login
```
#### XML and JSON rendering
```go
func main() {
r := gin.Default()
// gin.H is a shortcut for map[string]interface{}
r.GET("/someJSON", func(c *gin.Context) {
c.JSON(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK})
})
r.GET("/moreJSON", func(c *gin.Context) {
// You also can use a struct
var msg struct {
Name string `json:"user"`
Message string
Number int
}
msg.Name = "Lena"
msg.Message = "hey"
msg.Number = 123
// Note that msg.Name becomes "user" in the JSON
// Will output : {"user": "Lena", "Message": "hey", "Number": 123}
c.JSON(http.StatusOK, msg)
})
r.GET("/someXML", func(c *gin.Context) {
c.XML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK})
})
// Listen and serve on 0.0.0.0:8080
r.Run(":8080")
}
```
####Serving static files
```go
func main() {
router := gin.Default()
router.Static("/assets", "./assets")
router.StaticFS("/more_static", http.Dir("my_file_system"))
router.StaticFile("/favicon.ico", "./resources/favicon.ico")
// Listen and serve on 0.0.0.0:8080
router.Run(":8080")
}
```
####HTML rendering
Using LoadHTMLGlob() or LoadHTMLFiles()
```go
func main() {
router := gin.Default()
router.LoadHTMLGlob("templates/*")
//router.LoadHTMLFiles("templates/template1.html", "templates/template2.html")
router.GET("/index", func(c *gin.Context) {
c.HTML(http.StatusOK, "index.tmpl", gin.H{
"title": "Main website",
})
})
router.Run(":8080")
}
```
templates/index.tmpl
```html
<html>
<h1>
{{ .title }}
</h1>
</html>
```
Using templates with same name in different directories
```go
func main() {
router := gin.Default()
router.LoadHTMLGlob("templates/**/*")
router.GET("/posts/index", func(c *gin.Context) {
c.HTML(http.StatusOK, "posts/index.tmpl", gin.H{
"title": "Posts",
})
})
router.GET("/users/index", func(c *gin.Context) {
c.HTML(http.StatusOK, "users/index.tmpl", gin.H{
"title": "Users",
})
})
router.Run(":8080")
}
```
templates/posts/index.tmpl
```html
{{ define "posts/index.tmpl" }}
<html><h1>
{{ .title }}
</h1>
<p>Using posts/index.tmpl</p>
</html>
{{ end }}
```
templates/users/index.tmpl
```html
{{ define "users/index.tmpl" }}
<html><h1>
{{ .title }}
</h1>
<p>Using users/index.tmpl</p>
</html>
{{ end }}
```
You can also use your own HTML template renderer
```go
import "html/template"
func main() {
router := gin.Default()
html := template.Must(template.ParseFiles("file1", "file2"))
router.SetHTMLTemplate(html)
router.Run(":8080")
}
```
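Custom template functions can be attached before handing the template to gin; this is plain `html/template` usage, and the `formatDate` helper is only an illustration:
```go
import (
	"html/template"
	"time"
)

func main() {
	router := gin.Default()
	// formatDate is a hypothetical helper exposed to the templates.
	funcs := template.FuncMap{
		"formatDate": func(t time.Time) string { return t.Format("2006-01-02") },
	}
	html := template.Must(template.New("t").Funcs(funcs).ParseFiles("file1", "file2"))
	router.SetHTMLTemplate(html)
	router.Run(":8080")
}
```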
#### Redirects
Issuing an HTTP redirect is easy:
```go
r.GET("/test", func(c *gin.Context) {
c.Redirect(http.StatusMovedPermanently, "http://www.google.com/")
})
```
Both internal and external locations are supported.
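A minimal sketch of an internal redirect to another registered route:
```go
r.GET("/internal", func(c *gin.Context) {
	// A relative Location keeps the redirect on the same host.
	c.Redirect(http.StatusFound, "/test")
})
```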
#### Custom Middleware
```go
func Logger() gin.HandlerFunc {
return func(c *gin.Context) {
t := time.Now()
// Set example variable
c.Set("example", "12345")
// before request
c.Next()
// after request
latency := time.Since(t)
log.Print(latency)
// access the status we are sending
status := c.Writer.Status()
log.Println(status)
}
}
func main() {
r := gin.New()
r.Use(Logger())
r.GET("/test", func(c *gin.Context) {
example := c.MustGet("example").(string)
// it would print: "12345"
log.Println(example)
})
// Listen and serve on 0.0.0.0:8080
r.Run(":8080")
}
```
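Middleware does not have to be global; it can also be attached to a single route or to a group. A sketch reusing the `Logger()` middleware defined above:
```go
// Per-route: handlers run in the order they are passed.
r.GET("/benchmark", Logger(), func(c *gin.Context) {
	c.String(http.StatusOK, "ok")
})
// Per-group: every route registered on the group runs Logger() first.
logged := r.Group("/logged", Logger())
logged.GET("/ping", func(c *gin.Context) {
	c.String(http.StatusOK, "pong")
})
```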
#### Using BasicAuth() middleware
```go
// simulate some private data
var secrets = gin.H{
"foo": gin.H{"email": "foo@bar.com", "phone": "123433"},
"austin": gin.H{"email": "austin@example.com", "phone": "666"},
"lena": gin.H{"email": "lena@guapa.com", "phone": "523443"},
}
func main() {
r := gin.Default()
// Group using gin.BasicAuth() middleware
// gin.Accounts is a shortcut for map[string]string
authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{
"foo": "bar",
"austin": "1234",
"lena": "hello2",
"manu": "4321",
}))
// /admin/secrets endpoint
// hit "localhost:8080/admin/secrets"
authorized.GET("/secrets", func(c *gin.Context) {
// get user, it was set by the BasicAuth middleware
user := c.MustGet(gin.AuthUserKey).(string)
if secret, ok := secrets[user]; ok {
c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret})
} else {
c.JSON(http.StatusOK, gin.H{"user": user, "secret": "NO SECRET :("})
}
})
// Listen and serve on 0.0.0.0:8080
r.Run(":8080")
}
```
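If you need a custom realm in the `WWW-Authenticate` challenge, auth.go (below) also exposes `BasicAuthForRealm`; a sketch:
```go
authorized := r.Group("/admin", gin.BasicAuthForRealm(gin.Accounts{
	"foo": "bar",
}, "My Admin Area"))
```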
#### Goroutines inside a middleware
When starting new goroutines inside a middleware or handler, you **SHOULD NOT** use the original context inside them; use a read-only copy instead.
```go
func main() {
r := gin.Default()
r.GET("/long_async", func(c *gin.Context) {
// create copy to be used inside the goroutine
cCp := c.Copy()
go func() {
// simulate a long task with time.Sleep(). 5 seconds
time.Sleep(5 * time.Second)
// note that you are using the copied context "cCp", IMPORTANT
log.Println("Done! in path " + cCp.Request.URL.Path)
}()
})
r.GET("/long_sync", func(c *gin.Context) {
// simulate a long task with time.Sleep(). 5 seconds
time.Sleep(5 * time.Second)
// since we are NOT using a goroutine, we do not have to copy the context
log.Println("Done! in path " + c.Request.URL.Path)
})
// Listen and serve on 0.0.0.0:8080
r.Run(":8080")
}
```
#### Custom HTTP configuration
Use `http.ListenAndServe()` directly, like this:
```go
func main() {
router := gin.Default()
http.ListenAndServe(":8080", router)
}
```
or
```go
func main() {
router := gin.Default()
s := &http.Server{
Addr: ":8080",
Handler: router,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
MaxHeaderBytes: 1 << 20,
}
s.ListenAndServe()
}
```
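For HTTPS without building the `http.Server` yourself, the engine also provides `RunTLS` (see gin.go below); the certificate paths here are placeholders:
```go
func main() {
	router := gin.Default()
	// cert.pem and key.pem are placeholder paths to your certificate and private key.
	if err := router.RunTLS(":8443", "cert.pem", "key.pem"); err != nil {
		log.Fatal(err)
	}
}
```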
#### Graceful restart or stop
Do you want to gracefully restart or stop your web server?
There are a few ways to do this.
We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer to issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details.
```go
router := gin.Default()
router.GET("/", handler)
// [...]
endless.ListenAndServe(":4242", router)
```
An alternative to endless:
* [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully.
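A rough sketch of wiring up manners; the `ListenAndServe`/`Close` calls are assumed from the manners README, so verify them against the library before relying on this:
```go
package main

import (
	"os"
	"os/signal"

	"github.com/braintree/manners"
	"github.com/gin-gonic/gin"
)

func main() {
	router := gin.Default()
	router.GET("/", func(c *gin.Context) { c.String(200, "ok") })

	// Trigger a graceful shutdown on Ctrl-C (API assumed; check the manners docs).
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt)
	go func() {
		<-quit
		manners.Close()
	}()

	manners.ListenAndServe(":8080", router)
}
```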

@@ -1,92 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"crypto/subtle"
"encoding/base64"
"strconv"
)
const AuthUserKey = "user"
type (
Accounts map[string]string
authPair struct {
Value string
User string
}
authPairs []authPair
)
func (a authPairs) searchCredential(authValue string) (string, bool) {
if len(authValue) == 0 {
return "", false
}
for _, pair := range a {
if pair.Value == authValue {
return pair.User, true
}
}
return "", false
}
// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where
// the key is the user name and the value is the password, as well as the name of the Realm.
// If the realm is empty, "Authorization Required" will be used by default.
// (see http://tools.ietf.org/html/rfc2617#section-1.2)
func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc {
if realm == "" {
realm = "Authorization Required"
}
realm = "Basic realm=" + strconv.Quote(realm)
pairs := processAccounts(accounts)
return func(c *Context) {
// Search user in the slice of allowed credentials
user, found := pairs.searchCredential(c.Request.Header.Get("Authorization"))
if !found {
// Credentials don't match; return 401 and abort the handlers chain.
c.Header("WWW-Authenticate", realm)
c.AbortWithStatus(401)
} else {
// The user credentials were found; set the user's id under key AuthUserKey in this context. It can be read later using
// c.MustGet(gin.AuthUserKey)
c.Set(AuthUserKey, user)
}
}
}
// BasicAuth returns a Basic HTTP Authorization middleware. It takes as argument a map[string]string where
// the key is the user name and the value is the password.
func BasicAuth(accounts Accounts) HandlerFunc {
return BasicAuthForRealm(accounts, "")
}
func processAccounts(accounts Accounts) authPairs {
assert1(len(accounts) > 0, "Empty list of authorized credentials")
pairs := make(authPairs, 0, len(accounts))
for user, password := range accounts {
assert1(len(user) > 0, "User can not be empty")
value := authorizationHeader(user, password)
pairs = append(pairs, authPair{
Value: value,
User: user,
})
}
return pairs
}
func authorizationHeader(user, password string) string {
base := user + ":" + password
return "Basic " + base64.StdEncoding.EncodeToString([]byte(base))
}
func secureCompare(given, actual string) bool {
if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 {
return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1
}
/* Securely compare actual to itself to keep constant time, but always return false */
return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false
}

@@ -1,67 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package binding
import "net/http"
const (
MIMEJSON = "application/json"
MIMEHTML = "text/html"
MIMEXML = "application/xml"
MIMEXML2 = "text/xml"
MIMEPlain = "text/plain"
MIMEPOSTForm = "application/x-www-form-urlencoded"
MIMEMultipartPOSTForm = "multipart/form-data"
MIMEPROTOBUF = "application/x-protobuf"
)
type Binding interface {
Name() string
Bind(*http.Request, interface{}) error
}
type StructValidator interface {
// ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right.
// If the received type is not a struct, any validation should be skipped and nil must be returned.
// If the received type is a struct or pointer to a struct, the validation should be performed.
// If the struct is not valid or the validation itself fails, a descriptive error should be returned.
// Otherwise nil must be returned.
ValidateStruct(interface{}) error
}
var Validator StructValidator = &defaultValidator{}
var (
JSON = jsonBinding{}
XML = xmlBinding{}
Form = formBinding{}
FormPost = formPostBinding{}
FormMultipart = formMultipartBinding{}
ProtoBuf = protobufBinding{}
)
func Default(method, contentType string) Binding {
if method == "GET" {
return Form
} else {
switch contentType {
case MIMEJSON:
return JSON
case MIMEXML, MIMEXML2:
return XML
case MIMEPROTOBUF:
return ProtoBuf
default: //case MIMEPOSTForm, MIMEMultipartPOSTForm:
return Form
}
}
}
func validate(obj interface{}) error {
if Validator == nil {
return nil
}
return Validator.ValidateStruct(obj)
}

@@ -1,41 +0,0 @@
package binding
import (
"reflect"
"sync"
"gopkg.in/go-playground/validator.v8"
)
type defaultValidator struct {
once sync.Once
validate *validator.Validate
}
var _ StructValidator = &defaultValidator{}
func (v *defaultValidator) ValidateStruct(obj interface{}) error {
if kindOfData(obj) == reflect.Struct {
v.lazyinit()
if err := v.validate.Struct(obj); err != nil {
return error(err)
}
}
return nil
}
func (v *defaultValidator) lazyinit() {
v.once.Do(func() {
config := &validator.Config{TagName: "binding"}
v.validate = validator.New(config)
})
}
func kindOfData(data interface{}) reflect.Kind {
value := reflect.ValueOf(data)
valueType := value.Kind()
if valueType == reflect.Ptr {
valueType = value.Elem().Kind()
}
return valueType
}

@@ -1,54 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package binding
import "net/http"
type formBinding struct{}
type formPostBinding struct{}
type formMultipartBinding struct{}
func (formBinding) Name() string {
return "form"
}
func (formBinding) Bind(req *http.Request, obj interface{}) error {
if err := req.ParseForm(); err != nil {
return err
}
req.ParseMultipartForm(32 << 20) // 32 MB
if err := mapForm(obj, req.Form); err != nil {
return err
}
return validate(obj)
}
func (formPostBinding) Name() string {
return "form-urlencoded"
}
func (formPostBinding) Bind(req *http.Request, obj interface{}) error {
if err := req.ParseForm(); err != nil {
return err
}
if err := mapForm(obj, req.PostForm); err != nil {
return err
}
return validate(obj)
}
func (formMultipartBinding) Name() string {
return "multipart/form-data"
}
func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error {
if err := req.ParseMultipartForm(32 << 10); err != nil {
return err
}
if err := mapForm(obj, req.MultipartForm.Value); err != nil {
return err
}
return validate(obj)
}

@@ -1,150 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package binding
import (
"errors"
"reflect"
"strconv"
)
func mapForm(ptr interface{}, form map[string][]string) error {
typ := reflect.TypeOf(ptr).Elem()
val := reflect.ValueOf(ptr).Elem()
for i := 0; i < typ.NumField(); i++ {
typeField := typ.Field(i)
structField := val.Field(i)
if !structField.CanSet() {
continue
}
structFieldKind := structField.Kind()
inputFieldName := typeField.Tag.Get("form")
if inputFieldName == "" {
inputFieldName = typeField.Name
// if the "form" tag is empty, we check whether the field is a struct.
// this would not make sense for JSON parsing but it does for a form
// since form data is flattened
if structFieldKind == reflect.Struct {
err := mapForm(structField.Addr().Interface(), form)
if err != nil {
return err
}
continue
}
}
inputValue, exists := form[inputFieldName]
if !exists {
continue
}
numElems := len(inputValue)
if structFieldKind == reflect.Slice && numElems > 0 {
sliceOf := structField.Type().Elem().Kind()
slice := reflect.MakeSlice(structField.Type(), numElems, numElems)
for i := 0; i < numElems; i++ {
if err := setWithProperType(sliceOf, inputValue[i], slice.Index(i)); err != nil {
return err
}
}
val.Field(i).Set(slice)
} else {
if err := setWithProperType(typeField.Type.Kind(), inputValue[0], structField); err != nil {
return err
}
}
}
return nil
}
func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error {
switch valueKind {
case reflect.Int:
return setIntField(val, 0, structField)
case reflect.Int8:
return setIntField(val, 8, structField)
case reflect.Int16:
return setIntField(val, 16, structField)
case reflect.Int32:
return setIntField(val, 32, structField)
case reflect.Int64:
return setIntField(val, 64, structField)
case reflect.Uint:
return setUintField(val, 0, structField)
case reflect.Uint8:
return setUintField(val, 8, structField)
case reflect.Uint16:
return setUintField(val, 16, structField)
case reflect.Uint32:
return setUintField(val, 32, structField)
case reflect.Uint64:
return setUintField(val, 64, structField)
case reflect.Bool:
return setBoolField(val, structField)
case reflect.Float32:
return setFloatField(val, 32, structField)
case reflect.Float64:
return setFloatField(val, 64, structField)
case reflect.String:
structField.SetString(val)
default:
return errors.New("Unknown type")
}
return nil
}
func setIntField(val string, bitSize int, field reflect.Value) error {
if val == "" {
val = "0"
}
intVal, err := strconv.ParseInt(val, 10, bitSize)
if err == nil {
field.SetInt(intVal)
}
return err
}
func setUintField(val string, bitSize int, field reflect.Value) error {
if val == "" {
val = "0"
}
uintVal, err := strconv.ParseUint(val, 10, bitSize)
if err == nil {
field.SetUint(uintVal)
}
return err
}
func setBoolField(val string, field reflect.Value) error {
if val == "" {
val = "false"
}
boolVal, err := strconv.ParseBool(val)
if err == nil {
field.SetBool(boolVal)
}
return nil
}
func setFloatField(val string, bitSize int, field reflect.Value) error {
if val == "" {
val = "0.0"
}
floatVal, err := strconv.ParseFloat(val, bitSize)
if err == nil {
field.SetFloat(floatVal)
}
return err
}
// Don't pass in pointers to bind to. Can lead to bugs. See:
// https://github.com/codegangsta/martini-contrib/issues/40
// https://github.com/codegangsta/martini-contrib/pull/34#issuecomment-29683659
func ensureNotPointer(obj interface{}) {
if reflect.TypeOf(obj).Kind() == reflect.Ptr {
panic("Pointers are not accepted as binding models")
}
}

@@ -1,25 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package binding
import (
"encoding/json"
"net/http"
)
type jsonBinding struct{}
func (jsonBinding) Name() string {
return "json"
}
func (jsonBinding) Bind(req *http.Request, obj interface{}) error {
decoder := json.NewDecoder(req.Body)
if err := decoder.Decode(obj); err != nil {
return err
}
return validate(obj)
}

@@ -1,35 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package binding
import (
"github.com/golang/protobuf/proto"
"io/ioutil"
"net/http"
)
type protobufBinding struct{}
func (protobufBinding) Name() string {
return "protobuf"
}
func (protobufBinding) Bind(req *http.Request, obj interface{}) error {
buf, err := ioutil.ReadAll(req.Body)
if err != nil {
return err
}
if err = proto.Unmarshal(buf, obj.(proto.Message)); err != nil {
return err
}
// Here it would be the same to return validate(obj), but for now we can't add `binding:""` tags
// to the structs that are automatically generated from .proto files.
return nil
//return validate(obj)
}

@@ -1,24 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package binding
import (
"encoding/xml"
"net/http"
)
type xmlBinding struct{}
func (xmlBinding) Name() string {
return "xml"
}
func (xmlBinding) Bind(req *http.Request, obj interface{}) error {
decoder := xml.NewDecoder(req.Body)
if err := decoder.Decode(obj); err != nil {
return err
}
return validate(obj)
}

@@ -1,568 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"errors"
"io"
"math"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/gin-gonic/gin/binding"
"github.com/gin-gonic/gin/render"
"github.com/manucorporat/sse"
"golang.org/x/net/context"
)
// Content-Type MIME of the most common data formats
const (
MIMEJSON = binding.MIMEJSON
MIMEHTML = binding.MIMEHTML
MIMEXML = binding.MIMEXML
MIMEXML2 = binding.MIMEXML2
MIMEPlain = binding.MIMEPlain
MIMEPOSTForm = binding.MIMEPOSTForm
MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm
)
const abortIndex int8 = math.MaxInt8 / 2
// Context is the most important part of gin. It allows us to pass variables between middleware,
// manage the flow, validate the JSON of a request and render a JSON response for example.
type Context struct {
writermem responseWriter
Request *http.Request
Writer ResponseWriter
Params Params
handlers HandlersChain
index int8
engine *Engine
Keys map[string]interface{}
Errors errorMsgs
Accepted []string
}
var _ context.Context = &Context{}
/************************************/
/********** CONTEXT CREATION ********/
/************************************/
func (c *Context) reset() {
c.Writer = &c.writermem
c.Params = c.Params[0:0]
c.handlers = nil
c.index = -1
c.Keys = nil
c.Errors = c.Errors[0:0]
c.Accepted = nil
}
// Copy returns a copy of the current context that can be safely used outside the request's scope.
// This has to be used when the context has to be passed to a goroutine.
func (c *Context) Copy() *Context {
var cp = *c
cp.writermem.ResponseWriter = nil
cp.Writer = &cp.writermem
cp.index = abortIndex
cp.handlers = nil
return &cp
}
// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", this
// function will return "main.handleGetUsers"
func (c *Context) HandlerName() string {
return nameOfFunction(c.handlers.Last())
}
/************************************/
/*********** FLOW CONTROL ***********/
/************************************/
// Next should be used only inside middleware.
// It executes the pending handlers in the chain inside the calling handler.
// See example in github.
func (c *Context) Next() {
c.index++
s := int8(len(c.handlers))
for ; c.index < s; c.index++ {
c.handlers[c.index](c)
}
}
// IsAborted returns true if the current context was aborted.
func (c *Context) IsAborted() bool {
return c.index >= abortIndex
}
// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
// Let's say you have an authorization middleware that validates that the current request is authorized. If the
// authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
// for this request are not called.
func (c *Context) Abort() {
c.index = abortIndex
}
// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
func (c *Context) AbortWithStatus(code int) {
c.Status(code)
c.Writer.WriteHeaderNow()
c.Abort()
}
// AbortWithError calls `AbortWithStatus()` and `Error()` internally. This method stops the chain, writes the status code and
// pushes the specified error to `c.Errors`.
// See Context.Error() for more details.
func (c *Context) AbortWithError(code int, err error) *Error {
c.AbortWithStatus(code)
return c.Error(err)
}
/************************************/
/********* ERROR MANAGEMENT *********/
/************************************/
// Error attaches an error to the current context. The error is pushed to a list of errors.
// It's a good idea to call Error for each error that occurred during the resolution of a request.
// A middleware can be used to collect all the errors
// and push them to a database together, print a log, or append them to the HTTP response.
func (c *Context) Error(err error) *Error {
var parsedError *Error
switch err.(type) {
case *Error:
parsedError = err.(*Error)
default:
parsedError = &Error{
Err: err,
Type: ErrorTypePrivate,
}
}
c.Errors = append(c.Errors, parsedError)
return parsedError
}
/************************************/
/******** METADATA MANAGEMENT********/
/************************************/
// Set is used to store a new key/value pair exclusively for this context.
// It also lazy initializes c.Keys if it was not used previously.
func (c *Context) Set(key string, value interface{}) {
if c.Keys == nil {
c.Keys = make(map[string]interface{})
}
c.Keys[key] = value
}
// Get returns the value for the given key, i.e. (value, true).
// If the value does not exist it returns (nil, false)
func (c *Context) Get(key string) (value interface{}, exists bool) {
if c.Keys != nil {
value, exists = c.Keys[key]
}
return
}
// MustGet returns the value for the given key if it exists, otherwise it panics.
func (c *Context) MustGet(key string) interface{} {
if value, exists := c.Get(key); exists {
return value
}
panic("Key \"" + key + "\" does not exist")
}
/************************************/
/************ INPUT DATA ************/
/************************************/
// Param returns the value of the URL param.
// It is a shortcut for c.Params.ByName(key)
// router.GET("/user/:id", func(c *gin.Context) {
// // a GET request to /user/john
// id := c.Param("id") // id == "john"
// })
func (c *Context) Param(key string) string {
return c.Params.ByName(key)
}
// Query returns the keyed url query value if it exists,
// otherwise it returns an empty string `("")`.
// It is a shortcut for `c.Request.URL.Query().Get(key)`
// GET /path?id=1234&name=Manu&value=
// c.Query("id") == "1234"
// c.Query("name") == "Manu"
// c.Query("value") == ""
// c.Query("wtf") == ""
func (c *Context) Query(key string) string {
value, _ := c.GetQuery(key)
return value
}
// DefaultQuery returns the keyed url query value if it exists,
// otherwise it returns the specified defaultValue string.
// See: Query() and GetQuery() for further information.
// GET /?name=Manu&lastname=
// c.DefaultQuery("name", "unknown") == "Manu"
// c.DefaultQuery("id", "none") == "none"
// c.DefaultQuery("lastname", "none") == ""
func (c *Context) DefaultQuery(key, defaultValue string) string {
if value, ok := c.GetQuery(key); ok {
return value
}
return defaultValue
}
// GetQuery is like Query(), it returns the keyed url query value
// if it exists `(value, true)` (even when the value is an empty string),
// otherwise it returns `("", false)`.
// It is a shortcut for `c.Request.URL.Query().Get(key)`
// GET /?name=Manu&lastname=
// ("Manu", true) == c.GetQuery("name")
// ("", false) == c.GetQuery("id")
// ("", true) == c.GetQuery("lastname")
func (c *Context) GetQuery(key string) (string, bool) {
req := c.Request
if values, ok := req.URL.Query()[key]; ok && len(values) > 0 {
return values[0], true
}
return "", false
}
// PostForm returns the specified key from a POST urlencoded form or multipart form
// when it exists, otherwise it returns an empty string `("")`.
func (c *Context) PostForm(key string) string {
value, _ := c.GetPostForm(key)
return value
}
// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form
// when it exists, otherwise it returns the specified defaultValue string.
// See: PostForm() and GetPostForm() for further information.
func (c *Context) DefaultPostForm(key, defaultValue string) string {
if value, ok := c.GetPostForm(key); ok {
return value
}
return defaultValue
}
// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded
// form or multipart form when it exists `(value, true)` (even when the value is an empty string),
// otherwise it returns ("", false).
// For example, during a PATCH request to update the user's email:
// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com"
// email= --> ("", true) := GetPostForm("email") // set email to ""
// --> ("", false) := GetPostForm("email") // do nothing with email
func (c *Context) GetPostForm(key string) (string, bool) {
req := c.Request
req.ParseMultipartForm(32 << 20) // 32 MB
if values := req.PostForm[key]; len(values) > 0 {
return values[0], true
}
if req.MultipartForm != nil && req.MultipartForm.File != nil {
if values := req.MultipartForm.Value[key]; len(values) > 0 {
return values[0], true
}
}
return "", false
}
// Bind checks the Content-Type to select a binding engine automatically.
// Depending on the "Content-Type" header, different bindings are used:
// "application/json" --> JSON binding
// "application/xml" --> XML binding
// otherwise --> form binding (see binding.Default)
// It decodes the request body into the struct specified as a pointer
// and writes a 400 error if the input is not valid.
func (c *Context) Bind(obj interface{}) error {
b := binding.Default(c.Request.Method, c.ContentType())
return c.BindWith(obj, b)
}
// BindJSON is a shortcut for c.BindWith(obj, binding.JSON)
func (c *Context) BindJSON(obj interface{}) error {
return c.BindWith(obj, binding.JSON)
}
// BindWith binds the passed struct pointer using the specified binding engine.
// See the binding package.
func (c *Context) BindWith(obj interface{}, b binding.Binding) error {
if err := b.Bind(c.Request, obj); err != nil {
c.AbortWithError(400, err).SetType(ErrorTypeBind)
return err
}
return nil
}
// ClientIP implements a best effort algorithm to return the real client IP. It parses
// X-Real-IP and X-Forwarded-For in order to work properly with reverse proxies such as nginx or haproxy.
func (c *Context) ClientIP() string {
if c.engine.ForwardedByClientIP {
clientIP := strings.TrimSpace(c.requestHeader("X-Real-Ip"))
if len(clientIP) > 0 {
return clientIP
}
clientIP = c.requestHeader("X-Forwarded-For")
if index := strings.IndexByte(clientIP, ','); index >= 0 {
clientIP = clientIP[0:index]
}
clientIP = strings.TrimSpace(clientIP)
if len(clientIP) > 0 {
return clientIP
}
}
if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil {
return ip
}
return ""
}
// ContentType returns the Content-Type header of the request.
func (c *Context) ContentType() string {
return filterFlags(c.requestHeader("Content-Type"))
}
func (c *Context) requestHeader(key string) string {
if values, _ := c.Request.Header[key]; len(values) > 0 {
return values[0]
}
return ""
}
/************************************/
/******** RESPONSE RENDERING ********/
/************************************/
func (c *Context) Status(code int) {
c.writermem.WriteHeader(code)
}
// Header is an intelligent shortcut for c.Writer.Header().Set(key, value)
// It writes a header in the response.
// If value == "", this method removes the header `c.Writer.Header().Del(key)`
func (c *Context) Header(key, value string) {
if len(value) == 0 {
c.Writer.Header().Del(key)
} else {
c.Writer.Header().Set(key, value)
}
}
func (c *Context) SetCookie(
name string,
value string,
maxAge int,
path string,
domain string,
secure bool,
httpOnly bool,
) {
if path == "" {
path = "/"
}
http.SetCookie(c.Writer, &http.Cookie{
Name: name,
Value: url.QueryEscape(value),
MaxAge: maxAge,
Path: path,
Domain: domain,
Secure: secure,
HttpOnly: httpOnly,
})
}
func (c *Context) Cookie(name string) (string, error) {
cookie, err := c.Request.Cookie(name)
if err != nil {
return "", err
}
val, _ := url.QueryUnescape(cookie.Value)
return val, nil
}
func (c *Context) Render(code int, r render.Render) {
c.Status(code)
if err := r.Render(c.Writer); err != nil {
panic(err)
}
}
// HTML renders the HTML template specified by its file name.
// It also updates the HTTP code and sets the Content-Type as "text/html".
// See http://golang.org/doc/articles/wiki/
func (c *Context) HTML(code int, name string, obj interface{}) {
instance := c.engine.HTMLRender.Instance(name, obj)
c.Render(code, instance)
}
// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body.
// It also sets the Content-Type as "application/json".
// WARNING: we recommend using this only for development purposes since printing pretty JSON
// consumes more CPU and bandwidth. Use Context.JSON() instead.
func (c *Context) IndentedJSON(code int, obj interface{}) {
c.Render(code, render.IndentedJSON{Data: obj})
}
// JSON serializes the given struct as JSON into the response body.
// It also sets the Content-Type as "application/json".
func (c *Context) JSON(code int, obj interface{}) {
c.Status(code)
if err := render.WriteJSON(c.Writer, obj); err != nil {
panic(err)
}
}
// XML serializes the given struct as XML into the response body.
// It also sets the Content-Type as "application/xml".
func (c *Context) XML(code int, obj interface{}) {
c.Render(code, render.XML{Data: obj})
}
// YAML serializes the given struct as YAML into the response body.
func (c *Context) YAML(code int, obj interface{}) {
c.Render(code, render.YAML{Data: obj})
}
// String writes the given string into the response body.
func (c *Context) String(code int, format string, values ...interface{}) {
c.Status(code)
render.WriteString(c.Writer, format, values)
}
// Redirect returns an HTTP redirect to the specified location.
func (c *Context) Redirect(code int, location string) {
c.Render(-1, render.Redirect{
Code: code,
Location: location,
Request: c.Request,
})
}
// Data writes some data into the body stream and updates the HTTP code.
func (c *Context) Data(code int, contentType string, data []byte) {
c.Render(code, render.Data{
ContentType: contentType,
Data: data,
})
}
// File writes the specified file into the body stream in an efficient way.
func (c *Context) File(filepath string) {
http.ServeFile(c.Writer, c.Request, filepath)
}
// SSEvent writes a Server-Sent Event into the body stream.
func (c *Context) SSEvent(name string, message interface{}) {
c.Render(-1, sse.Event{
Event: name,
Data: message,
})
}
func (c *Context) Stream(step func(w io.Writer) bool) {
w := c.Writer
clientGone := w.CloseNotify()
for {
select {
case <-clientGone:
return
default:
keepOpen := step(w)
w.Flush()
if !keepOpen {
return
}
}
}
}
/************************************/
/******** CONTENT NEGOTIATION *******/
/************************************/
type Negotiate struct {
Offered []string
HTMLName string
HTMLData interface{}
JSONData interface{}
XMLData interface{}
Data interface{}
}
func (c *Context) Negotiate(code int, config Negotiate) {
switch c.NegotiateFormat(config.Offered...) {
case binding.MIMEJSON:
data := chooseData(config.JSONData, config.Data)
c.JSON(code, data)
case binding.MIMEHTML:
data := chooseData(config.HTMLData, config.Data)
c.HTML(code, config.HTMLName, data)
case binding.MIMEXML:
data := chooseData(config.XMLData, config.Data)
c.XML(code, data)
default:
c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server"))
}
}
func (c *Context) NegotiateFormat(offered ...string) string {
assert1(len(offered) > 0, "you must provide at least one offer")
if c.Accepted == nil {
c.Accepted = parseAccept(c.requestHeader("Accept"))
}
if len(c.Accepted) == 0 {
return offered[0]
}
for _, accepted := range c.Accepted {
for _, offert := range offered {
if accepted == offert {
return offert
}
}
}
return ""
}
func (c *Context) SetAccepted(formats ...string) {
c.Accepted = formats
}
/************************************/
/***** GOLANG.ORG/X/NET/CONTEXT *****/
/************************************/
func (c *Context) Deadline() (deadline time.Time, ok bool) {
return
}
func (c *Context) Done() <-chan struct{} {
return nil
}
func (c *Context) Err() error {
return nil
}
func (c *Context) Value(key interface{}) interface{} {
if key == 0 {
return c.Request
}
if keyAsString, ok := key.(string); ok {
val, _ := c.Get(keyAsString)
return val
}
return nil
}

@@ -1,71 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"bytes"
"html/template"
"log"
)
func init() {
log.SetFlags(0)
}
// IsDebugging returns true if the framework is running in debug mode.
// Use SetMode(gin.ReleaseMode) to disable debug mode.
func IsDebugging() bool {
return ginMode == debugCode
}
func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) {
if IsDebugging() {
nuHandlers := len(handlers)
handlerName := nameOfFunction(handlers.Last())
debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers)
}
}
func debugPrintLoadTemplate(tmpl *template.Template) {
if IsDebugging() {
var buf bytes.Buffer
for _, tmpl := range tmpl.Templates() {
buf.WriteString("\t- ")
buf.WriteString(tmpl.Name())
buf.WriteString("\n")
}
debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String())
}
}
func debugPrint(format string, values ...interface{}) {
if IsDebugging() {
log.Printf("[GIN-debug] "+format, values...)
}
}
func debugPrintWARNINGNew() {
debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production.
- using env: export GIN_MODE=release
- using code: gin.SetMode(gin.ReleaseMode)
`)
}
func debugPrintWARNINGSetHTMLTemplate() {
debugPrint(`[WARNING] SetHTMLTemplate() is NOT thread-safe. It should only be called
at initialization, i.e. before any route is registered or the router is listening on a socket:
router := gin.Default()
router.SetHTMLTemplate(template) // << good place
`)
}
func debugPrintError(err error) {
if err != nil {
debugPrint("[ERROR] %v\n", err)
}
}

@@ -1,12 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import "log"
func (c *Context) GetCookie(name string) (string, error) {
log.Println("GetCookie() method is deprecated. Use Cookie() instead.")
return c.Cookie(name)
}

@@ -1,159 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
type ErrorType uint64
const (
ErrorTypeBind ErrorType = 1 << 63 // used when c.Bind() fails
ErrorTypeRender ErrorType = 1 << 62 // used when c.Render() fails
ErrorTypePrivate ErrorType = 1 << 0
ErrorTypePublic ErrorType = 1 << 1
ErrorTypeAny ErrorType = 1<<64 - 1
ErrorTypeNu = 2
)
type (
Error struct {
Err error
Type ErrorType
Meta interface{}
}
errorMsgs []*Error
)
var _ error = &Error{}
func (msg *Error) SetType(flags ErrorType) *Error {
msg.Type = flags
return msg
}
func (msg *Error) SetMeta(data interface{}) *Error {
msg.Meta = data
return msg
}
func (msg *Error) JSON() interface{} {
json := H{}
if msg.Meta != nil {
value := reflect.ValueOf(msg.Meta)
switch value.Kind() {
case reflect.Struct:
return msg.Meta
case reflect.Map:
for _, key := range value.MapKeys() {
json[key.String()] = value.MapIndex(key).Interface()
}
default:
json["meta"] = msg.Meta
}
}
if _, ok := json["error"]; !ok {
json["error"] = msg.Error()
}
return json
}
// MarshalJSON implements the json.Marshaller interface
func (msg *Error) MarshalJSON() ([]byte, error) {
return json.Marshal(msg.JSON())
}
// Implements the error interface
func (msg *Error) Error() string {
return msg.Err.Error()
}
func (msg *Error) IsType(flags ErrorType) bool {
return (msg.Type & flags) > 0
}
// ByType returns a readonly copy filtered by the given error type.
// e.g. ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic
func (a errorMsgs) ByType(typ ErrorType) errorMsgs {
if len(a) == 0 {
return nil
}
if typ == ErrorTypeAny {
return a
}
var result errorMsgs
for _, msg := range a {
if msg.IsType(typ) {
result = append(result, msg)
}
}
return result
}
// Returns the last error in the slice. It returns nil if the array is empty.
// Shortcut for errors[len(errors)-1]
func (a errorMsgs) Last() *Error {
length := len(a)
if length > 0 {
return a[length-1]
}
return nil
}
// Errors returns an array with all the error messages.
// Example:
// c.Error(errors.New("first"))
// c.Error(errors.New("second"))
// c.Error(errors.New("third"))
// c.Errors.Errors() // == []string{"first", "second", "third"}
func (a errorMsgs) Errors() []string {
if len(a) == 0 {
return nil
}
errorStrings := make([]string, len(a))
for i, err := range a {
errorStrings[i] = err.Error()
}
return errorStrings
}
func (a errorMsgs) JSON() interface{} {
switch len(a) {
case 0:
return nil
case 1:
return a.Last().JSON()
default:
json := make([]interface{}, len(a))
for i, err := range a {
json[i] = err.JSON()
}
return json
}
}
func (a errorMsgs) MarshalJSON() ([]byte, error) {
return json.Marshal(a.JSON())
}
func (a errorMsgs) String() string {
if len(a) == 0 {
return ""
}
var buffer bytes.Buffer
for i, msg := range a {
fmt.Fprintf(&buffer, "Error #%02d: %s\n", (i + 1), msg.Err)
if msg.Meta != nil {
fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta)
}
}
return buffer.String()
}

@@ -1,42 +0,0 @@
package gin
import (
"net/http"
"os"
)
type (
onlyfilesFS struct {
fs http.FileSystem
}
neuteredReaddirFile struct {
http.File
}
)
// Dir returns an http.FileSystem that can be used by http.FileServer(). It is used internally
// in router.Static().
// If listDirectory == true, it works the same as http.Dir(); otherwise it returns
// a filesystem that prevents http.FileServer() from listing the directory files.
func Dir(root string, listDirectory bool) http.FileSystem {
fs := http.Dir(root)
if listDirectory {
return fs
}
return &onlyfilesFS{fs}
}
// Conforms to http.Filesystem
func (fs onlyfilesFS) Open(name string) (http.File, error) {
f, err := fs.fs.Open(name)
if err != nil {
return nil, err
}
return neuteredReaddirFile{f}, nil
}
// Overrides the http.File default implementation
func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) {
// this disables directory listing
return nil, nil
}

@@ -1,370 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"html/template"
"net"
"net/http"
"os"
"sync"
"github.com/gin-gonic/gin/render"
)
// Version is Framework's version
const Version = "v1.0rc2"
var default404Body = []byte("404 page not found")
var default405Body = []byte("405 method not allowed")
type HandlerFunc func(*Context)
type HandlersChain []HandlerFunc
// Last returns the last handler in the chain, i.e. the last handler is the main one.
func (c HandlersChain) Last() HandlerFunc {
length := len(c)
if length > 0 {
return c[length-1]
}
return nil
}
type (
RoutesInfo []RouteInfo
RouteInfo struct {
Method string
Path string
Handler string
}
// Engine is the framework's instance, it contains the muxer, middleware and configuration settings.
// Create an instance of Engine, by using New() or Default()
Engine struct {
RouterGroup
HTMLRender render.HTMLRender
allNoRoute HandlersChain
allNoMethod HandlersChain
noRoute HandlersChain
noMethod HandlersChain
pool sync.Pool
trees methodTrees
// Enables automatic redirection if the current route can't be matched but a
// handler for the path with (without) the trailing slash exists.
// For example if /foo/ is requested but a route only exists for /foo, the
// client is redirected to /foo with http status code 301 for GET requests
// and 307 for all other request methods.
RedirectTrailingSlash bool
// If enabled, the router tries to fix the current request path, if no
// handle is registered for it.
// First superfluous path elements like ../ or // are removed.
// Afterwards the router does a case-insensitive lookup of the cleaned path.
// If a handle can be found for this route, the router makes a redirection
// to the corrected path with status code 301 for GET requests and 307 for
// all other request methods.
// For example /FOO and /..//Foo could be redirected to /foo.
// RedirectTrailingSlash is independent of this option.
RedirectFixedPath bool
// If enabled, the router checks if another method is allowed for the
// current route, if the current request can not be routed.
// If this is the case, the request is answered with 'Method Not Allowed'
// and HTTP status code 405.
// If no other Method is allowed, the request is delegated to the NotFound
// handler.
HandleMethodNotAllowed bool
ForwardedByClientIP bool
}
)
var _ IRouter = &Engine{}
// New returns a new blank Engine instance without any middleware attached.
// By default the configuration is:
// - RedirectTrailingSlash: true
// - RedirectFixedPath: false
// - HandleMethodNotAllowed: false
// - ForwardedByClientIP: true
func New() *Engine {
debugPrintWARNINGNew()
engine := &Engine{
RouterGroup: RouterGroup{
Handlers: nil,
basePath: "/",
root: true,
},
RedirectTrailingSlash: true,
RedirectFixedPath: false,
HandleMethodNotAllowed: false,
ForwardedByClientIP: true,
trees: make(methodTrees, 0, 9),
}
engine.RouterGroup.engine = engine
engine.pool.New = func() interface{} {
return engine.allocateContext()
}
return engine
}
// Default returns an Engine instance with the Logger and Recovery middleware already attached.
func Default() *Engine {
engine := New()
engine.Use(Logger(), Recovery())
return engine
}
func (engine *Engine) allocateContext() *Context {
return &Context{engine: engine}
}
func (engine *Engine) LoadHTMLGlob(pattern string) {
if IsDebugging() {
debugPrintLoadTemplate(template.Must(template.ParseGlob(pattern)))
engine.HTMLRender = render.HTMLDebug{Glob: pattern}
} else {
templ := template.Must(template.ParseGlob(pattern))
engine.SetHTMLTemplate(templ)
}
}
func (engine *Engine) LoadHTMLFiles(files ...string) {
if IsDebugging() {
engine.HTMLRender = render.HTMLDebug{Files: files}
} else {
templ := template.Must(template.ParseFiles(files...))
engine.SetHTMLTemplate(templ)
}
}
func (engine *Engine) SetHTMLTemplate(templ *template.Template) {
if len(engine.trees) > 0 {
debugPrintWARNINGSetHTMLTemplate()
}
engine.HTMLRender = render.HTMLProduction{Template: templ}
}
// NoRoute adds handlers for NoRoute. It returns a 404 code by default.
func (engine *Engine) NoRoute(handlers ...HandlerFunc) {
engine.noRoute = handlers
engine.rebuild404Handlers()
}
// NoMethod sets the handlers called when the request method is not allowed (HTTP 405).
func (engine *Engine) NoMethod(handlers ...HandlerFunc) {
engine.noMethod = handlers
engine.rebuild405Handlers()
}
// Use attaches a global middleware to the router, i.e. the middleware attached through Use() will be
// included in the handlers chain for every single request, even 404, 405, static files...
// For example, this is the right place for a logger or error management middleware.
func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes {
engine.RouterGroup.Use(middleware...)
engine.rebuild404Handlers()
engine.rebuild405Handlers()
return engine
}
func (engine *Engine) rebuild404Handlers() {
engine.allNoRoute = engine.combineHandlers(engine.noRoute)
}
func (engine *Engine) rebuild405Handlers() {
engine.allNoMethod = engine.combineHandlers(engine.noMethod)
}
func (engine *Engine) addRoute(method, path string, handlers HandlersChain) {
assert1(path[0] == '/', "path must begin with '/'")
assert1(len(method) > 0, "HTTP method can not be empty")
assert1(len(handlers) > 0, "there must be at least one handler")
debugPrintRoute(method, path, handlers)
root := engine.trees.get(method)
if root == nil {
root = new(node)
engine.trees = append(engine.trees, methodTree{method: method, root: root})
}
root.addRoute(path, handlers)
}
// Routes returns a slice of registered routes, including some useful information, such as:
// the http method, path and the handler name.
func (engine *Engine) Routes() (routes RoutesInfo) {
for _, tree := range engine.trees {
routes = iterate("", tree.method, routes, tree.root)
}
return routes
}
func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo {
path += root.path
if len(root.handlers) > 0 {
routes = append(routes, RouteInfo{
Method: method,
Path: path,
Handler: nameOfFunction(root.handlers.Last()),
})
}
for _, child := range root.children {
routes = iterate(path, method, routes, child)
}
return routes
}
// Run attaches the router to an http.Server and starts listening and serving HTTP requests.
// It is a shortcut for http.ListenAndServe(addr, router)
// Note: this method will block the calling goroutine indefinitely unless an error happens.
func (engine *Engine) Run(addr ...string) (err error) {
defer func() { debugPrintError(err) }()
address := resolveAddress(addr)
debugPrint("Listening and serving HTTP on %s\n", address)
err = http.ListenAndServe(address, engine)
return
}
// RunTLS attaches the router to an http.Server and starts listening and serving HTTPS (secure) requests.
// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router)
// Note: this method will block the calling goroutine indefinitely unless an error happens.
func (engine *Engine) RunTLS(addr string, certFile string, keyFile string) (err error) {
debugPrint("Listening and serving HTTPS on %s\n", addr)
defer func() { debugPrintError(err) }()
err = http.ListenAndServeTLS(addr, certFile, keyFile, engine)
return
}
// RunUnix attaches the router to an http.Server and starts listening and serving HTTP requests
// through the specified unix socket (ie. a file).
// Note: this method will block the calling goroutine indefinitely unless an error happens.
func (engine *Engine) RunUnix(file string) (err error) {
debugPrint("Listening and serving HTTP on unix:/%s", file)
defer func() { debugPrintError(err) }()
os.Remove(file)
listener, err := net.Listen("unix", file)
if err != nil {
return
}
defer listener.Close()
err = http.Serve(listener, engine)
return
}
// Conforms to the http.Handler interface.
func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) {
c := engine.pool.Get().(*Context)
c.writermem.reset(w)
c.Request = req
c.reset()
engine.handleHTTPRequest(c)
engine.pool.Put(c)
}
func (engine *Engine) handleHTTPRequest(context *Context) {
httpMethod := context.Request.Method
path := context.Request.URL.Path
// Find root of the tree for the given HTTP method
t := engine.trees
for i, tl := 0, len(t); i < tl; i++ {
if t[i].method == httpMethod {
root := t[i].root
// Find route in tree
handlers, params, tsr := root.getValue(path, context.Params)
if handlers != nil {
context.handlers = handlers
context.Params = params
context.Next()
context.writermem.WriteHeaderNow()
return
} else if httpMethod != "CONNECT" && path != "/" {
if tsr && engine.RedirectTrailingSlash {
redirectTrailingSlash(context)
return
}
if engine.RedirectFixedPath && redirectFixedPath(context, root, engine.RedirectFixedPath) {
return
}
}
break
}
}
// TODO: unit test
if engine.HandleMethodNotAllowed {
for _, tree := range engine.trees {
if tree.method != httpMethod {
if handlers, _, _ := tree.root.getValue(path, nil); handlers != nil {
context.handlers = engine.allNoMethod
serveError(context, 405, default405Body)
return
}
}
}
}
context.handlers = engine.allNoRoute
serveError(context, 404, default404Body)
}
var mimePlain = []string{MIMEPlain}
func serveError(c *Context, code int, defaultMessage []byte) {
c.writermem.status = code
c.Next()
if !c.writermem.Written() {
if c.writermem.Status() == code {
c.writermem.Header()["Content-Type"] = mimePlain
c.Writer.Write(defaultMessage)
} else {
c.writermem.WriteHeaderNow()
}
}
}
func redirectTrailingSlash(c *Context) {
req := c.Request
path := req.URL.Path
code := 301 // Permanent redirect, request with GET method
if req.Method != "GET" {
code = 307
}
if len(path) > 1 && path[len(path)-1] == '/' {
req.URL.Path = path[:len(path)-1]
} else {
req.URL.Path = path + "/"
}
debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String())
http.Redirect(c.Writer, req, req.URL.String(), code)
c.writermem.WriteHeaderNow()
}
func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool {
req := c.Request
path := req.URL.Path
fixedPath, found := root.findCaseInsensitivePath(
cleanPath(path),
trailingSlash,
)
if found {
code := 301 // Permanent redirect, request with GET method
if req.Method != "GET" {
code = 307
}
req.URL.Path = string(fixedPath)
debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String())
http.Redirect(c.Writer, req, req.URL.String(), code)
c.writermem.WriteHeaderNow()
return true
}
return false
}

@@ -1,123 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"fmt"
"io"
"time"
)
var (
green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
reset = string([]byte{27, 91, 48, 109})
)
func ErrorLogger() HandlerFunc {
return ErrorLoggerT(ErrorTypeAny)
}
func ErrorLoggerT(typ ErrorType) HandlerFunc {
return func(c *Context) {
c.Next()
errors := c.Errors.ByType(typ)
if len(errors) > 0 {
c.JSON(-1, errors)
}
}
}
// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter
// By default gin.DefaultWriter = os.Stdout
func Logger() HandlerFunc {
return LoggerWithWriter(DefaultWriter)
}
// LoggerWithWriter instances a Logger middleware with the specified writer.
// Example: os.Stdout, a file opened in write mode, a socket...
func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {
var skip map[string]struct{}
if length := len(notlogged); length > 0 {
skip = make(map[string]struct{}, length)
for _, path := range notlogged {
skip[path] = struct{}{}
}
}
return func(c *Context) {
// Start timer
start := time.Now()
path := c.Request.URL.Path
// Process request
c.Next()
// Log only when path is not being skipped
if _, ok := skip[path]; !ok {
// Stop timer
end := time.Now()
latency := end.Sub(start)
clientIP := c.ClientIP()
method := c.Request.Method
statusCode := c.Writer.Status()
statusColor := colorForStatus(statusCode)
methodColor := colorForMethod(method)
comment := c.Errors.ByType(ErrorTypePrivate).String()
fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %s |%s %s %-7s %s\n%s",
end.Format("2006/01/02 - 15:04:05"),
statusColor, statusCode, reset,
latency,
clientIP,
methodColor, reset, method,
path,
comment,
)
}
}
}
func colorForStatus(code int) string {
switch {
case code >= 200 && code < 300:
return green
case code >= 300 && code < 400:
return white
case code >= 400 && code < 500:
return yellow
default:
return red
}
}
func colorForMethod(method string) string {
switch method {
case "GET":
return blue
case "POST":
return cyan
case "PUT":
return yellow
case "DELETE":
return red
case "PATCH":
return green
case "HEAD":
return magenta
case "OPTIONS":
return white
default:
return reset
}
}

Binary file not shown (image, 12 KiB).
@@ -1,68 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"os"
"github.com/gin-gonic/gin/binding"
)
const ENV_GIN_MODE = "GIN_MODE"
const (
DebugMode string = "debug"
ReleaseMode string = "release"
TestMode string = "test"
)
const (
debugCode = iota
releaseCode = iota
testCode = iota
)
// DefaultWriter is the default io.Writer used by Gin for debug output and
// middleware output like Logger() or Recovery().
// Note that both Logger and Recovery provide custom ways to configure their
// output io.Writer.
// To support coloring in Windows use:
// import "github.com/mattn/go-colorable"
// gin.DefaultWriter = colorable.NewColorableStdout()
var DefaultWriter = os.Stdout
var DefaultErrorWriter = os.Stderr
var ginMode = debugCode
var modeName = DebugMode
func init() {
mode := os.Getenv(ENV_GIN_MODE)
if len(mode) == 0 {
SetMode(DebugMode)
} else {
SetMode(mode)
}
}
func SetMode(value string) {
switch value {
case DebugMode:
ginMode = debugCode
case ReleaseMode:
ginMode = releaseCode
case TestMode:
ginMode = testCode
default:
panic("gin mode unknown: " + value)
}
modeName = value
}
func DisableBindValidation() {
binding.Validator = nil
}
func Mode() string {
return modeName
}

@@ -1,123 +0,0 @@
// Copyright 2013 Julien Schmidt. All rights reserved.
// Based on the path package, Copyright 2009 The Go Authors.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package gin
// CleanPath is the URL version of path.Clean, it returns a canonical URL path
// for p, eliminating . and .. elements.
//
// The following rules are applied iteratively until no further processing can
// be done:
// 1. Replace multiple slashes with a single slash.
// 2. Eliminate each . path name element (the current directory).
// 3. Eliminate each inner .. path name element (the parent directory)
// along with the non-.. element that precedes it.
// 4. Eliminate .. elements that begin a rooted path:
// that is, replace "/.." by "/" at the beginning of a path.
//
// If the result of this process is an empty string, "/" is returned
func cleanPath(p string) string {
// Turn empty string into "/"
if p == "" {
return "/"
}
n := len(p)
var buf []byte
// Invariants:
// reading from path; r is index of next byte to process.
// writing to buf; w is index of next byte to write.
// path must start with '/'
r := 1
w := 1
if p[0] != '/' {
r = 0
buf = make([]byte, n+1)
buf[0] = '/'
}
trailing := n > 2 && p[n-1] == '/'
// A bit more clunky without a 'lazybuf' like the path package, but the loop
// gets completely inlined (bufApp). So in contrast to the path package this
// loop has no expensive function calls (except 1x make)
for r < n {
switch {
case p[r] == '/':
// empty path element, trailing slash is added after the end
r++
case p[r] == '.' && r+1 == n:
trailing = true
r++
case p[r] == '.' && p[r+1] == '/':
// . element
r++
case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
// .. element: remove to last /
r += 2
if w > 1 {
// can backtrack
w--
if buf == nil {
for w > 1 && p[w] != '/' {
w--
}
} else {
for w > 1 && buf[w] != '/' {
w--
}
}
}
default:
// real path element.
// add slash if needed
if w > 1 {
bufApp(&buf, p, w, '/')
w++
}
// copy element
for r < n && p[r] != '/' {
bufApp(&buf, p, w, p[r])
w++
r++
}
}
}
// re-append trailing slash
if trailing && w > 1 {
bufApp(&buf, p, w, '/')
w++
}
if buf == nil {
return p[:w]
}
return string(buf[:w])
}
// internal helper to lazily create a buffer if necessary
func bufApp(buf *[]byte, s string, w int, c byte) {
if *buf == nil {
if s[w] == c {
return
}
*buf = make([]byte, len(s))
copy(*buf, s[:w])
}
(*buf)[w] = c
}

@@ -1,108 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http/httputil"
"runtime"
)
var (
dunno = []byte("???")
centerDot = []byte("·")
dot = []byte(".")
slash = []byte("/")
)
// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
func Recovery() HandlerFunc {
return RecoveryWithWriter(DefaultErrorWriter)
}
func RecoveryWithWriter(out io.Writer) HandlerFunc {
var logger *log.Logger
if out != nil {
logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags)
}
return func(c *Context) {
defer func() {
if err := recover(); err != nil {
if logger != nil {
stack := stack(3)
httprequest, _ := httputil.DumpRequest(c.Request, false)
logger.Printf("[Recovery] panic recovered:\n%s\n%s\n%s%s", string(httprequest), err, stack, reset)
}
c.AbortWithStatus(500)
}
}()
c.Next()
}
}
// stack returns a nicely formatted stack frame, skipping skip frames
func stack(skip int) []byte {
buf := new(bytes.Buffer) // the returned data
// As we loop, we open files and read them. These variables record the currently
// loaded file.
var lines [][]byte
var lastFile string
for i := skip; ; i++ { // Skip the expected number of frames
pc, file, line, ok := runtime.Caller(i)
if !ok {
break
}
// Print this much at least. If we can't find the source, it won't show.
fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
if file != lastFile {
data, err := ioutil.ReadFile(file)
if err != nil {
continue
}
lines = bytes.Split(data, []byte{'\n'})
lastFile = file
}
fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
}
return buf.Bytes()
}
// source returns a space-trimmed slice of the n'th line.
func source(lines [][]byte, n int) []byte {
n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
if n < 0 || n >= len(lines) {
return dunno
}
return bytes.TrimSpace(lines[n])
}
// function returns, if possible, the name of the function containing the PC.
func function(pc uintptr) []byte {
fn := runtime.FuncForPC(pc)
if fn == nil {
return dunno
}
name := []byte(fn.Name())
// The name includes the path name to the package, which is unnecessary
// since the file name is already included. Plus, it has center dots.
// That is, we see
// runtime/debug.*T·ptrmethod
// and want
// *T.ptrmethod
// Also the package path might contain dots (e.g. code.google.com/...),
// so first eliminate the path prefix
if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
name = name[lastslash+1:]
}
if period := bytes.Index(name, dot); period >= 0 {
name = name[period+1:]
}
name = bytes.Replace(name, centerDot, dot, -1)
return name
}
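A minimal sketch of wiring this middleware into an engine (route and address are illustrative):

package main

import "github.com/gin-gonic/gin"

// Sketch: Recovery turns a panic in a downstream handler into a logged stack trace plus a 500.
func main() {
	r := gin.New()
	r.Use(gin.Recovery())
	r.GET("/panic", func(c *gin.Context) {
		panic("boom") // recovered by the middleware; the client receives a 500
	})
	r.Run(":8080")
}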

View File

@ -1,20 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import "net/http"
type Data struct {
ContentType string
Data []byte
}
func (r Data) Render(w http.ResponseWriter) error {
if len(r.ContentType) > 0 {
w.Header()["Content-Type"] = []string{r.ContentType}
}
w.Write(r.Data)
return nil
}

View File

@ -1,66 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import (
"html/template"
"net/http"
)
type (
HTMLRender interface {
Instance(string, interface{}) Render
}
HTMLProduction struct {
Template *template.Template
}
HTMLDebug struct {
Files []string
Glob string
}
HTML struct {
Template *template.Template
Name string
Data interface{}
}
)
var htmlContentType = []string{"text/html; charset=utf-8"}
func (r HTMLProduction) Instance(name string, data interface{}) Render {
return HTML{
Template: r.Template,
Name: name,
Data: data,
}
}
func (r HTMLDebug) Instance(name string, data interface{}) Render {
return HTML{
Template: r.loadTemplate(),
Name: name,
Data: data,
}
}
func (r HTMLDebug) loadTemplate() *template.Template {
if len(r.Files) > 0 {
return template.Must(template.ParseFiles(r.Files...))
}
if len(r.Glob) > 0 {
return template.Must(template.ParseGlob(r.Glob))
}
panic("the HTML debug render was created without files or glob pattern")
}
func (r HTML) Render(w http.ResponseWriter) error {
writeContentType(w, htmlContentType)
if len(r.Name) == 0 {
return r.Template.Execute(w, r.Data)
}
return r.Template.ExecuteTemplate(w, r.Name, r.Data)
}

View File

@ -1,41 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import (
"encoding/json"
"net/http"
)
type (
JSON struct {
Data interface{}
}
IndentedJSON struct {
Data interface{}
}
)
var jsonContentType = []string{"application/json; charset=utf-8"}
func (r JSON) Render(w http.ResponseWriter) error {
return WriteJSON(w, r.Data)
}
func (r IndentedJSON) Render(w http.ResponseWriter) error {
writeContentType(w, jsonContentType)
jsonBytes, err := json.MarshalIndent(r.Data, "", " ")
if err != nil {
return err
}
w.Write(jsonBytes)
return nil
}
func WriteJSON(w http.ResponseWriter, obj interface{}) error {
writeContentType(w, jsonContentType)
return json.NewEncoder(w).Encode(obj)
}

View File

@ -1,24 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import (
"fmt"
"net/http"
)
type Redirect struct {
Code int
Request *http.Request
Location string
}
func (r Redirect) Render(w http.ResponseWriter) error {
if (r.Code < 300 || r.Code > 308) && r.Code != 201 {
panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code))
}
http.Redirect(w, r.Request, r.Location, r.Code)
return nil
}

View File

@ -1,31 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import "net/http"
type Render interface {
Render(http.ResponseWriter) error
}
var (
_ Render = JSON{}
_ Render = IndentedJSON{}
_ Render = XML{}
_ Render = String{}
_ Render = Redirect{}
_ Render = Data{}
_ Render = HTML{}
_ HTMLRender = HTMLDebug{}
_ HTMLRender = HTMLProduction{}
_ Render = YAML{}
)
func writeContentType(w http.ResponseWriter, value []string) {
header := w.Header()
if val := header["Content-Type"]; len(val) == 0 {
header["Content-Type"] = value
}
}
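Any type satisfying Render plugs into the engine the same way; as a hedged sketch (not part of the original package), a CSV renderer in the style of the JSON and XML ones above:

package render

import "net/http"

// Sketch: a hypothetical CSV renderer following the pattern of the renderers above.
type CSV struct {
	Lines []string
}

var csvContentType = []string{"text/csv; charset=utf-8"}

func (r CSV) Render(w http.ResponseWriter) error {
	writeContentType(w, csvContentType)
	for _, line := range r.Lines {
		if _, err := w.Write([]byte(line + "\n")); err != nil {
			return err
		}
	}
	return nil
}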

View File

@ -1,33 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import (
"fmt"
"io"
"net/http"
)
type String struct {
Format string
Data []interface{}
}
var plainContentType = []string{"text/plain; charset=utf-8"}
func (r String) Render(w http.ResponseWriter) error {
WriteString(w, r.Format, r.Data)
return nil
}
func WriteString(w http.ResponseWriter, format string, data []interface{}) {
writeContentType(w, plainContentType)
if len(data) > 0 {
fmt.Fprintf(w, format, data...)
} else {
io.WriteString(w, format)
}
}

View File

@ -1,21 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import (
"encoding/xml"
"net/http"
)
type XML struct {
Data interface{}
}
var xmlContentType = []string{"application/xml; charset=utf-8"}
func (r XML) Render(w http.ResponseWriter) error {
writeContentType(w, xmlContentType)
return xml.NewEncoder(w).Encode(r.Data)
}

View File

@ -1,29 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package render
import (
"net/http"
"gopkg.in/yaml.v2"
)
type YAML struct {
Data interface{}
}
var yamlContentType = []string{"application/x-yaml; charset=utf-8"}
func (r YAML) Render(w http.ResponseWriter) error {
writeContentType(w, yamlContentType)
bytes, err := yaml.Marshal(r.Data)
if err != nil {
return err
}
w.Write(bytes)
return nil
}

View File

@ -1,116 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"bufio"
"io"
"net"
"net/http"
)
const (
noWritten = -1
defaultStatus = 200
)
type (
ResponseWriter interface {
http.ResponseWriter
http.Hijacker
http.Flusher
http.CloseNotifier
// Returns the HTTP response status code of the current request.
Status() int
// Returns the number of bytes already written into the response http body.
// See Written()
Size() int
// Writes the string into the response body.
WriteString(string) (int, error)
// Returns true if the response body was already written.
Written() bool
// Forces the HTTP header (status code + headers) to be written.
WriteHeaderNow()
}
responseWriter struct {
http.ResponseWriter
size int
status int
}
)
var _ ResponseWriter = &responseWriter{}
func (w *responseWriter) reset(writer http.ResponseWriter) {
w.ResponseWriter = writer
w.size = noWritten
w.status = defaultStatus
}
func (w *responseWriter) WriteHeader(code int) {
if code > 0 && w.status != code {
if w.Written() {
debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code)
}
w.status = code
}
}
func (w *responseWriter) WriteHeaderNow() {
if !w.Written() {
w.size = 0
w.ResponseWriter.WriteHeader(w.status)
}
}
func (w *responseWriter) Write(data []byte) (n int, err error) {
w.WriteHeaderNow()
n, err = w.ResponseWriter.Write(data)
w.size += n
return
}
func (w *responseWriter) WriteString(s string) (n int, err error) {
w.WriteHeaderNow()
n, err = io.WriteString(w.ResponseWriter, s)
w.size += n
return
}
func (w *responseWriter) Status() int {
return w.status
}
func (w *responseWriter) Size() int {
return w.size
}
func (w *responseWriter) Written() bool {
return w.size != noWritten
}
// Implements the http.Hijacker interface
func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if w.size < 0 {
w.size = 0
}
return w.ResponseWriter.(http.Hijacker).Hijack()
}
// Implements the http.CloseNotifier interface
func (w *responseWriter) CloseNotify() <-chan bool {
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Implements the http.Flusher interface
func (w *responseWriter) Flush() {
w.ResponseWriter.(http.Flusher).Flush()
}
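A hedged sketch of the deferred-header contract (responseWriter is unexported, so this assumes package gin; httptest stands in for a real connection):

package gin

import "net/http/httptest"

// Sketch: WriteHeader only records the status; nothing is sent until the first
// Write (or an explicit WriteHeaderNow).
func responseWriterSketch() {
	rec := httptest.NewRecorder()
	w := &responseWriter{}
	w.reset(rec)

	w.WriteHeader(404)      // recorded only
	_ = w.Written()         // false: size is still noWritten
	w.Write([]byte("gone")) // WriteHeaderNow fires here, then the body is written
	_ = w.Status()          // 404
	_ = w.Size()            // 4
}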

View File

@ -1,215 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"net/http"
"path"
"regexp"
"strings"
)
type (
IRouter interface {
IRoutes
Group(string, ...HandlerFunc) *RouterGroup
}
IRoutes interface {
Use(...HandlerFunc) IRoutes
Handle(string, string, ...HandlerFunc) IRoutes
Any(string, ...HandlerFunc) IRoutes
GET(string, ...HandlerFunc) IRoutes
POST(string, ...HandlerFunc) IRoutes
DELETE(string, ...HandlerFunc) IRoutes
PATCH(string, ...HandlerFunc) IRoutes
PUT(string, ...HandlerFunc) IRoutes
OPTIONS(string, ...HandlerFunc) IRoutes
HEAD(string, ...HandlerFunc) IRoutes
StaticFile(string, string) IRoutes
Static(string, string) IRoutes
StaticFS(string, http.FileSystem) IRoutes
}
// RouterGroup is used internally to configure the router. A RouterGroup is associated with a prefix
// and an array of handlers (middleware).
RouterGroup struct {
Handlers HandlersChain
basePath string
engine *Engine
root bool
}
)
var _ IRouter = &RouterGroup{}
// Use adds middleware to the group; see example code on GitHub.
func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes {
group.Handlers = append(group.Handlers, middleware...)
return group.returnObj()
}
// Group creates a new router group. You should add all the routes that have common middlewares or the same path prefix.
// For example, all the routes that use a common middleware for authorization could be grouped.
func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup {
return &RouterGroup{
Handlers: group.combineHandlers(handlers),
basePath: group.calculateAbsolutePath(relativePath),
engine: group.engine,
}
}
func (group *RouterGroup) BasePath() string {
return group.basePath
}
func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes {
absolutePath := group.calculateAbsolutePath(relativePath)
handlers = group.combineHandlers(handlers)
group.engine.addRoute(httpMethod, absolutePath, handlers)
return group.returnObj()
}
// Handle registers a new request handle and middleware with the given path and method.
// The last handler should be the real handler; the others should be middleware that can and should be shared among different routes.
// See the example code on GitHub.
//
// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
// functions can be used.
//
// This function is intended for bulk loading and to allow the usage of less
// frequently used, non-standardized or custom methods (e.g. for internal
// communication with a proxy).
func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes {
if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil {
panic("http method " + httpMethod + " is not valid")
}
return group.handle(httpMethod, relativePath, handlers)
}
// POST is a shortcut for router.Handle("POST", path, handle)
func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes {
return group.handle("POST", relativePath, handlers)
}
// GET is a shortcut for router.Handle("GET", path, handle)
func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes {
return group.handle("GET", relativePath, handlers)
}
// DELETE is a shortcut for router.Handle("DELETE", path, handle)
func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes {
return group.handle("DELETE", relativePath, handlers)
}
// PATCH is a shortcut for router.Handle("PATCH", path, handle)
func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes {
return group.handle("PATCH", relativePath, handlers)
}
// PUT is a shortcut for router.Handle("PUT", path, handle)
func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes {
return group.handle("PUT", relativePath, handlers)
}
// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle)
func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes {
return group.handle("OPTIONS", relativePath, handlers)
}
// HEAD is a shortcut for router.Handle("HEAD", path, handle)
func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes {
return group.handle("HEAD", relativePath, handlers)
}
// Any registers a route that matches all the HTTP methods.
// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE
func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes {
group.handle("GET", relativePath, handlers)
group.handle("POST", relativePath, handlers)
group.handle("PUT", relativePath, handlers)
group.handle("PATCH", relativePath, handlers)
group.handle("HEAD", relativePath, handlers)
group.handle("OPTIONS", relativePath, handlers)
group.handle("DELETE", relativePath, handlers)
group.handle("CONNECT", relativePath, handlers)
group.handle("TRACE", relativePath, handlers)
return group.returnObj()
}
// StaticFile registers a single route in order to serve a single file of the local filesystem.
// router.StaticFile("favicon.ico", "./resources/favicon.ico")
func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes {
if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
panic("URL parameters can not be used when serving a static file")
}
handler := func(c *Context) {
c.File(filepath)
}
group.GET(relativePath, handler)
group.HEAD(relativePath, handler)
return group.returnObj()
}
// Static serves files from the given file system root.
// Internally a http.FileServer is used, therefore http.NotFound is used instead
// of the Router's NotFound handler.
// To use the operating system's file system implementation,
// use:
// router.Static("/static", "/var/www")
func (group *RouterGroup) Static(relativePath, root string) IRoutes {
return group.StaticFS(relativePath, Dir(root, false))
}
// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead.
// By default Gin uses: gin.Dir()
func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes {
if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
panic("URL parameters can not be used when serving a static folder")
}
handler := group.createStaticHandler(relativePath, fs)
urlPattern := path.Join(relativePath, "/*filepath")
// Register GET and HEAD handlers
group.GET(urlPattern, handler)
group.HEAD(urlPattern, handler)
return group.returnObj()
}
func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc {
absolutePath := group.calculateAbsolutePath(relativePath)
fileServer := http.StripPrefix(absolutePath, http.FileServer(fs))
_, nolisting := fs.(*onlyfilesFS)
return func(c *Context) {
if nolisting {
c.Writer.WriteHeader(404)
}
fileServer.ServeHTTP(c.Writer, c.Request)
}
}
func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain {
finalSize := len(group.Handlers) + len(handlers)
if finalSize >= int(abortIndex) {
panic("too many handlers")
}
mergedHandlers := make(HandlersChain, finalSize)
copy(mergedHandlers, group.Handlers)
copy(mergedHandlers[len(group.Handlers):], handlers)
return mergedHandlers
}
func (group *RouterGroup) calculateAbsolutePath(relativePath string) string {
return joinPaths(group.basePath, relativePath)
}
func (group *RouterGroup) returnObj() IRoutes {
if group.root {
return group.engine
}
return group
}
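An illustrative sketch of the grouping and static-file API above (paths and directories are made up):

package main

import "github.com/gin-gonic/gin"

// Sketch: a group shares its path prefix and middleware across all routes registered on it.
func setupRouter() *gin.Engine {
	r := gin.New()

	api := r.Group("/api", gin.Logger())
	{
		api.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })
		api.POST("/items", func(c *gin.Context) { c.Status(201) })
	}

	r.StaticFile("/favicon.ico", "./resources/favicon.ico")
	r.Static("/assets", "./public") // served through http.FileServer

	return r
}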

View File

@ -1,605 +0,0 @@
// Copyright 2013 Julien Schmidt. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
package gin
import (
"strings"
"unicode"
)
// Param is a single URL parameter, consisting of a key and a value.
type Param struct {
Key string
Value string
}
// Params is a Param-slice, as returned by the router.
// The slice is ordered, the first URL parameter is also the first slice value.
// It is therefore safe to read values by the index.
type Params []Param
// Get returns the value of the first Param whose key matches the given name.
// If no matching Param is found, an empty string and false are returned.
func (ps Params) Get(name string) (string, bool) {
for _, entry := range ps {
if entry.Key == name {
return entry.Value, true
}
}
return "", false
}
// ByName returns the value of the first Param whose key matches the given name.
// If no matching Param is found, an empty string is returned.
func (ps Params) ByName(name string) (va string) {
va, _ = ps.Get(name)
return
}
type methodTree struct {
method string
root *node
}
type methodTrees []methodTree
func (trees methodTrees) get(method string) *node {
for _, tree := range trees {
if tree.method == method {
return tree.root
}
}
return nil
}
func min(a, b int) int {
if a <= b {
return a
}
return b
}
func countParams(path string) uint8 {
var n uint
for i := 0; i < len(path); i++ {
if path[i] != ':' && path[i] != '*' {
continue
}
n++
}
if n >= 255 {
return 255
}
return uint8(n)
}
type nodeType uint8
const (
static nodeType = iota // default
root
param
catchAll
)
type node struct {
path string
wildChild bool
nType nodeType
maxParams uint8
indices string
children []*node
handlers HandlersChain
priority uint32
}
// increments priority of the given child and reorders if necessary
func (n *node) incrementChildPrio(pos int) int {
n.children[pos].priority++
prio := n.children[pos].priority
// adjust position (move to front)
newPos := pos
for newPos > 0 && n.children[newPos-1].priority < prio {
// swap node positions
tmpN := n.children[newPos-1]
n.children[newPos-1] = n.children[newPos]
n.children[newPos] = tmpN
newPos--
}
// build new index char string
if newPos != pos {
n.indices = n.indices[:newPos] + // unchanged prefix, might be empty
n.indices[pos:pos+1] + // the index char we move
n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos'
}
return newPos
}
// addRoute adds a node with the given handle to the path.
// Not concurrency-safe!
func (n *node) addRoute(path string, handlers HandlersChain) {
fullPath := path
n.priority++
numParams := countParams(path)
// non-empty tree
if len(n.path) > 0 || len(n.children) > 0 {
walk:
for {
// Update maxParams of the current node
if numParams > n.maxParams {
n.maxParams = numParams
}
// Find the longest common prefix.
// This also implies that the common prefix contains no ':' or '*'
// since the existing key can't contain those chars.
i := 0
max := min(len(path), len(n.path))
for i < max && path[i] == n.path[i] {
i++
}
// Split edge
if i < len(n.path) {
child := node{
path: n.path[i:],
wildChild: n.wildChild,
indices: n.indices,
children: n.children,
handlers: n.handlers,
priority: n.priority - 1,
}
// Update maxParams (max of all children)
for i := range child.children {
if child.children[i].maxParams > child.maxParams {
child.maxParams = child.children[i].maxParams
}
}
n.children = []*node{&child}
// []byte for proper unicode char conversion, see #65
n.indices = string([]byte{n.path[i]})
n.path = path[:i]
n.handlers = nil
n.wildChild = false
}
// Make new node a child of this node
if i < len(path) {
path = path[i:]
if n.wildChild {
n = n.children[0]
n.priority++
// Update maxParams of the child node
if numParams > n.maxParams {
n.maxParams = numParams
}
numParams--
// Check if the wildcard matches
if len(path) >= len(n.path) && n.path == path[:len(n.path)] {
// check for longer wildcard, e.g. :name and :names
if len(n.path) >= len(path) || path[len(n.path)] == '/' {
continue walk
}
}
panic("path segment '" + path +
"' conflicts with existing wildcard '" + n.path +
"' in path '" + fullPath + "'")
}
c := path[0]
// slash after param
if n.nType == param && c == '/' && len(n.children) == 1 {
n = n.children[0]
n.priority++
continue walk
}
// Check if a child with the next path byte exists
for i := 0; i < len(n.indices); i++ {
if c == n.indices[i] {
i = n.incrementChildPrio(i)
n = n.children[i]
continue walk
}
}
// Otherwise insert it
if c != ':' && c != '*' {
// []byte for proper unicode char conversion, see #65
n.indices += string([]byte{c})
child := &node{
maxParams: numParams,
}
n.children = append(n.children, child)
n.incrementChildPrio(len(n.indices) - 1)
n = child
}
n.insertChild(numParams, path, fullPath, handlers)
return
} else if i == len(path) { // Make node a (in-path) leaf
if n.handlers != nil {
panic("handlers are already registered for path ''" + fullPath + "'")
}
n.handlers = handlers
}
return
}
} else { // Empty tree
n.insertChild(numParams, path, fullPath, handlers)
n.nType = root
}
}
func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers HandlersChain) {
var offset int // already handled bytes of the path
// find prefix until first wildcard (beginning with ':'' or '*'')
for i, max := 0, len(path); numParams > 0; i++ {
c := path[i]
if c != ':' && c != '*' {
continue
}
// find wildcard end (either '/' or path end)
end := i + 1
for end < max && path[end] != '/' {
switch path[end] {
// the wildcard name must not contain ':' and '*'
case ':', '*':
panic("only one wildcard per path segment is allowed, has: '" +
path[i:] + "' in path '" + fullPath + "'")
default:
end++
}
}
// check if this node has existing children which would be
// unreachable if we insert the wildcard here
if len(n.children) > 0 {
panic("wildcard route '" + path[i:end] +
"' conflicts with existing children in path '" + fullPath + "'")
}
// check if the wildcard has a name
if end-i < 2 {
panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
}
if c == ':' { // param
// split path at the beginning of the wildcard
if i > 0 {
n.path = path[offset:i]
offset = i
}
child := &node{
nType: param,
maxParams: numParams,
}
n.children = []*node{child}
n.wildChild = true
n = child
n.priority++
numParams--
// if the path doesn't end with the wildcard, then there
// will be another non-wildcard subpath starting with '/'
if end < max {
n.path = path[offset:end]
offset = end
child := &node{
maxParams: numParams,
priority: 1,
}
n.children = []*node{child}
n = child
}
} else { // catchAll
if end != max || numParams > 1 {
panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
}
if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
}
// currently fixed width 1 for '/'
i--
if path[i] != '/' {
panic("no / before catch-all in path '" + fullPath + "'")
}
n.path = path[offset:i]
// first node: catchAll node with empty path
child := &node{
wildChild: true,
nType: catchAll,
maxParams: 1,
}
n.children = []*node{child}
n.indices = string(path[i])
n = child
n.priority++
// second node: node holding the variable
child = &node{
path: path[i:],
nType: catchAll,
maxParams: 1,
handlers: handlers,
priority: 1,
}
n.children = []*node{child}
return
}
}
// insert remaining path part and handle to the leaf
n.path = path[offset:]
n.handlers = handlers
}
// Returns the handle registered with the given path (key). The values of
// wildcards are appended to the given Params slice.
// If no handle can be found, a TSR (trailing slash redirect) recommendation is
// made if a handle exists with an extra (or a missing) trailing slash for the
// given path.
func (n *node) getValue(path string, po Params) (handlers HandlersChain, p Params, tsr bool) {
p = po
walk: // Outer loop for walking the tree
for {
if len(path) > len(n.path) {
if path[:len(n.path)] == n.path {
path = path[len(n.path):]
// If this node does not have a wildcard (param or catchAll)
// child, we can just look up the next child node and continue
// to walk down the tree
if !n.wildChild {
c := path[0]
for i := 0; i < len(n.indices); i++ {
if c == n.indices[i] {
n = n.children[i]
continue walk
}
}
// Nothing found.
// We can recommend to redirect to the same URL without a
// trailing slash if a leaf exists for that path.
tsr = (path == "/" && n.handlers != nil)
return
}
// handle wildcard child
n = n.children[0]
switch n.nType {
case param:
// find param end (either '/' or path end)
end := 0
for end < len(path) && path[end] != '/' {
end++
}
// save param value
if cap(p) < int(n.maxParams) {
p = make(Params, 0, n.maxParams)
}
i := len(p)
p = p[:i+1] // expand slice within preallocated capacity
p[i].Key = n.path[1:]
p[i].Value = path[:end]
// we need to go deeper!
if end < len(path) {
if len(n.children) > 0 {
path = path[end:]
n = n.children[0]
continue walk
}
// ... but we can't
tsr = (len(path) == end+1)
return
}
if handlers = n.handlers; handlers != nil {
return
} else if len(n.children) == 1 {
// No handle found. Check if a handle for this path + a
// trailing slash exists for TSR recommendation
n = n.children[0]
tsr = (n.path == "/" && n.handlers != nil)
}
return
case catchAll:
// save param value
if cap(p) < int(n.maxParams) {
p = make(Params, 0, n.maxParams)
}
i := len(p)
p = p[:i+1] // expand slice within preallocated capacity
p[i].Key = n.path[2:]
p[i].Value = path
handlers = n.handlers
return
default:
panic("invalid node type")
}
}
} else if path == n.path {
// We should have reached the node containing the handle.
// Check if this node has a handle registered.
if handlers = n.handlers; handlers != nil {
return
}
if path == "/" && n.wildChild && n.nType != root {
tsr = true
return
}
// No handle found. Check if a handle for this path + a
// trailing slash exists for trailing slash recommendation
for i := 0; i < len(n.indices); i++ {
if n.indices[i] == '/' {
n = n.children[i]
tsr = (len(n.path) == 1 && n.handlers != nil) ||
(n.nType == catchAll && n.children[0].handlers != nil)
return
}
}
return
}
// Nothing found. We can recommend to redirect to the same URL with an
// extra trailing slash if a leaf exists for that path
tsr = (path == "/") ||
(len(n.path) == len(path)+1 && n.path[len(path)] == '/' &&
path == n.path[:len(n.path)-1] && n.handlers != nil)
return
}
}
// Makes a case-insensitive lookup of the given path and tries to find a handler.
// It can optionally also fix trailing slashes.
// It returns the case-corrected path and a bool indicating whether the lookup
// was successful.
func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) {
ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory
// Outer loop for walking the tree
for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) {
path = path[len(n.path):]
ciPath = append(ciPath, n.path...)
if len(path) > 0 {
// If this node does not have a wildcard (param or catchAll) child,
// we can just look up the next child node and continue to walk down
// the tree
if !n.wildChild {
r := unicode.ToLower(rune(path[0]))
for i, index := range n.indices {
// must use recursive approach since both index and
// ToLower(index) could exist. We must check both.
if r == unicode.ToLower(index) {
out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash)
if found {
return append(ciPath, out...), true
}
}
}
// Nothing found. We can recommend to redirect to the same URL
// without a trailing slash if a leaf exists for that path
found = (fixTrailingSlash && path == "/" && n.handlers != nil)
return
}
n = n.children[0]
switch n.nType {
case param:
// find param end (either '/' or path end)
k := 0
for k < len(path) && path[k] != '/' {
k++
}
// add param value to case insensitive path
ciPath = append(ciPath, path[:k]...)
// we need to go deeper!
if k < len(path) {
if len(n.children) > 0 {
path = path[k:]
n = n.children[0]
continue
}
// ... but we can't
if fixTrailingSlash && len(path) == k+1 {
return ciPath, true
}
return
}
if n.handlers != nil {
return ciPath, true
} else if fixTrailingSlash && len(n.children) == 1 {
// No handle found. Check if a handle for this path + a
// trailing slash exists
n = n.children[0]
if n.path == "/" && n.handlers != nil {
return append(ciPath, '/'), true
}
}
return
case catchAll:
return append(ciPath, path...), true
default:
panic("invalid node type")
}
} else {
// We should have reached the node containing the handle.
// Check if this node has a handle registered.
if n.handlers != nil {
return ciPath, true
}
// No handle found.
// Try to fix the path by adding a trailing slash
if fixTrailingSlash {
for i := 0; i < len(n.indices); i++ {
if n.indices[i] == '/' {
n = n.children[i]
if (len(n.path) == 1 && n.handlers != nil) ||
(n.nType == catchAll && n.children[0].handlers != nil) {
return append(ciPath, '/'), true
}
return
}
}
}
return
}
}
// Nothing found.
// Try to fix the path by adding / removing a trailing slash
if fixTrailingSlash {
if path == "/" {
return ciPath, true
}
if len(path)+1 == len(n.path) && n.path[len(path)] == '/' &&
strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) &&
n.handlers != nil {
return append(ciPath, n.path...), true
}
}
return
}
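How these node types surface through the public API, as a hedged sketch (paths are illustrative):

package main

import "github.com/gin-gonic/gin"

// Sketch: ":name" becomes a param node (matches one segment); "*filepath" becomes a
// catchAll node (matches the rest of the path and is only allowed at the end).
func registerWildcardRoutes(r *gin.Engine) {
	r.GET("/user/:name", func(c *gin.Context) {
		c.String(200, "hello %s", c.Param("name"))
	})
	r.GET("/static/*filepath", func(c *gin.Context) {
		c.String(200, "requested %s", c.Param("filepath"))
	})
}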

View File

@ -1,154 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"encoding/xml"
"net/http"
"os"
"path"
"reflect"
"runtime"
"strings"
)
const BindKey = "_gin-gonic/gin/bindkey"
func Bind(val interface{}) HandlerFunc {
value := reflect.ValueOf(val)
if value.Kind() == reflect.Ptr {
panic(`Bind struct can not be a pointer. Example:
Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{})
`)
}
typ := value.Type()
return func(c *Context) {
obj := reflect.New(typ).Interface()
if c.Bind(obj) == nil {
c.Set(BindKey, obj)
}
}
}
func WrapF(f http.HandlerFunc) HandlerFunc {
return func(c *Context) {
f(c.Writer, c.Request)
}
}
func WrapH(h http.Handler) HandlerFunc {
return func(c *Context) {
h.ServeHTTP(c.Writer, c.Request)
}
}
type H map[string]interface{}
// MarshalXML allows type H to be used with xml.Marshal
func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
start.Name = xml.Name{
Space: "",
Local: "map",
}
if err := e.EncodeToken(start); err != nil {
return err
}
for key, value := range h {
elem := xml.StartElement{
Name: xml.Name{Space: "", Local: key},
Attr: []xml.Attr{},
}
if err := e.EncodeElement(value, elem); err != nil {
return err
}
}
if err := e.EncodeToken(xml.EndElement{Name: start.Name}); err != nil {
return err
}
return nil
}
func assert1(guard bool, text string) {
if !guard {
panic(text)
}
}
func filterFlags(content string) string {
for i, char := range content {
if char == ' ' || char == ';' {
return content[:i]
}
}
return content
}
func chooseData(custom, wildcard interface{}) interface{} {
if custom == nil {
if wildcard == nil {
panic("negotiation config is invalid")
}
return wildcard
}
return custom
}
func parseAccept(acceptHeader string) []string {
parts := strings.Split(acceptHeader, ",")
out := make([]string, 0, len(parts))
for _, part := range parts {
index := strings.IndexByte(part, ';')
if index >= 0 {
part = part[0:index]
}
part = strings.TrimSpace(part)
if len(part) > 0 {
out = append(out, part)
}
}
return out
}
func lastChar(str string) uint8 {
size := len(str)
if size == 0 {
panic("The length of the string can't be 0")
}
return str[size-1]
}
func nameOfFunction(f interface{}) string {
return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
}
func joinPaths(absolutePath, relativePath string) string {
if len(relativePath) == 0 {
return absolutePath
}
finalPath := path.Join(absolutePath, relativePath)
appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/'
if appendSlash {
return finalPath + "/"
}
return finalPath
}
func resolveAddress(addr []string) string {
switch len(addr) {
case 0:
if port := os.Getenv("PORT"); len(port) > 0 {
debugPrint("Environment variable PORT=\"%s\"", port)
return ":" + port
}
debugPrint("Environment variable PORT is undefined. Using port :8080 by default")
return ":8080"
case 1:
return addr[0]
default:
panic("too much parameters")
}
}
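Expected behaviour of joinPaths, as a sketch (the helper is unexported, so this assumes package gin):

package gin

import "fmt"

// Sketch: joinPaths re-appends the trailing slash that path.Join strips.
func joinPathsExamples() {
	fmt.Println(joinPaths("/api", "users"))  // "/api/users"
	fmt.Println(joinPaths("/api", "users/")) // "/api/users/"
	fmt.Println(joinPaths("/api", ""))       // "/api"
}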

View File

@ -1 +0,0 @@
box: wercker/default

View File

@ -1,31 +0,0 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,43 +0,0 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
install:
go install
test: install generate-test-pbs
go test
generate-test-pbs:
make install
make -C testdata
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
make

View File

@ -1,223 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
in := reflect.ValueOf(pb)
if in.IsNil() {
return pb
}
out := reflect.New(in.Type().Elem())
// out is empty so a merge is a deep copy.
mergeStruct(out.Elem(), in.Elem())
return out.Interface().(Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
// Explicit test prior to mergeStruct so that mistyped nils will fail
panic("proto: type mismatch")
}
if in.IsNil() {
// Merging nil into non-nil is a quiet no-op
return
}
mergeStruct(out.Elem(), in.Elem())
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}
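A hedged sketch of the Clone/Merge semantics above, valid for any generated message type:

package main

import "github.com/golang/protobuf/proto"

// Sketch: Clone yields an independent deep copy; Merge folds src's set fields into dst
// (scalars overwrite, repeated fields append). Merge panics if the concrete types differ
// or dst is nil.
func cloneThenMerge(dst, src proto.Message) proto.Message {
	cp := proto.Clone(src)
	proto.Merge(dst, cp)
	return dst
}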

View File

@ -1,868 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
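// Worked example: 300 is encoded on the wire as the two bytes 0xAC 0x02.
// The byte 0xAC carries the low seven bits (0x2C = 44) with the continuation
// bit set, and 0x02 contributes 2<<7 = 256, so:
//
//	x, n := DecodeVarint([]byte{0xAC, 0x02}) // x == 300, n == 2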
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// TODO: check if we can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
return UnmarshalMerge(buf, pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
typ, base, err := getbase(pb)
if err != nil {
return err
}
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
if collectStats {
stats.Decode++
}
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_String(base, p.field) = &s
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
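// Worked example for the packed decoder above: a packed repeated int32 field
// with number 4 and values [3, 270, 86942] arrives on the wire as
//
//	0x22 0x06 0x03 0x8E 0x02 0x9E 0xA7 0x05
//
// where 0x22 is the key (field 4, wire type 2), 0x06 is the byte length of the
// payload, and the remaining bytes are the three varint-encoded values. By the
// time dec_slice_packed_int32 runs the key has already been consumed, so it
// reads the length and then decodes varints until fin is reached.
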
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() {
keyelem = reflect.Zero(p.mtype.Key())
}
if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
}
v.SetMapIndex(keyelem, valelem)
return nil
}
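// Worked example for the map decoder above: one entry of a map<int32, string>
// field with value {7: "ab"} is encoded as the two-field message
//
//	0x08 0x07  0x12 0x02 0x61 0x62
//
// i.e. key (field 1, varint) = 7 followed by value (field 2, length-delimited)
// = "ab". dec_new_map reads exactly these two sub-fields, matching the
// single-byte tagcodes of p.mkeyprop and p.mvalprop, and then inserts the
// decoded pair into the map.
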
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
return err
}
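// Illustrative note for the slice-of-struct decoders above: repeated message
// fields are never packed, so each occurrence of the field arrives as its own
// key/length/payload record and triggers one call to dec_slice_struct, which
// appends one freshly allocated element. For a repeated message field with
// number 3, two elements appear on the wire as two independent records, each
// starting with the key byte 0x1A (field 3, wire type 2).
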

File diff suppressed because it is too large

View File

@ -1,276 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
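// Usage sketch for Equal, assuming the hypothetical generated type pb.Test
// from the package documentation (Label *string, Reps []int64):
//
//	a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
//	b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
//	proto.Equal(a, b) // true: same type, fields pairwise equal
//	b.Reps = nil
//	proto.Equal(a, b) // false: the repeated fields differ in length
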
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) {
return false
}
return true
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// em1 and em2 are extension maps.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1, m2 := e1.value, e2.value
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}

View File

@ -1,399 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
// Check the extended type.
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
func encodeExtensionMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func sizeExtensionMap(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
_, ok := pb.ExtensionMap()[extension.Field]
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
}
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non-int32 reflect.Value directly,
// set it as an int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem()
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if o.index >= len(o.buf) {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := pb.(extendableProto)
if !ok {
err = errors.New("proto: not an extendable proto")
return
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
if err := checkExtensionTypes(pb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
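// Usage sketch for the extension accessors above, assuming a hypothetical
// extendable message type pb.MyMessage and a generated descriptor pb.E_Ext
// whose ExtensionType is *string:
//
//	msg := &pb.MyMessage{}
//	if err := proto.SetExtension(msg, pb.E_Ext, proto.String("x")); err != nil {
//		// handle err
//	}
//	_ = proto.HasExtension(msg, pb.E_Ext) // true
//	v, err := proto.GetExtension(msg, pb.E_Ext)
//	if err == nil {
//		_ = *(v.(*string)) // "x"
//	}
//	proto.ClearExtension(msg, pb.E_Ext)
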

View File

@ -1,894 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // write point
// pools of basic types to amortize allocation.
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
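// For example, with the FOO_name map shown in the package documentation,
// EnumName(FOO_name, 17) returns "X", while an unregistered value such as
// EnumName(FOO_name, 42) falls back to the decimal string "42".
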
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
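// Usage sketch for SetDefaults, reusing the generated pb.Test type from the
// package documentation (whose type field declares default=77):
//
//	msg := &pb.Test{Label: proto.String("hello")}
//	proto.SetDefaults(msg)
//	// msg.Type now points at an int32 holding Default_Test_Type (77);
//	// fields without a declared default are left unset.
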
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a slice of the fields,
// with its scalar fields set to their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{
vs: vs,
// default Less function: textual comparison
less: func(a, b reflect.Value) bool {
return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
},
}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
// numeric keys are sorted numerically.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
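// Worked example for the key ordering above: string keys sort textually, so
// "a" < "b" and "10" < "9", whereas int32/int64 and uint32/uint64 keys use the
// numeric comparators installed in mapKeys, so 9 < 10 as expected. Either way
// the encoding of map fields stays deterministic across runs.
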
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

View File

@ -1,280 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
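// Worked example of the MessageSet wire format modeled above: an item whose
// type_id is 12345 and whose contained message encodes to the bytes M is
// written as
//
//	0x0B  0x10 0xB9 0x60  0x1A <len(M) as varint> M...  0x0C
//
// i.e. start-group for field 1, the varint type_id (field 2), the
// length-delimited message (field 3), and the matching end-group tag.
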
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
if ms.find(pb) != nil {
return true
}
return false
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
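// For example, skipVarint([]byte{0x96, 0x01, 0x0A}) returns []byte{0x0A}: the
// first two bytes form one varint (0x96 has the continuation bit set, 0x01
// does not), and everything after that varint is handed back untouched.
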
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
if err := encodeExtensionMap(m); err != nil {
return nil, err
}
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}
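// Generated code registers each message-set extension once at init time,
// along the lines of (FooExtension and the field number are hypothetical):
//
//	RegisterMessageSetType((*FooExtension)(nil), 12345, "pkg.FooExtension")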


@@ -1,479 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
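// A note on the slab reuse above: the Buffer keeps pools such as o.int32s
// (their size, uint32PoolSize, is defined elsewhere in this package), and each
// Set hands out the first element and then advances the slice, e.g. (a sketch,
// where p and q stand for word32 values wrapping *int32 fields):
//
//	word32_Set(p, o, 7) // fills o.int32s once, points the field at &o.int32s[0]
//	word32_Set(q, o, 9) // reuses the remaining slab, no new allocation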
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(x))
case reflect.Uint64:
elem.SetUint(x)
case reflect.Float64:
elem.SetFloat(float64(math.Float64frombits(x)))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return uint64(elem.Uint())
case reflect.Float64:
return math.Float64bits(float64(elem.Float()))
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}


@@ -1,266 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"unsafe"
)
// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != ^field(0)
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *p is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *p to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
*p = &o.uint32s[0]
o.uint32s = o.uint32s[1:]
}
// Get gets the value pointed at by *p.
func word32_Get(p word32) uint32 {
return **p
}
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
}
// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
return *p
}
// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
*p = &o.uint64s[0]
o.uint64s = o.uint64s[1:]
}
func word64_IsNil(p word64) bool {
return *p == nil
}
func word64_Get(p word64) uint64 {
return **p
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
*p = x
}
func word64Val_Get(p word64Val) uint64 {
return *p
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}


@@ -1,850 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// A oneofMarshaler does the marshaling for all oneof fields in a message.
type oneofMarshaler func(Message, *Buffer) error
// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
// A oneofSizer does the sizing for all oneof fields in a message.
type oneofSizer func(Message) int
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
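// A small sketch of the fast/slow split above:
//
//	var m tagMap
//	m.put(3, 0)    // grows fastTags to {-1, -1, -1, 0}
//	m.get(3)       // -> (0, true), a plain slice lookup
//	m.get(2)       // -> (_, false); the -1 placeholder means "not present"
//	m.put(5000, 1) // 5000 >= tagMapFastLimit, so it goes into the slowTags map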
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
oneofMarshaler oneofMarshaler
oneofUnmarshaler oneofUnmarshaler
oneofSizer oneofSizer
stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
def_uint64 uint64
enc encoder
valEnc valueEncoder // set for bool and numeric types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s = ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break
}
}
}
}
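// For example, parsing the tag quoted above yields (a sketch):
//
//	p := new(Properties)
//	p.Parse("bytes,49,opt,name=foo,def=hello!")
//	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
//	// p.Optional == true, p.OrigName == "foo",
//	// p.HasDefault == true, p.Default == "hello!"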
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
case reflect.Int32:
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.Float32:
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.String:
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
}
case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() {
default:
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte
// This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
}
}
case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
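// The tag code precalculated above is just the varint of (Tag<<3 | wire).
// For example, with Tag == 3 and WireVarint the value is 3<<3|0 == 24, so
// tagcode is the single byte {0x18}; with Tag == 49 and WireBytes it is
// 49<<3|2 == 394, which encodes as the two bytes {0x8a, 0x03}.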
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isMarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isMarshaler")
}
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isUnmarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isUnmarshaler")
}
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" {
return
}
p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp)
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
p.OrigName = oneof
}
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
prop.stype = t
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
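// Generated code typically registers its enums at init time, and text parsing
// looks the values up again, roughly as follows (pkg.Color is a hypothetical
// enum type):
//
//	RegisterEnum("pkg.Color", nil, map[string]int32{"RED": 0, "GREEN": 1})
//	EnumValueMap("pkg.Color")["GREEN"] // == 1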
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypes = make(map[string]reflect.Type)
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypes[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
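// Generated code registers each message type under its fully-qualified proto
// name, along the lines of (Foo is a hypothetical message type):
//
//	RegisterType((*Foo)(nil), "pkg.Foo")
//	MessageName(&Foo{})    // == "pkg.Foo"
//	MessageType("pkg.Foo") // == reflect.TypeOf((*Foo)(nil))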


@@ -1,849 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Printf("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func requiresQuotes(u string) bool {
// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
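// For example, requiresQuotes("type.googleapis.com/pkg.Foo") is false, since
// every rune falls in the allowed set, while a URL containing characters such
// as '%' or '?' would need to be quoted.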
// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if the value in sv can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := tm.writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if pv.Type().Implements(extendableProtoType) {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := tm.writeStruct(w, v); err != nil {
return err
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
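// For example, the non-printable byte 0x01 is emitted as the octal escape \001
// rather than Go's \x01, and the output is always wrapped in double quotes.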
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep := pv.Interface().(extendableProto)
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m := ep.ExtensionMap()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: tm.Compact,
}
if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := tm.writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
return buf.String()
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// TODO: consider removing some of the Marshal functions below.
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
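
As a rough illustration (not part of this file), the exported helpers above might be used as in the sketch below; `pb.Example`, its `Name` field, and the import path are hypothetical placeholders for generated code.

```go
package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/example" // hypothetical generated package
)

func main() {
	msg := &pb.Example{Name: proto.String("demo")} // hypothetical message type

	// Multi-line text form, written directly to an io.Writer.
	if err := proto.MarshalText(os.Stdout, msg); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}

	// Compact one-line form returned as a string.
	fmt.Println(proto.CompactTextString(msg))
}
```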

View File

@ -1,872 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
base := 8
ss := s[:2]
s = s[2:]
if r == 'x' || r == 'X' {
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
if err != nil {
return "", "", err
}
return string([]byte{byte(i)}), s, nil
case 'u', 'U':
n := 4
if r == 'U' {
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
}
s = s[n:]
return string(bs), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension or an Any.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
extName, err := p.consumeExtName()
if err != nil {
return err
}
if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
continue
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == extName {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", extName)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(extendableProto)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
sv.Field(oop.Field).Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// Either "true", "false", 1 or 0.
switch tok.value {
case "true", "1":
fv.SetBool(true)
return nil
case "false", "0":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
return pe
}
return nil
}
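
For context, a minimal sketch of the parsing entry point above; again `pb.Example` and its getter stand in for hypothetical generated code.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/example" // hypothetical generated package
)

func main() {
	msg := new(pb.Example) // hypothetical message type
	// UnmarshalText resets msg first, then parses the text-format input.
	if err := proto.UnmarshalText(`name: "demo"`, msg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(msg.GetName()) // hypothetical generated getter
}
```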

View File

@ -1,6 +0,0 @@
language: go
sudo: false
go:
- 1.3
- 1.4
- tip

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Manuel Martínez-Almeida
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -1,54 +0,0 @@
# Server-Sent Events [![GoDoc](https://godoc.org/github.com/manucorporat/sse?status.svg)](https://godoc.org/github.com/manucorporat/sse) [![Build Status](https://travis-ci.org/manucorporat/sse.svg)](https://travis-ci.org/manucorporat/sse)
Server-sent events (SSE) is a technology that lets a browser receive automatic updates from a server over an HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5 by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/).
- [Real-world demonstration using Gin](http://sse.getgin.io/)
- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/)
- [Browser support](http://caniuse.com/#feat=eventsource)
## Sample code
```go
import "github.com/manucorporat/sse"
func httpHandler(w http.ResponseWriter, req *http.Request) {
// data can be a primitive like a string, an integer or a float
sse.Encode(w, sse.Event{
Event: "message",
Data: "some data\nmore data",
})
// also a complex type, like a map, a struct or a slice
sse.Encode(w, sse.Event{
Id: "124",
Event: "message",
Data: map[string]interface{}{
"user": "manu",
"date": time.Now().Unix(),
"content": "hi!",
},
})
}
```
```
event: message
data: some data\\nmore data
id: 124
event: message
data: {"content":"hi!","date":1431540810,"user":"manu"}
```
## Content-Type
```go
fmt.Println(sse.ContentType)
```
```
text/event-stream
```
## Decoding support
There is a client-side implementation of SSE coming soon.

View File

@ -1,116 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package sse
import (
"bytes"
"io"
"io/ioutil"
"strconv"
)
type decoder struct {
events []Event
}
func Decode(r io.Reader) ([]Event, error) {
var dec decoder
return dec.decode(r)
}
func (d *decoder) dispatchEvent(event Event, data string) {
dataLength := len(data)
if dataLength > 0 {
//If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer.
data = data[:dataLength-1]
dataLength--
}
if dataLength == 0 && event.Event == "" {
return
}
if event.Event == "" {
event.Event = "message"
}
event.Data = data
d.events = append(d.events, event)
}
func (d *decoder) decode(r io.Reader) ([]Event, error) {
buf, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
var currentEvent Event
var dataBuffer *bytes.Buffer = new(bytes.Buffer)
// TODO (and unit tests)
// Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair,
// a single U+000A LINE FEED (LF) character,
// or a single U+000D CARRIAGE RETURN (CR) character.
lines := bytes.Split(buf, []byte{'\n'})
for _, line := range lines {
if len(line) == 0 {
// If the line is empty (a blank line). Dispatch the event.
d.dispatchEvent(currentEvent, dataBuffer.String())
// reset current event and data buffer
currentEvent = Event{}
dataBuffer.Reset()
continue
}
if line[0] == byte(':') {
// If the line starts with a U+003A COLON character (:), ignore the line.
continue
}
var field, value []byte
colonIndex := bytes.IndexRune(line, ':')
if colonIndex != -1 {
// If the line contains a U+003A COLON character (:),
// Collect the characters on the line before the first U+003A COLON character (:),
// and let field be that string.
field = line[:colonIndex]
// Collect the characters on the line after the first U+003A COLON character (:),
// and let value be that string.
value = line[colonIndex+1:]
// If value starts with a single U+0020 SPACE character, remove it from value.
if len(value) > 0 && value[0] == ' ' {
value = value[1:]
}
} else {
// Otherwise, the string is not empty but does not contain a U+003A COLON character (:).
// Use the whole line as the field name, and the empty string as the field value.
field = line
value = []byte{}
}
// The steps to process the field given a field name and a field value depend on the field name,
// as given in the following list. Field names must be compared literally,
// with no case folding performed.
switch string(field) {
case "event":
// Set the event name buffer to field value.
currentEvent.Event = string(value)
case "id":
// Set the event stream's last event ID to the field value.
currentEvent.Id = string(value)
case "retry":
// If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9),
// then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer.
// Otherwise, ignore the field.
currentEvent.Id = string(value)
case "data":
// Append the field value to the data buffer,
dataBuffer.Write(value)
// then append a single U+000A LINE FEED (LF) character to the data buffer.
dataBuffer.WriteString("\n")
default:
// Otherwise, the field is ignored.
continue
}
}
// Once the end of the file is reached, the user agent must dispatch the event one final time.
d.dispatchEvent(currentEvent, dataBuffer.String())
return d.events, nil
}
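
A minimal sketch of how the Decode helper above might be consumed; the raw stream literal is illustrative only.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/manucorporat/sse"
)

func main() {
	raw := "event: message\ndata: hi!\n\n"
	events, err := sse.Decode(strings.NewReader(raw))
	if err != nil {
		panic(err)
	}
	for _, ev := range events {
		// Each Event carries the event name and the accumulated data payload.
		fmt.Printf("%s -> %v\n", ev.Event, ev.Data)
	}
}
```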

View File

@ -1,106 +0,0 @@
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package sse
import (
"encoding/json"
"fmt"
"io"
"net/http"
"reflect"
"strconv"
"strings"
)
// Server-Sent Events
// W3C Working Draft 29 October 2009
// http://www.w3.org/TR/2009/WD-eventsource-20091029/
const ContentType = "text/event-stream"
var contentType = []string{ContentType}
var noCache = []string{"no-cache"}
var fieldReplacer = strings.NewReplacer(
"\n", "\\n",
"\r", "\\r")
var dataReplacer = strings.NewReplacer(
"\n", "\ndata:",
"\r", "\\r")
type Event struct {
Event string
Id string
Retry uint
Data interface{}
}
func Encode(writer io.Writer, event Event) error {
w := checkWriter(writer)
writeId(w, event.Id)
writeEvent(w, event.Event)
writeRetry(w, event.Retry)
return writeData(w, event.Data)
}
func writeId(w stringWriter, id string) {
if len(id) > 0 {
w.WriteString("id:")
fieldReplacer.WriteString(w, id)
w.WriteString("\n")
}
}
func writeEvent(w stringWriter, event string) {
if len(event) > 0 {
w.WriteString("event:")
fieldReplacer.WriteString(w, event)
w.WriteString("\n")
}
}
func writeRetry(w stringWriter, retry uint) {
if retry > 0 {
w.WriteString("retry:")
w.WriteString(strconv.FormatUint(uint64(retry), 10))
w.WriteString("\n")
}
}
func writeData(w stringWriter, data interface{}) error {
w.WriteString("data:")
switch kindOfData(data) {
case reflect.Struct, reflect.Slice, reflect.Map:
err := json.NewEncoder(w).Encode(data)
if err != nil {
return err
}
w.WriteString("\n")
default:
dataReplacer.WriteString(w, fmt.Sprint(data))
w.WriteString("\n\n")
}
return nil
}
func (r Event) Render(w http.ResponseWriter) error {
header := w.Header()
header["Content-Type"] = contentType
if _, exist := header["Cache-Control"]; !exist {
header["Cache-Control"] = noCache
}
return Encode(w, r)
}
func kindOfData(data interface{}) reflect.Kind {
value := reflect.ValueOf(data)
valueType := value.Kind()
if valueType == reflect.Ptr {
valueType = value.Elem().Kind()
}
return valueType
}

View File

@ -1,24 +0,0 @@
package sse
import "io"
type stringWriter interface {
io.Writer
WriteString(string) (int, error)
}
type stringWrapper struct {
io.Writer
}
func (w stringWrapper) WriteString(str string) (int, error) {
return w.Writer.Write([]byte(str))
}
func checkWriter(writer io.Writer) stringWriter {
if w, ok := writer.(stringWriter); ok {
return w
} else {
return stringWrapper{writer}
}
}

View File

@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -1,156 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key return the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

View File

@ -1,72 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, CancelFunc(f)
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, CancelFunc(f)
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}

View File

@ -1,300 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
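//
// A hedged sketch (the key type and values below are illustrative, not part of
// this package); an unexported key type avoids collisions between packages:
//
//  type ctxKey string
//
//  ctx := context.WithValue(parent, ctxKey("requestID"), "abc-123")
//  if id, ok := ctx.Value(ctxKey("requestID")).(string); ok {
//  	// id == "abc-123"
//  }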
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}

View File

@@ -1,29 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
*.test
*.out
*.txt
cover.html
README.html

View File

@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Dean Karn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,368 +0,0 @@
Package validator
================
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v8/logo.png">
[![Join the chat at https://gitter.im/bluesuncorp/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-8.17.1-green.svg)
[![Build Status](https://semaphoreci.com/api/v1/projects/ec20115f-ef1b-4c7d-9393-cc76aba74eb4/530054/badge.svg)](https://semaphoreci.com/joeybloggs/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v8&service=github)](https://coveralls.io/github/go-playground/validator?branch=v8)
[![Go Report Card](http://goreportcard.com/badge/go-playground/validator)](http://goreportcard.com/report/go-playground/validator)
[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v8?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v8)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Package validator implements value validations for structs and individual fields based on tags.
It has the following **unique** features:
- Cross Field and Cross Struct validations by using validation tags or custom validators.
- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
- Handles type interface by determining its underlying type prior to validation.
- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs
- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
Installation
------------
Use go get.
go get gopkg.in/go-playground/validator.v8
or to update
go get -u gopkg.in/go-playground/validator.v8
Then import the validator package into your own code.
import "gopkg.in/go-playground/validator.v8"
Error Return Value
-------
Validation functions return type error
They return type error to avoid the issue discussed in the following, where err is always != nil:
* http://stackoverflow.com/a/29138676/3158232
* https://github.com/go-playground/validator/issues/134
validator only returns nil or ValidationErrors as type error; so in your code all you need to do
is check whether the returned error is nil, and if it's not, type assert it to ValidationErrors
like so:
```go
err := validate.Struct(mystruct)
if err != nil {
	validationErrors := err.(validator.ValidationErrors)
	// handle the field errors here
}
```
Usage and documentation
------
Please see http://godoc.org/gopkg.in/go-playground/validator.v8 for detailed usage docs.
##### Examples:
Struct & Field validation
```go
package main
import (
"fmt"
"gopkg.in/go-playground/validator.v8"
)
// User contains user information
type User struct {
FirstName string `validate:"required"`
LastName string `validate:"required"`
Age uint8 `validate:"gte=0,lte=130"`
Email string `validate:"required,email"`
FavouriteColor string `validate:"hexcolor|rgb|rgba"`
Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
}
// Address houses a users address information
type Address struct {
Street string `validate:"required"`
City string `validate:"required"`
Planet string `validate:"required"`
Phone string `validate:"required"`
}
var validate *validator.Validate
func main() {
config := &validator.Config{TagName: "validate"}
validate = validator.New(config)
validateStruct()
validateField()
}
func validateStruct() {
address := &Address{
Street: "Eavesdown Docks",
Planet: "Persphone",
Phone: "none",
}
user := &User{
FirstName: "Badger",
LastName: "Smith",
Age: 135,
Email: "Badger.Smith@gmail.com",
FavouriteColor: "#000",
Addresses: []*Address{address},
}
// returns nil or ValidationErrors ( map[string]*FieldError )
errs := validate.Struct(user)
if errs != nil {
fmt.Println(errs) // output: Key: "User.Age" Error:Field validation for "Age" failed on the "lte" tag
// Key: "User.Addresses[0].City" Error:Field validation for "City" failed on the "required" tag
err := errs.(validator.ValidationErrors)["User.Addresses[0].City"]
fmt.Println(err.Field) // output: City
fmt.Println(err.Tag) // output: required
fmt.Println(err.Kind) // output: string
fmt.Println(err.Type) // output: string
fmt.Println(err.Param) // output:
fmt.Println(err.Value) // output:
// from here you can create your own error messages in whatever language you wish
return
}
// save user to database
}
func validateField() {
myEmail := "joeybloggs.gmail.com"
errs := validate.Field(myEmail, "required,email")
if errs != nil {
fmt.Println(errs) // output: Key: "" Error:Field validation for "" failed on the "email" tag
return
}
// email ok, move on
}
```
Custom Field Type
```go
package main
import (
"database/sql"
"database/sql/driver"
"fmt"
"reflect"
"gopkg.in/go-playground/validator.v8"
)
// DbBackedUser User struct
type DbBackedUser struct {
Name sql.NullString `validate:"required"`
Age sql.NullInt64 `validate:"required"`
}
func main() {
config := &validator.Config{TagName: "validate"}
validate := validator.New(config)
// register all sql.Null* types to use the ValidateValuer CustomTypeFunc
validate.RegisterCustomTypeFunc(ValidateValuer, sql.NullString{}, sql.NullInt64{}, sql.NullBool{}, sql.NullFloat64{})
x := DbBackedUser{Name: sql.NullString{String: "", Valid: true}, Age: sql.NullInt64{Int64: 0, Valid: false}}
errs := validate.Struct(x)
if len(errs.(validator.ValidationErrors)) > 0 {
fmt.Printf("Errs:\n%+v\n", errs)
}
}
// ValidateValuer implements validator.CustomTypeFunc
func ValidateValuer(field reflect.Value) interface{} {
if valuer, ok := field.Interface().(driver.Valuer); ok {
val, err := valuer.Value()
if err == nil {
return val
}
// handle the error how you want
}
return nil
}
```
Struct Level Validation
```go
package main
import (
"fmt"
"reflect"
"gopkg.in/go-playground/validator.v8"
)
// User contains user information
type User struct {
FirstName string `json:"fname"`
LastName string `json:"lname"`
Age uint8 `validate:"gte=0,lte=130"`
Email string `validate:"required,email"`
FavouriteColor string `validate:"hexcolor|rgb|rgba"`
Addresses []*Address `validate:"required,dive,required"` // a person can have a home and cottage...
}
// Address houses a users address information
type Address struct {
Street string `validate:"required"`
City string `validate:"required"`
Planet string `validate:"required"`
Phone string `validate:"required"`
}
var validate *validator.Validate
func main() {
config := &validator.Config{TagName: "validate"}
validate = validator.New(config)
validate.RegisterStructValidation(UserStructLevelValidation, User{})
validateStruct()
}
// UserStructLevelValidation contains custom struct level validations that don't always
// make sense at the field validation level. For example, this function validates that either
// FirstName or LastName exists; that could have been done with a custom field validation, but
// then it would have had to be added to both fields, duplicating the logic and overhead. This
// way it is only validated once.
//
// NOTE: you may ask why not just do this outside of validator. Doing it this way hooks right
// into validator, so you can combine it with validation tags and still have a common error
// output format.
func UserStructLevelValidation(v *validator.Validate, structLevel *validator.StructLevel) {
user := structLevel.CurrentStruct.Interface().(User)
if len(user.FirstName) == 0 && len(user.LastName) == 0 {
structLevel.ReportError(reflect.ValueOf(user.FirstName), "FirstName", "fname", "fnameorlname")
structLevel.ReportError(reflect.ValueOf(user.LastName), "LastName", "lname", "fnameorlname")
}
// plus you can do more, even with a different tag than "fnameorlname"
}
func validateStruct() {
address := &Address{
Street: "Eavesdown Docks",
Planet: "Persphone",
Phone: "none",
City: "Unknown",
}
user := &User{
FirstName: "",
LastName: "",
Age: 45,
Email: "Badger.Smith@gmail.com",
FavouriteColor: "#000",
Addresses: []*Address{address},
}
// returns nil or ValidationErrors ( map[string]*FieldError )
errs := validate.Struct(user)
if errs != nil {
fmt.Println(errs) // output: Key: 'User.LastName' Error:Field validation for 'LastName' failed on the 'fnameorlname' tag
// Key: 'User.FirstName' Error:Field validation for 'FirstName' failed on the 'fnameorlname' tag
err := errs.(validator.ValidationErrors)["User.FirstName"]
fmt.Println(err.Field) // output: FirstName
fmt.Println(err.Tag) // output: fnameorlname
fmt.Println(err.Kind) // output: string
fmt.Println(err.Type) // output: string
fmt.Println(err.Param) // output:
fmt.Println(err.Value) // output:
// from here you can create your own error messages in whatever language you wish
return
}
// save user to database
}
```
Benchmarks
------
###### Run on MacBook Pro (Retina, 15-inch, Late 2013) 2.6 GHz Intel Core i7 16 GB 1600 MHz DDR3 using Go version go1.5.3 darwin/amd64
```go
go test -cpu=4 -bench=. -benchmem=true
PASS
BenchmarkFieldSuccess-4 10000000 167 ns/op 0 B/op 0 allocs/op
BenchmarkFieldFailure-4 2000000 701 ns/op 432 B/op 4 allocs/op
BenchmarkFieldDiveSuccess-4 500000 2937 ns/op 480 B/op 27 allocs/op
BenchmarkFieldDiveFailure-4 500000 3536 ns/op 912 B/op 31 allocs/op
BenchmarkFieldCustomTypeSuccess-4 5000000 341 ns/op 32 B/op 2 allocs/op
BenchmarkFieldCustomTypeFailure-4 2000000 679 ns/op 432 B/op 4 allocs/op
BenchmarkFieldOrTagSuccess-4 1000000 1157 ns/op 16 B/op 1 allocs/op
BenchmarkFieldOrTagFailure-4 1000000 1109 ns/op 464 B/op 6 allocs/op
BenchmarkStructLevelValidationSuccess-4 2000000 694 ns/op 176 B/op 6 allocs/op
BenchmarkStructLevelValidationFailure-4 1000000 1311 ns/op 640 B/op 11 allocs/op
BenchmarkStructSimpleCustomTypeSuccess-4 2000000 894 ns/op 80 B/op 5 allocs/op
BenchmarkStructSimpleCustomTypeFailure-4 1000000 1496 ns/op 688 B/op 11 allocs/op
BenchmarkStructPartialSuccess-4 1000000 1229 ns/op 384 B/op 10 allocs/op
BenchmarkStructPartialFailure-4 1000000 1838 ns/op 832 B/op 15 allocs/op
BenchmarkStructExceptSuccess-4 2000000 961 ns/op 336 B/op 7 allocs/op
BenchmarkStructExceptFailure-4 1000000 1218 ns/op 384 B/op 10 allocs/op
BenchmarkStructSimpleCrossFieldSuccess-4 2000000 954 ns/op 128 B/op 6 allocs/op
BenchmarkStructSimpleCrossFieldFailure-4 1000000 1569 ns/op 592 B/op 11 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldSuccess-4 1000000 1588 ns/op 192 B/op 10 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldFailure-4 1000000 2217 ns/op 656 B/op 15 allocs/op
BenchmarkStructSimpleSuccess-4 2000000 925 ns/op 48 B/op 3 allocs/op
BenchmarkStructSimpleFailure-4 1000000 1650 ns/op 688 B/op 11 allocs/op
BenchmarkStructSimpleSuccessParallel-4 5000000 261 ns/op 48 B/op 3 allocs/op
BenchmarkStructSimpleFailureParallel-4 2000000 758 ns/op 688 B/op 11 allocs/op
BenchmarkStructComplexSuccess-4 300000 5868 ns/op 544 B/op 32 allocs/op
BenchmarkStructComplexFailure-4 200000 10767 ns/op 3912 B/op 77 allocs/op
BenchmarkStructComplexSuccessParallel-4 1000000 1559 ns/op 544 B/op 32 allocs/op
BenchmarkStructComplexFailureParallel-4 500000 3747 ns/op 3912 B/op 77 allocs
```
Complementary Software
----------------------
Here is a list of software that complements this library, either pre or post validation.
* [Gorilla Schema](https://github.com/gorilla/schema) - Package gorilla/schema fills a struct with form values.
* [Conform](https://github.com/leebenson/conform) - Trims, sanitizes & scrubs data based on struct tags.
How to Contribute
------
There will always be a development branch for each version i.e. `v1-development`. In order to contribute,
please make your pull requests against those branches.
If the changes being proposed or requested are breaking changes, please create an issue for discussion,
or create a pull request against the highest development branch. For example, this package has a
v1 and a v1-development branch; there will also be a v2-development branch even though v2 doesn't exist yet.
I strongly encourage everyone who creates a custom validation function to contribute it and
help make this package even better.
License
------
Distributed under the MIT License; please see the license file in the code for more details.

File diff suppressed because it is too large

View File

@@ -1,71 +0,0 @@
package validator
import (
"reflect"
"sync"
)
type cachedField struct {
Idx int
Name string
AltName string
CachedTag *cachedTag
}
type cachedStruct struct {
Name string
fields map[int]cachedField
}
type structCacheMap struct {
lock sync.RWMutex
m map[reflect.Type]*cachedStruct
}
func (s *structCacheMap) Get(key reflect.Type) (*cachedStruct, bool) {
s.lock.RLock()
value, ok := s.m[key]
s.lock.RUnlock()
return value, ok
}
func (s *structCacheMap) Set(key reflect.Type, value *cachedStruct) {
s.lock.Lock()
s.m[key] = value
s.lock.Unlock()
}
type cachedTag struct {
tag string
isOmitEmpty bool
isNoStructLevel bool
isStructOnly bool
diveTag string
tags []*tagVals
}
type tagVals struct {
tagVals [][]string
isOrVal bool
isAlias bool
tag string
}
type tagCacheMap struct {
lock sync.RWMutex
m map[string]*cachedTag
}
func (s *tagCacheMap) Get(key string) (*cachedTag, bool) {
s.lock.RLock()
value, ok := s.m[key]
s.lock.RUnlock()
return value, ok
}
func (s *tagCacheMap) Set(key string, value *cachedTag) {
s.lock.Lock()
s.m[key] = value
s.lock.Unlock()
}

View File

@@ -1,852 +0,0 @@
/*
Package validator implements value validations for structs and individual fields
based on tags.
It can also handle Cross-Field and Cross-Struct validation for nested structs
and has the ability to dive into arrays and maps of any type.
Why not a better error message?
Because this library intends for you to handle your own error messages.
Why should I handle my own errors?
Many reasons. We built an internationalized application and needed to know the
field, and what validation failed so we could provide a localized error.
if fieldErr.Field == "Name" {
switch fieldErr.ErrorTag
case "required":
return "Translated string based on field + error"
default:
return "Translated string based on field"
}
Validation Functions Return Type error
Doing things this way is actually how the standard library does it as well; see
the os.Open function here:
https://golang.org/pkg/os/#Open.
The authors return type "error" to avoid the issue discussed in the following,
where err is always != nil:
http://stackoverflow.com/a/29138676/3158232
https://github.com/go-playground/validator/issues/134
Validator only returns nil or ValidationErrors as type error; so, in your code
all you need to do is check whether the returned error is nil, and if it's not,
type assert it to ValidationErrors like so: err.(validator.ValidationErrors).
Custom Functions
Custom functions can be added. Example:
// Structure
func customFunc(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool {
if whatever {
return false
}
return true
}
validate.RegisterValidation("custom tag name", customFunc)
// NOTES: using the same tag name as an existing function
// will overwrite the existing one
Cross-Field Validation
Cross-Field Validation can be done via the following tags:
- eqfield
- nefield
- gtfield
- gtefield
- ltfield
- ltefield
- eqcsfield
- necsfield
- gtcsfield
- gtecsfield
- ltcsfield
- ltecsfield
If, however, some custom cross-field validation is required, it can be done
using a custom validation.
Why not just have cross-fields validation tags (i.e. only eqcsfield and not
eqfield)?
The reason is efficiency. If you want to check a field within the same struct
"eqfield" only has to find the field on the same struct (1 level). But, if we
used "eqcsfield" it could be multiple levels down. Example:
type Inner struct {
StartDate time.Time
}
type Outer struct {
InnerStructField *Inner
CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"`
}
now := time.Now()
inner := &Inner{
StartDate: now,
}
outer := &Outer{
InnerStructField: inner,
CreatedAt: now,
}
errs := validate.Struct(outer)
// NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed
// into the function
// when calling validate.FieldWithValue(val, field, tag) val will be
// whatever you pass, struct, field...
// when calling validate.Field(field, tag) val will be nil
Multiple Validators
Multiple validators on a field will process in the order defined. Example:
type Test struct {
Field `validate:"max=10,min=1"`
}
// max will be checked then min
Bad Validator definitions are not handled by the library. Example:
type Test struct {
Field `validate:"min=10,max=0"`
}
// this definition of min max will never succeed
Using Validator Tags
Baked In Cross-Field validation only compares fields on the same struct.
If Cross-Field + Cross-Struct validation is needed you should implement your
own custom validator.
Comma (",") is the default separator of validation tags. If you wish to
have a comma included within the parameter (i.e. excludesall=,) you will need to
use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma,
so the above will become excludesall=0x2C.
type Test struct {
Field `validate:"excludesall=,"` // BAD! Do not include a comma.
Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
}
Pipe ("|") is the default separator of validation tags. If you wish to
have a pipe included within the parameter i.e. excludesall=| you will need to
use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
so the above will become excludesall=0x7C
type Test struct {
Field `validate:"excludesall=|"` // BAD! Do not include a a pipe!
Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
}
Baked In Validators and Tags
Here is a list of the current built in validators:
Skip Field
Tells the validation to skip this struct field; this is particularly
handy for ignoring embedded structs from being validated. (Usage: -)
Usage: -
Or Operator
This is the 'or' operator allowing multiple validators to be used and
accepted. (Usage: rbg|rgba) <-- this would allow either rgb or rgba
colors to be accepted. This can also be combined with 'and' for example
( Usage: omitempty,rgb|rgba)
Usage: |
StructOnly
When a field that is a nested struct is encountered and contains this flag,
any validation on the nested struct will be run, but none of the nested
struct fields will be validated. This is useful if inside of your program
you know the struct is valid, but need to verify it has been assigned.
NOTE: only "required" and "omitempty" can be used on a struct itself.
Usage: structonly
NoStructLevel
Same as structonly tag except that any struct level validations will not run.
Usage: nostructlevel
Exists
Is a special tag without a validation function attached. It is used when a field
is a Pointer, Interface or Invalid and you wish to validate that it exists.
Example: if you want to ensure a bool exists, define the bool as a pointer and
use exists; it will ensure there is a value. You couldn't use required, as it would
fail when the bool was false. exists will fail if the value is a Pointer, Interface
or Invalid and is nil.
Usage: exists
Omit Empty
Allows conditional validation, for example if a field is not set with
a value (Determined by the "required" validator) then other validation
such as min or max won't run, but if a value is set validation will run.
Usage: omitempty
Dive
This tells the validator to dive into a slice, array or map and validate that
level of the slice, array or map with the validation tags that follow.
Multidimensional nesting is also supported, each level you wish to dive will
require another dive tag.
Usage: dive
Example #1
[][]string with validation tag "gt=0,dive,len=1,dive,required"
// gt=0 will be applied to []
// len=1 will be applied to []string
// required will be applied to string
Example #2
[][]string with validation tag "gt=0,dive,dive,required"
// gt=0 will be applied to []
// []string will be spared validation
// required will be applied to string
Required
This validates that the value is not the data type's default zero value.
For numbers ensures value is not zero. For strings ensures value is
not "". For slices, maps, pointers, interfaces, channels and functions
ensures the value is not nil.
Usage: required
Length
For numbers, len will ensure that the value is
equal to the parameter given. For strings, it checks that
the string length is exactly that number of characters. For slices,
arrays, and maps, validates the number of items.
Usage: len=10
Maximum
For numbers, max will ensure that the value is
less than or equal to the parameter given. For strings, it checks
that the string length is at most that number of characters. For
slices, arrays, and maps, validates the number of items.
Usage: max=10
Minimum
For numbers, min will ensure that the value is
greater than or equal to the parameter given. For strings, it checks that
the string length is at least that number of characters. For slices,
arrays, and maps, validates the number of items.
Usage: min=10
Equals
For strings & numbers, eq will ensure that the value is
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
Usage: eq=10
Not Equal
For strings & numbers, eq will ensure that the value is not
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
Usage: eq=10
Greater Than
For numbers, this will ensure that the value is greater than the
parameter given. For strings, it checks that the string length
is greater than that number of characters. For slices, arrays
and maps it validates the number of items.
Example #1
Usage: gt=10
Example #2 (time.Time)
For time.Time ensures the time value is greater than time.Now.UTC().
Usage: gt
Greater Than or Equal
Same as 'min' above. Kept both to make terminology with 'len' easier.
Example #1
Usage: gte=10
Example #2 (time.Time)
For time.Time ensures the time value is greater than or equal to time.Now.UTC().
Usage: gte
Less Than
For numbers, this will ensure that the value is less than the parameter given.
For strings, it checks that the string length is less than that number of
characters. For slices, arrays, and maps it validates the number of items.
Example #1
Usage: lt=10
Example #2 (time.Time)
For time.Time ensures the time value is less than time.Now.UTC().
Usage: lt
Less Than or Equal
Same as 'max' above. Kept both to make terminology with 'len' easier.
Example #1
Usage: lte=10
Example #2 (time.Time)
For time.Time ensures the time value is less than or equal to time.Now.UTC().
Usage: lte
Field Equals Another Field
This will validate the field value against another fields value either within
a struct or passed in field.
Example #1:
// Validation on Password field using:
Usage: eqfield=ConfirmPassword
Example #2:
// Validating by field:
validate.FieldWithValue(password, confirmpassword, "eqfield")
Field Equals Another Field (relative)
This does the same as eqfield except that it validates the field provided relative
to the top level struct.
Usage: eqcsfield=InnerStructField.Field
Field Does Not Equal Another Field
This will validate the field value against another fields value either within
a struct or passed in field.
Examples:
// Confirm two colors are not the same:
//
// Validation on Color field:
Usage: nefield=Color2
// Validating by field:
validate.FieldWithValue(color1, color2, "nefield")
Field Does Not Equal Another Field (relative)
This does the same as nefield except that it validates the field provided
relative to the top level struct.
Usage: necsfield=InnerStructField.Field
Field Greater Than Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: gtfield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "gtfield")
Field Greater Than Another Relative Field
This does the same as gtfield except that it validates the field provided
relative to the top level struct.
Usage: gtcsfield=InnerStructField.Field
Field Greater Than or Equal To Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: gtefield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "gtefield")
Field Greater Than or Equal To Another Relative Field
This does the same as gtefield except that it validates the field provided relative
to the top level struct.
Usage: gtecsfield=InnerStructField.Field
Less Than Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: ltfield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "ltfield")
Less Than Another Relative Field
This does the same as ltfield except that it validates the field provided relative
to the top level struct.
Usage: ltcsfield=InnerStructField.Field
Less Than or Equal To Another Field
Only valid for Numbers and time.Time types, this will validate the field value
against another fields value either within a struct or passed in field.
usage examples are for validation of a Start and End date:
Example #1:
// Validation on End field using validate.Struct:
Usage: ltefield=Start
Example #2:
// Validating by field:
validate.FieldWithValue(start, end, "ltefield")
Less Than or Equal To Another Relative Field
This does the same as ltefield except that it validates the field provided relative
to the top level struct.
Usage: ltecsfield=InnerStructField.Field
Alpha Only
This validates that a string value contains alpha characters only
Usage: alpha
Alphanumeric
This validates that a string value contains alphanumeric characters only
Usage: alphanum
Numeric
This validates that a string value contains a basic numeric value.
basic excludes exponents etc...
Usage: numeric
Hexadecimal String
This validates that a string value contains a valid hexadecimal.
Usage: hexadecimal
Hexcolor String
This validates that a string value contains a valid hex color including
hashtag (#)
Usage: hexcolor
RGB String
This validates that a string value contains a valid rgb color
Usage: rgb
RGBA String
This validates that a string value contains a valid rgba color
Usage: rgba
HSL String
This validates that a string value contains a valid hsl color
Usage: hsl
HSLA String
This validates that a string value contains a valid hsla color
Usage: hsla
E-mail String
This validates that a string value contains a valid email
This may not conform to all possibilities of any RFC standard, but neither
does any email provider accept all possibilities.
Usage: email
URL String
This validates that a string value contains a valid url
This will accept any url the golang request uri accepts, but it must contain
a scheme, for example http:// or rtmp://
Usage: url
URI String
This validates that a string value contains a valid uri
This will accept any uri the golang request uri accepts
Usage: uri
Base64 String
This validates that a string value contains a valid base64 value.
Although an empty string is valid base64 this will report an empty string
as an error, if you wish to accept an empty string as valid you can use
this with the omitempty tag.
Usage: base64
Contains
This validates that a string value contains the substring value.
Usage: contains=@
Contains Any
This validates that a string value contains any Unicode code points
in the substring value.
Usage: containsany=!@#?
Contains Rune
This validates that a string value contains the supplied rune value.
Usage: containsrune=@
Excludes
This validates that a string value does not contain the substring value.
Usage: excludes=@
Excludes All
This validates that a string value does not contain any Unicode code
points in the substring value.
Usage: excludesall=!@#?
Excludes Rune
This validates that a string value does not contain the supplied rune value.
Usage: excludesrune=@
International Standard Book Number
This validates that a string value contains a valid isbn10 or isbn13 value.
Usage: isbn
International Standard Book Number 10
This validates that a string value contains a valid isbn10 value.
Usage: isbn10
International Standard Book Number 13
This validates that a string value contains a valid isbn13 value.
Usage: isbn13
Universally Unique Identifier UUID
This validates that a string value contains a valid UUID.
Usage: uuid
Universally Unique Identifier UUID v3
This validates that a string value contains a valid version 3 UUID.
Usage: uuid3
Universally Unique Identifier UUID v4
This validates that a string value contains a valid version 4 UUID.
Usage: uuid4
Universally Unique Identifier UUID v5
This validates that a string value contains a valid version 5 UUID.
Usage: uuid5
ASCII
This validates that a string value contains only ASCII characters.
NOTE: if the string is blank, this validates as true.
Usage: ascii
Printable ASCII
This validates that a string value contains only printable ASCII characters.
NOTE: if the string is blank, this validates as true.
Usage: asciiprint
Multi-Byte Characters
This validates that a string value contains one or more multibyte characters.
NOTE: if the string is blank, this validates as true.
Usage: multibyte
Data URL
This validates that a string value contains a valid DataURI.
NOTE: this will also validate that the data portion is valid base64
Usage: datauri
Latitude
This validates that a string value contains a valid latitude.
Usage: latitude
Longitude
This validates that a string value contains a valid longitude.
Usage: longitude
Social Security Number SSN
This validates that a string value contains a valid U.S. Social Security Number.
Usage: ssn
Internet Protocol Address IP
This validates that a string value contains a valid IP Address.
Usage: ip
Internet Protocol Address IPv4
This validates that a string value contains a valid v4 IP Address.
Usage: ipv4
Internet Protocol Address IPv6
This validates that a string value contains a valid v6 IP Address.
Usage: ipv6
Classless Inter-Domain Routing CIDR
This validates that a string value contains a valid CIDR Address.
Usage: cidr
Classless Inter-Domain Routing CIDRv4
This validates that a string value contains a valid v4 CIDR Address.
Usage: cidrv4
Classless Inter-Domain Routing CIDRv6
This validates that a string value contains a valid v6 CIDR Address.
Usage: cidrv6
Transmission Control Protocol Address TCP
This validates that a string value contains a valid resolvable TCP Address.
Usage: tcp_addr
Transmission Control Protocol Address TCPv4
This validates that a string value contains a valid resolvable v4 TCP Address.
Usage: tcp4_addr
Transmission Control Protocol Address TCPv6
This validates that a string value contains a valid resolvable v6 TCP Address.
Usage: tcp6_addr
User Datagram Protocol Address UDP
This validates that a string value contains a valid resolvable UDP Address.
Usage: udp_addr
User Datagram Protocol Address UDPv4
This validates that a string value contains a valid resolvable v4 UDP Address.
Usage: udp4_addr
User Datagram Protocol Address UDPv6
This validates that a string value contains a valid resolvable v6 UDP Address.
Usage: udp6_addr
Internet Protocol Address IP
This validates that a string value contains a valid resolvable IP Address.
Usage: ip_addr
Internet Protocol Address IPv4
This validates that a string value contains a valid resolvable v4 IP Address.
Usage: ip4_addr
Internet Protocol Address IPv6
This validates that a string value contains a valid resolvable v6 IP Address.
Usage: ip6_addr
Unix domain socket end point Address
This validates that a string value contains a valid Unix Address.
Usage: unix_addr
Media Access Control Address MAC
This validates that a string value contains a valid MAC Address.
Usage: mac
Note: See Go's ParseMAC for accepted formats and types:
http://golang.org/src/net/mac.go?s=866:918#L29
Alias Validators and Tags
NOTE: When returning an error, the tag returned in "FieldError" will be
the alias tag unless the dive tag is part of the alias. Everything after the
dive tag is not reported as the alias tag. Also, the "ActualTag" in the before
case will be the actual tag within the alias that failed.
Here is a list of the current built in alias tags:
"iscolor"
alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
Validator notes:
regex
a regex validator won't be added because commas and = signs can be part
of a regex, which conflicts with the validation definitions. Although
workarounds can be made, they take away from using pure regexes.
Furthermore, it's quick and dirty, but the regexes become harder to
maintain and are not reusable, so it's as much a programming philosophy
as anything.
In place of this, new validator functions should be created; a regex can
be used within the validator function and even be precompiled for better
efficiency within regexes.go.
And the best reason, you can submit a pull request and we can keep on
adding to the validation library of this package!
Panics
This package panics when bad input is provided; this is by design, as bad code like
that should not make it to production.
type Test struct {
TestField string `validate:"nonexistantfunction=1"`
}
t := &Test{
TestField: "Test"
}
validate.Struct(t) // this will panic
*/
package validator

Binary file not shown.


View File

@@ -1,59 +0,0 @@
package validator
import "regexp"
const (
alphaRegexString = "^[a-zA-Z]+$"
alphaNumericRegexString = "^[a-zA-Z0-9]+$"
numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
numberRegexString = "^[0-9]+$"
hexadecimalRegexString = "^[0-9a-fA-F]+$"
hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:\\(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22)))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
aSCIIRegexString = "^[\x00-\x7F]*$"
printableASCIIRegexString = "^[\x20-\x7E]*$"
multibyteRegexString = "[^\x00-\x7F]"
dataURIRegexString = "^data:.+\\/(.+);base64$"
latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
sSNRegexString = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
)
var (
alphaRegex = regexp.MustCompile(alphaRegexString)
alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
numericRegex = regexp.MustCompile(numericRegexString)
numberRegex = regexp.MustCompile(numberRegexString)
hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
hexcolorRegex = regexp.MustCompile(hexcolorRegexString)
rgbRegex = regexp.MustCompile(rgbRegexString)
rgbaRegex = regexp.MustCompile(rgbaRegexString)
hslRegex = regexp.MustCompile(hslRegexString)
hslaRegex = regexp.MustCompile(hslaRegexString)
emailRegex = regexp.MustCompile(emailRegexString)
base64Regex = regexp.MustCompile(base64RegexString)
iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
uUID3Regex = regexp.MustCompile(uUID3RegexString)
uUID4Regex = regexp.MustCompile(uUID4RegexString)
uUID5Regex = regexp.MustCompile(uUID5RegexString)
uUIDRegex = regexp.MustCompile(uUIDRegexString)
aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
multibyteRegex = regexp.MustCompile(multibyteRegexString)
dataURIRegex = regexp.MustCompile(dataURIRegexString)
latitudeRegex = regexp.MustCompile(latitudeRegexString)
longitudeRegex = regexp.MustCompile(longitudeRegexString)
sSNRegex = regexp.MustCompile(sSNRegexString)
)

View File

@@ -1,382 +0,0 @@
package validator
import (
"fmt"
"reflect"
"strconv"
"strings"
)
const (
dash = "-"
blank = ""
namespaceSeparator = "."
leftBracket = "["
rightBracket = "]"
restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
)
var (
restrictedTags = map[string]*struct{}{
diveTag: emptyStructPtr,
existsTag: emptyStructPtr,
structOnlyTag: emptyStructPtr,
omitempty: emptyStructPtr,
skipValidationTag: emptyStructPtr,
utf8HexComma: emptyStructPtr,
utf8Pipe: emptyStructPtr,
noStructLevelTag: emptyStructPtr,
}
)
// ExtractType gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and its kind.
// It is exposed for use within your Custom Functions.
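//
// A hedged example (assuming field wraps a *string inside a custom Func):
//	val, kind := v.ExtractType(field)
//	// kind == reflect.String once the pointer has been dereferenced; val holds the string value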
func (v *Validate) ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) {
switch current.Kind() {
case reflect.Ptr:
if current.IsNil() {
return current, reflect.Ptr
}
return v.ExtractType(current.Elem())
case reflect.Interface:
if current.IsNil() {
return current, reflect.Interface
}
return v.ExtractType(current.Elem())
case reflect.Invalid:
return current, reflect.Invalid
default:
if v.hasCustomFuncs {
// fmt.Println("Type", current.Type())
if fn, ok := v.customTypeFuncs[current.Type()]; ok {
// fmt.Println("OK")
return v.ExtractType(reflect.ValueOf(fn(current)))
}
// fmt.Println("NOT OK")
}
return current, current.Kind()
}
}
// GetStructFieldOK traverses a struct to retrieve a specific field denoted by the provided namespace and
// returns the field, field kind and whether it was successful in retrieving the field at all.
// NOTE: when not successful ok will be false; this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
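// For example (field names are hypothetical): a namespace of "Addresses[0].City"
// walks into the Addresses slice field, takes index 0, dives through the pointer
// and returns the City field's value, its kind and true.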
func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
current, kind := v.ExtractType(current)
if kind == reflect.Invalid {
return current, kind, false
}
if namespace == blank {
return current, kind, true
}
switch kind {
case reflect.Ptr, reflect.Interface:
return current, kind, false
case reflect.Struct:
typ := current.Type()
fld := namespace
ns := namespace
if typ != timeType && typ != timePtrType {
idx := strings.Index(namespace, namespaceSeparator)
if idx != -1 {
fld = namespace[:idx]
ns = namespace[idx+1:]
} else {
ns = blank
idx = len(namespace)
}
bracketIdx := strings.Index(fld, leftBracket)
if bracketIdx != -1 {
fld = fld[:bracketIdx]
ns = namespace[bracketIdx:]
}
current = current.FieldByName(fld)
return v.GetStructFieldOK(current, ns)
}
case reflect.Array, reflect.Slice:
idx := strings.Index(namespace, leftBracket)
idx2 := strings.Index(namespace, rightBracket)
arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
if arrIdx >= current.Len() {
return current, kind, false
}
startIdx := idx2 + 1
if startIdx < len(namespace) {
if namespace[startIdx:startIdx+1] == namespaceSeparator {
startIdx++
}
}
return v.GetStructFieldOK(current.Index(arrIdx), namespace[startIdx:])
case reflect.Map:
idx := strings.Index(namespace, leftBracket) + 1
idx2 := strings.Index(namespace, rightBracket)
endIdx := idx2
if endIdx+1 < len(namespace) {
if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
endIdx++
}
}
key := namespace[idx:idx2]
switch current.Type().Key().Kind() {
case reflect.Int:
i, _ := strconv.Atoi(key)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
case reflect.Int8:
i, _ := strconv.ParseInt(key, 10, 8)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int8(i))), namespace[endIdx+1:])
case reflect.Int16:
i, _ := strconv.ParseInt(key, 10, 16)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int16(i))), namespace[endIdx+1:])
case reflect.Int32:
i, _ := strconv.ParseInt(key, 10, 32)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int32(i))), namespace[endIdx+1:])
case reflect.Int64:
i, _ := strconv.ParseInt(key, 10, 64)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
case reflect.Uint:
i, _ := strconv.ParseUint(key, 10, 0)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint(i))), namespace[endIdx+1:])
case reflect.Uint8:
i, _ := strconv.ParseUint(key, 10, 8)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint8(i))), namespace[endIdx+1:])
case reflect.Uint16:
i, _ := strconv.ParseUint(key, 10, 16)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint16(i))), namespace[endIdx+1:])
case reflect.Uint32:
i, _ := strconv.ParseUint(key, 10, 32)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint32(i))), namespace[endIdx+1:])
case reflect.Uint64:
i, _ := strconv.ParseUint(key, 10, 64)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:])
case reflect.Float32:
f, _ := strconv.ParseFloat(key, 32)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(float32(f))), namespace[endIdx+1:])
case reflect.Float64:
f, _ := strconv.ParseFloat(key, 64)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(f)), namespace[endIdx+1:])
case reflect.Bool:
b, _ := strconv.ParseBool(key)
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(b)), namespace[endIdx+1:])
// reflect.Type = string
default:
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(key)), namespace[endIdx+1:])
}
}
// if got here there was more namespace, cannot go any deeper
panic("Invalid field namespace")
}
// asInt returns the parameter as an int64
// or panics if it can't convert
func asInt(param string) int64 {
i, err := strconv.ParseInt(param, 0, 64)
panicIf(err)
return i
}
// asUint returns the parameter as a uint64
// or panics if it can't convert
func asUint(param string) uint64 {
i, err := strconv.ParseUint(param, 0, 64)
panicIf(err)
return i
}
// asFloat returns the parameter as a float64
// or panics if it can't convert
func asFloat(param string) float64 {
i, err := strconv.ParseFloat(param, 64)
panicIf(err)
return i
}
func panicIf(err error) {
if err != nil {
panic(err.Error())
}
}
func (v *Validate) parseStruct(current reflect.Value, sName string) *cachedStruct {
typ := current.Type()
s := &cachedStruct{Name: sName, fields: map[int]cachedField{}}
numFields := current.NumField()
var fld reflect.StructField
var tag string
var customName string
for i := 0; i < numFields; i++ {
fld = typ.Field(i)
if fld.PkgPath != blank {
continue
}
tag = fld.Tag.Get(v.tagName)
if tag == skipValidationTag {
continue
}
customName = fld.Name
if v.fieldNameTag != blank {
name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0]
// dash check is for json "-" (aka skipValidationTag) means don't output in json
if name != "" && name != skipValidationTag {
customName = name
}
}
cTag, ok := v.tagCache.Get(tag)
if !ok {
cTag = v.parseTags(tag, fld.Name)
}
s.fields[i] = cachedField{Idx: i, Name: fld.Name, AltName: customName, CachedTag: cTag}
}
v.structCache.Set(typ, s)
return s
}
func (v *Validate) parseTags(tag, fieldName string) *cachedTag {
cTag := &cachedTag{tag: tag}
v.parseTagsRecursive(cTag, tag, fieldName, blank, false)
v.tagCache.Set(tag, cTag)
return cTag
}
func (v *Validate) parseTagsRecursive(cTag *cachedTag, tag, fieldName, alias string, isAlias bool) bool {
if tag == blank {
return true
}
for _, t := range strings.Split(tag, tagSeparator) {
if v.hasAliasValidators {
// check map for alias and process new tags, otherwise process as usual
if tagsVal, ok := v.aliasValidators[t]; ok {
leave := v.parseTagsRecursive(cTag, tagsVal, fieldName, t, true)
if leave {
return leave
}
continue
}
}
switch t {
case diveTag:
cTag.diveTag = tag
tVals := &tagVals{tagVals: [][]string{{t}}}
cTag.tags = append(cTag.tags, tVals)
return true
case omitempty:
cTag.isOmitEmpty = true
case structOnlyTag:
cTag.isStructOnly = true
case noStructLevelTag:
cTag.isNoStructLevel = true
}
// if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
orVals := strings.Split(t, orSeparator)
tagVal := &tagVals{isAlias: isAlias, isOrVal: len(orVals) > 1, tagVals: make([][]string, len(orVals))}
cTag.tags = append(cTag.tags, tagVal)
var key string
var param string
for i, val := range orVals {
vals := strings.SplitN(val, tagKeySeparator, 2)
key = vals[0]
tagVal.tag = key
if isAlias {
tagVal.tag = alias
}
if key == blank {
panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
}
if len(vals) > 1 {
param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
}
tagVal.tagVals[i] = []string{key, param}
}
}
return false
}

View File

@@ -1,797 +0,0 @@
/**
* Package validator
*
* MISC:
* - anonymous structs - they don't have names so expect the Struct name within StructErrors to be blank
*
*/
package validator
import (
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"time"
)
const (
utf8HexComma = "0x2C"
utf8Pipe = "0x7C"
tagSeparator = ","
orSeparator = "|"
tagKeySeparator = "="
structOnlyTag = "structonly"
noStructLevelTag = "nostructlevel"
omitempty = "omitempty"
skipValidationTag = "-"
diveTag = "dive"
existsTag = "exists"
fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
arrayIndexFieldName = "%s" + leftBracket + "%d" + rightBracket
mapIndexFieldName = "%s" + leftBracket + "%v" + rightBracket
invalidValidation = "Invalid validation tag on field %s"
undefinedValidation = "Undefined validation function on field %s"
validatorNotInitialized = "Validator instance not initialized"
fieldNameRequired = "Field Name Required"
tagRequired = "Tag Required"
)
var (
timeType = reflect.TypeOf(time.Time{})
timePtrType = reflect.TypeOf(&time.Time{})
emptyStructPtr = new(struct{})
)
// StructLevel contains all of the information and helper methods
// for reporting errors during struct level validation
type StructLevel struct {
TopStruct reflect.Value
CurrentStruct reflect.Value
errPrefix string
nsPrefix string
errs ValidationErrors
v *Validate
}
// ReportValidationErrors accepts the key relative to the top level struct and validation errors.
// Example: had a triple nested struct User, ContactInfo, Country and ran errs := validate.Struct(country)
// from within a User struct level validation would call this method like so:
// ReportValidationErrors("ContactInfo.", errs)
// NOTE: relativeKey can contain both the Field Relative and Custom name relative paths
// i.e. ReportValidationErrors("ContactInfo.|cInfo", errs) where cInfo represents say the JSON name of
// the relative path; this will be split into 2 variables in the next validator version.
func (sl *StructLevel) ReportValidationErrors(relativeKey string, errs ValidationErrors) {
for _, e := range errs {
idx := strings.Index(relativeKey, "|")
var rel string
var cRel string
if idx != -1 {
rel = relativeKey[:idx]
cRel = relativeKey[idx+1:]
} else {
rel = relativeKey
}
key := sl.errPrefix + rel + e.Field
e.FieldNamespace = key
e.NameNamespace = sl.nsPrefix + cRel + e.Name
sl.errs[key] = e
}
}
// ReportError reports an error just by passing the field and tag information
// NOTE: tag can be an existing validation tag or just something you make up
// and process on the flip side; it's up to you.
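//
// A hedged example (field and tag names are illustrative), mirroring the struct
// level validation sketch in the README:
//	sl.ReportError(reflect.ValueOf(user.Email), "Email", "email", "emailrequired")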
func (sl *StructLevel) ReportError(field reflect.Value, fieldName string, customName string, tag string) {
field, kind := sl.v.ExtractType(field)
if fieldName == blank {
panic(fieldNameRequired)
}
if customName == blank {
customName = fieldName
}
if tag == blank {
panic(tagRequired)
}
ns := sl.errPrefix + fieldName
switch kind {
case reflect.Invalid:
sl.errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: sl.nsPrefix + customName,
Name: customName,
Field: fieldName,
Tag: tag,
ActualTag: tag,
Param: blank,
Kind: kind,
}
default:
sl.errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: sl.nsPrefix + customName,
Name: customName,
Field: fieldName,
Tag: tag,
ActualTag: tag,
Param: blank,
Value: field.Interface(),
Kind: kind,
Type: field.Type(),
}
}
}
// Validate contains the validator settings passed in using the Config struct
type Validate struct {
tagName string
fieldNameTag string
validationFuncs map[string]Func
structLevelFuncs map[reflect.Type]StructLevelFunc
customTypeFuncs map[reflect.Type]CustomTypeFunc
aliasValidators map[string]string
hasCustomFuncs bool
hasAliasValidators bool
hasStructLevelFuncs bool
tagCache *tagCacheMap
structCache *structCacheMap
errsPool *sync.Pool
}
func (v *Validate) initCheck() {
if v == nil {
panic(validatorNotInitialized)
}
}
// Config contains the options that a Validator instance will use.
// It is passed to the New() function
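//
// For example:
//	validate := New(&Config{TagName: "validate"})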
type Config struct {
TagName string
FieldNameTag string
}
// CustomTypeFunc allows for overriding or adding custom field type handler functions
// field = field value of the type to return a value to be validated
// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
type CustomTypeFunc func(field reflect.Value) interface{}
// Func accepts all values needed for field and cross field validation
// v = validator instance, needed by some built in functions for its custom types
// topStruct = top level struct when validating by struct otherwise nil
// currentStruct = current level struct when validating by struct otherwise optional comparison value
// field = field value for validation
// param = parameter used in validation i.e. gt=0 param would be 0
type Func func(v *Validate, topStruct reflect.Value, currentStruct reflect.Value, field reflect.Value, fieldtype reflect.Type, fieldKind reflect.Kind, param string) bool
// StructLevelFunc accepts all values needed for struct level validation
type StructLevelFunc func(v *Validate, structLevel *StructLevel)
// ValidationErrors is a type of map[string]*FieldError
// it exists to allow for multiple errors to be passed from this library
// and yet still subscribe to the error interface
type ValidationErrors map[string]*FieldError
// Error is intended for use in development + debugging and not intended to be a production error message.
// It allows ValidationErrors to subscribe to the Error interface.
// All information to create an error message specific to your application is contained within
// the FieldError found within the ValidationErrors map
func (ve ValidationErrors) Error() string {
buff := bytes.NewBufferString(blank)
for key, err := range ve {
buff.WriteString(fmt.Sprintf(fieldErrMsg, key, err.Field, err.Tag))
buff.WriteString("\n")
}
return strings.TrimSpace(buff.String())
}
// FieldError contains a single field's validation error along
// with other properties that may be needed for error message creation
type FieldError struct {
FieldNamespace string
NameNamespace string
Field string
Name string
Tag string
ActualTag string
Kind reflect.Kind
Type reflect.Type
Param string
Value interface{}
}
// New creates a new Validate instance for use.
func New(config *Config) *Validate {
v := &Validate{
tagName: config.TagName,
fieldNameTag: config.FieldNameTag,
tagCache: &tagCacheMap{m: map[string]*cachedTag{}},
structCache: &structCacheMap{m: map[reflect.Type]*cachedStruct{}},
errsPool: &sync.Pool{New: func() interface{} {
return ValidationErrors{}
}}}
if len(v.aliasValidators) == 0 {
// must copy alias validators for separate validations to be used in each validator instance
v.aliasValidators = map[string]string{}
for k, val := range bakedInAliasValidators {
v.RegisterAliasValidation(k, val)
}
}
if len(v.validationFuncs) == 0 {
// must copy validators for separate validations to be used in each instance
v.validationFuncs = map[string]Func{}
for k, val := range bakedInValidators {
v.RegisterValidation(k, val)
}
}
return v
}
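// Illustrative usage sketch (not part of the original source): constructing a
// Validate instance and validating a struct. The User type and the "validate"
// tag name are assumptions for this example; "required" and "email" are
// baked-in validators.
func newAndValidateExample() {
	type User struct {
		Name  string `validate:"required"`
		Email string `validate:"required,email"`
	}
	validate := New(&Config{TagName: "validate"})
	err := validate.Struct(User{Name: "frp", Email: "not-an-email"})
	if errs, ok := err.(ValidationErrors); ok {
		for ns, fe := range errs {
			fmt.Printf("%s failed on the %q tag\n", ns, fe.Tag)
		}
	}
}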
// RegisterStructValidation registers a StructLevelFunc against a number of types.
// NOTE: this method is not thread-safe; it is intended that all registrations happen prior to any validation.
func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
v.initCheck()
if v.structLevelFuncs == nil {
v.structLevelFuncs = map[reflect.Type]StructLevelFunc{}
}
for _, t := range types {
v.structLevelFuncs[reflect.TypeOf(t)] = fn
}
v.hasStructLevelFuncs = true
}
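// Illustrative sketch (not part of the original source): a struct level rule
// requiring at least one contact method. The ContactExample type, its fields
// and the "emailorphone" tag are hypothetical.
type ContactExample struct {
	Email string
	Phone string
}

func registerContactRuleExample(v *Validate) {
	v.RegisterStructValidation(func(v *Validate, sl *StructLevel) {
		c := sl.CurrentStruct.Interface().(ContactExample)
		if c.Email == "" && c.Phone == "" {
			// Report the failure against the Email field under a custom tag.
			sl.ReportError(reflect.ValueOf(c.Email), "Email", "email", "emailorphone")
		}
	}, ContactExample{})
}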
// RegisterValidation adds a validation Func to a Validate's map of validators, denoted by the key.
// NOTE: if the key already exists, the previous validation function will be replaced.
// NOTE: this method is not thread-safe; it is intended that all registrations happen prior to any validation.
func (v *Validate) RegisterValidation(key string, fn Func) error {
v.initCheck()
if key == blank {
return errors.New("Function Key cannot be empty")
}
if fn == nil {
return errors.New("Function cannot be empty")
}
_, ok := restrictedTags[key]
if ok || strings.ContainsAny(key, restrictedTagChars) {
panic(fmt.Sprintf(restrictedTagErr, key))
}
v.validationFuncs[key] = fn
return nil
}
// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types.
// NOTE: this method is not thread-safe; it is intended that all registrations happen prior to any validation.
func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
v.initCheck()
if v.customTypeFuncs == nil {
v.customTypeFuncs = map[reflect.Type]CustomTypeFunc{}
}
for _, t := range types {
v.customTypeFuncs[reflect.TypeOf(t)] = fn
}
v.hasCustomFuncs = true
}
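// Illustrative sketch (not part of the original source): a hypothetical wrapper
// type whose inner value should be validated instead of the wrapper itself, in
// the spirit of the sql/driver Valuer mentioned in the CustomTypeFunc comment.
type nullableStringExample struct {
	String string
	Valid  bool
}

func registerNullableStringExample(v *Validate) {
	v.RegisterCustomTypeFunc(func(field reflect.Value) interface{} {
		if ns, ok := field.Interface().(nullableStringExample); ok && ns.Valid {
			return ns.String
		}
		// nil resolves to an invalid value, which the validator treats as unset.
		return nil
	}, nullableStringExample{})
}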
// RegisterAliasValidation registers a mapping of a single validation tag that
// defines a common or complex set of validation(s) to simplify adding validation
// to structs. NOTE: when an error is returned, the tag reported in FieldError will be
// the alias tag, unless the dive tag is part of the alias; everything after the
// dive tag is not reported under the alias tag. In the former case the ActualTag
// will be the actual tag within the alias that failed.
// NOTE: this method is not thread-safe; it is intended that all registrations happen prior to any validation.
func (v *Validate) RegisterAliasValidation(alias, tags string) {
v.initCheck()
_, ok := restrictedTags[alias]
if ok || strings.ContainsAny(alias, restrictedTagChars) {
panic(fmt.Sprintf(restrictedAliasErr, alias))
}
v.aliasValidators[alias] = tags
v.hasAliasValidators = true
}
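// Illustrative sketch (not part of the original source): registering an alias
// so struct tags can use one short name for a longer tag set. The "iscolor"
// name and its tag list mirror one of the baked-in aliases.
func registerAliasExample(v *Validate) {
	v.RegisterAliasValidation("iscolor", "hexcolor|rgb|rgba|hsl|hsla")
}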
// Field validates a single field using tag style validation and returns nil or ValidationErrors as type error.
// You will need to assert the error if it's not nil, i.e. err.(validator.ValidationErrors), to access the map of errors.
// NOTE: it returns ValidationErrors instead of a single FieldError because it can also
// validate Array, Slice and Map fields, which may contain more than one error.
func (v *Validate) Field(field interface{}, tag string) error {
v.initCheck()
errs := v.errsPool.Get().(ValidationErrors)
fieldVal := reflect.ValueOf(field)
v.traverseField(fieldVal, fieldVal, fieldVal, blank, blank, errs, false, tag, blank, blank, false, false, nil, nil)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
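// Illustrative usage sketch (not part of the original source): validating a
// single value with tag style rules and asserting the returned error back to
// ValidationErrors.
func fieldExample(v *Validate) {
	if err := v.Field("not-an-email", "required,email"); err != nil {
		for _, fe := range err.(ValidationErrors) {
			fmt.Printf("field %s failed on the %q tag\n", fe.Field, fe.Tag)
		}
	}
}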
// FieldWithValue validates a single field against another field's value using tag style validation and returns nil or ValidationErrors.
// You will need to assert the error if it's not nil, i.e. err.(validator.ValidationErrors), to access the map of errors.
// NOTE: it returns ValidationErrors instead of a single FieldError because it can also
// validate Array, Slice and Map fields, which may contain more than one error.
func (v *Validate) FieldWithValue(val interface{}, field interface{}, tag string) error {
v.initCheck()
errs := v.errsPool.Get().(ValidationErrors)
topVal := reflect.ValueOf(val)
v.traverseField(topVal, topVal, reflect.ValueOf(field), blank, blank, errs, false, tag, blank, blank, false, false, nil, nil)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
// StructPartial validates the fields passed in only, ignoring all others.
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error
// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
func (v *Validate) StructPartial(current interface{}, fields ...string) error {
v.initCheck()
sv, _ := v.ExtractType(reflect.ValueOf(current))
name := sv.Type().Name()
m := map[string]*struct{}{}
if fields != nil {
for _, k := range fields {
flds := strings.Split(k, namespaceSeparator)
if len(flds) > 0 {
key := name + namespaceSeparator
for _, s := range flds {
idx := strings.Index(s, leftBracket)
if idx != -1 {
for idx != -1 {
key += s[:idx]
m[key] = emptyStructPtr
idx2 := strings.Index(s, rightBracket)
idx2++
key += s[idx:idx2]
m[key] = emptyStructPtr
s = s[idx2:]
idx = strings.Index(s, leftBracket)
}
} else {
key += s
m[key] = emptyStructPtr
}
key += namespaceSeparator
}
}
}
}
errs := v.errsPool.Get().(ValidationErrors)
v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, false, m, false)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
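// Illustrative usage sketch (not part of the original source): validating only
// the Name field of a hypothetical User value; the empty Email field is not
// checked.
func structPartialExample(v *Validate) error {
	type User struct {
		Name  string `validate:"required"`
		Email string `validate:"required,email"`
	}
	return v.StructPartial(User{Name: "frp"}, "Name")
}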
// StructExcept validates all fields except the ones passed in.
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name and returns nil or ValidationErrors as error
// You will need to assert the error if it's not nil i.e. err.(validator.ValidationErrors) to access the map of errors.
func (v *Validate) StructExcept(current interface{}, fields ...string) error {
v.initCheck()
sv, _ := v.ExtractType(reflect.ValueOf(current))
name := sv.Type().Name()
m := map[string]*struct{}{}
for _, key := range fields {
m[name+namespaceSeparator+key] = emptyStructPtr
}
errs := v.errsPool.Get().(ValidationErrors)
v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, len(m) != 0, true, m, false)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
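// Illustrative usage sketch (not part of the original source): the inverse of
// StructPartial, validating everything on the value passed in except its
// Email field.
func structExceptExample(v *Validate, current interface{}) error {
	return v.StructExcept(current, "Email")
}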
// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
// It returns nil or ValidationErrors as error.
// You will need to assert the error if it's not nil, i.e. err.(validator.ValidationErrors), to access the map of errors.
func (v *Validate) Struct(current interface{}) error {
v.initCheck()
errs := v.errsPool.Get().(ValidationErrors)
sv := reflect.ValueOf(current)
v.tranverseStruct(sv, sv, sv, blank, blank, errs, true, false, false, nil, false)
if len(errs) == 0 {
v.errsPool.Put(errs)
return nil
}
return errs
}
// tranverseStruct traverses a struct's fields and passes each one to traverseField for validation
func (v *Validate) tranverseStruct(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, useStructName bool, partial bool, exclude bool, includeExclude map[string]*struct{}, isStructOnly bool) {
if current.Kind() == reflect.Ptr && !current.IsNil() {
current = current.Elem()
}
if current.Kind() != reflect.Struct && current.Kind() != reflect.Interface {
panic("value passed for validation is not a struct")
}
// var ok bool
typ := current.Type()
sName := typ.Name()
if useStructName {
errPrefix += sName + namespaceSeparator
if v.fieldNameTag != blank {
nsPrefix += sName + namespaceSeparator
}
}
// structonly tag present: don't traverse fields,
// but must still check for and run the struct level
// validation below, if present
if !isStructOnly {
var fld reflect.StructField
// is anonymous struct, cannot parse or cache as
// it has no name to index by
if sName == blank {
var customName string
var ok bool
numFields := current.NumField()
for i := 0; i < numFields; i++ {
fld = typ.Field(i)
if fld.PkgPath != blank && !fld.Anonymous {
continue
}
if partial {
_, ok = includeExclude[errPrefix+fld.Name]
if (ok && exclude) || (!ok && !exclude) {
continue
}
}
customName = fld.Name
if v.fieldNameTag != blank {
name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0]
// dash check is for json: "-" means don't output the field in json
if name != blank && name != dash {
customName = name
}
}
v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, fld.Tag.Get(v.tagName), fld.Name, customName, partial, exclude, includeExclude, nil)
}
} else {
s, ok := v.structCache.Get(typ)
if !ok {
s = v.parseStruct(current, sName)
}
for i, f := range s.fields {
if partial {
_, ok = includeExclude[errPrefix+f.Name]
if (ok && exclude) || (!ok && !exclude) {
continue
}
}
fld = typ.Field(i)
v.traverseField(topStruct, currentStruct, current.Field(i), errPrefix, nsPrefix, errs, true, f.CachedTag.tag, fld.Name, f.AltName, partial, exclude, includeExclude, f.CachedTag)
}
}
}
// check for any struct level validations, after all field validations have already been checked.
if v.hasStructLevelFuncs {
if fn, ok := v.structLevelFuncs[current.Type()]; ok {
fn(v, &StructLevel{v: v, TopStruct: topStruct, CurrentStruct: current, errPrefix: errPrefix, nsPrefix: nsPrefix, errs: errs})
}
}
}
// traverseField validates any field, be it a struct or a single field, ensures its validity and passes it along to be validated via its tag options
func (v *Validate) traverseField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, isStructField bool, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
if tag == skipValidationTag {
return
}
if cTag == nil {
var isCached bool
cTag, isCached = v.tagCache.Get(tag)
if !isCached {
cTag = v.parseTags(tag, name)
}
}
current, kind := v.ExtractType(current)
var typ reflect.Type
switch kind {
case reflect.Ptr, reflect.Interface, reflect.Invalid:
if cTag.isOmitEmpty {
return
}
if tag != blank {
ns := errPrefix + name
if kind == reflect.Invalid {
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: cTag.tags[0].tag,
ActualTag: cTag.tags[0].tagVals[0][0],
Param: cTag.tags[0].tagVals[0][1],
Kind: kind,
}
return
}
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: cTag.tags[0].tag,
ActualTag: cTag.tags[0].tagVals[0][0],
Param: cTag.tags[0].tagVals[0][1],
Value: current.Interface(),
Kind: kind,
Type: current.Type(),
}
return
}
// if we get here tag length is zero and we can leave
if kind == reflect.Invalid {
return
}
case reflect.Struct:
typ = current.Type()
if typ != timeType {
if cTag.isNoStructLevel {
return
}
v.tranverseStruct(topStruct, current, current, errPrefix+name+namespaceSeparator, nsPrefix+customName+namespaceSeparator, errs, false, partial, exclude, includeExclude, cTag.isStructOnly)
return
}
}
if tag == blank {
return
}
typ = current.Type()
var dive bool
var diveSubTag string
for _, valTag := range cTag.tags {
if valTag.tagVals[0][0] == existsTag {
continue
}
if valTag.tagVals[0][0] == diveTag {
dive = true
diveSubTag = strings.TrimLeft(strings.SplitN(cTag.diveTag, diveTag, 2)[1], ",")
break
}
if valTag.tagVals[0][0] == omitempty {
if !HasValue(v, topStruct, currentStruct, current, typ, kind, blank) {
return
}
continue
}
if v.validateField(topStruct, currentStruct, current, typ, kind, errPrefix, nsPrefix, errs, valTag, name, customName) {
return
}
}
if dive {
// traverse slice or map here
// or panic ;)
switch kind {
case reflect.Slice, reflect.Array:
v.traverseSlice(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil)
case reflect.Map:
v.traverseMap(topStruct, currentStruct, current, errPrefix, nsPrefix, errs, diveSubTag, name, customName, partial, exclude, includeExclude, nil)
default:
// throw error, if not a slice or map then should not have gotten here
// bad dive tag
panic("dive error! can't dive on a non slice or map")
}
}
}
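// Illustrative sketch (not part of the original source): a struct shape that
// exercises the dive handling above; each slice element is validated with the
// tags that follow "dive" (the "validate" tag name is an assumption).
type diveExample struct {
	Emails []string `validate:"required,dive,email"`
}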
// traverseSlice traverses a Slice or Array's elements and passes them to traverseField for validation
func (v *Validate) traverseSlice(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
for i := 0; i < current.Len(); i++ {
v.traverseField(topStruct, currentStruct, current.Index(i), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(arrayIndexFieldName, name, i), fmt.Sprintf(arrayIndexFieldName, customName, i), partial, exclude, includeExclude, cTag)
}
}
// traverseMap traverses a map's elements and passes them to traverseField for validation
func (v *Validate) traverseMap(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, errPrefix string, nsPrefix string, errs ValidationErrors, tag, name, customName string, partial bool, exclude bool, includeExclude map[string]*struct{}, cTag *cachedTag) {
for _, key := range current.MapKeys() {
v.traverseField(topStruct, currentStruct, current.MapIndex(key), errPrefix, nsPrefix, errs, false, tag, fmt.Sprintf(mapIndexFieldName, name, key.Interface()), fmt.Sprintf(mapIndexFieldName, customName, key.Interface()), partial, exclude, includeExclude, cTag)
}
}
// validateField validates a field based on the provided tag's key and param values and returns true if there is an error or false if all ok
func (v *Validate) validateField(topStruct reflect.Value, currentStruct reflect.Value, current reflect.Value, currentType reflect.Type, currentKind reflect.Kind, errPrefix string, nsPrefix string, errs ValidationErrors, valTag *tagVals, name, customName string) bool {
var valFunc Func
var ok bool
if valTag.isOrVal {
errTag := blank
for _, val := range valTag.tagVals {
valFunc, ok = v.validationFuncs[val[0]]
if !ok {
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name)))
}
if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, val[1]) {
return false
}
errTag += orSeparator + val[0]
}
ns := errPrefix + name
if valTag.isAlias {
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: valTag.tag,
ActualTag: errTag[1:],
Value: current.Interface(),
Type: currentType,
Kind: currentKind,
}
} else {
errs[errPrefix+name] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: errTag[1:],
ActualTag: errTag[1:],
Value: current.Interface(),
Type: currentType,
Kind: currentKind,
}
}
return true
}
valFunc, ok = v.validationFuncs[valTag.tagVals[0][0]]
if !ok {
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, name)))
}
if valFunc(v, topStruct, currentStruct, current, currentType, currentKind, valTag.tagVals[0][1]) {
return false
}
ns := errPrefix + name
errs[ns] = &FieldError{
FieldNamespace: ns,
NameNamespace: nsPrefix + customName,
Name: customName,
Field: name,
Tag: valTag.tag,
ActualTag: valTag.tagVals[0][0],
Value: current.Interface(),
Param: valTag.tagVals[0][1],
Type: currentType,
Kind: currentKind,
}
return true
}

View File

@ -1,9 +0,0 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip
go_import_path: gopkg.in/yaml.v2

View File

@ -1,188 +0,0 @@
Copyright (c) 2011-2014 - Canonical Inc.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View File

@ -1,31 +0,0 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,131 +0,0 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
The import path for the package is *gopkg.in/yaml.v2*.
To install it, run:
go get gopkg.in/yaml.v2
API documentation
-----------------
If opened in a browser, the import path itself leads to the API documentation:
* [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
API stability
-------------
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
Example
-------
```Go
package main
import (
"fmt"
"log"
"gopkg.in/yaml.v2"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```

View File

@ -1,742 +0,0 @@
package yaml
import (
"io"
"os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue to the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_file_read_handler
parser.input_file = file
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

View File

@ -1,683 +0,0 @@
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"math"
"reflect"
"strconv"
"time"
)
const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
)
type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return &p
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
yaml_event_delete(&p.event)
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "unknown problem parsing YAML content"
}
failf("%s%s", where, msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
}
}
func (p *parser) parse() *node {
switch p.event.typ {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
panic("unreachable")
}
func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
}
}
func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.skip()
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
p.skip()
return n
}
func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.skip()
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.skip()
return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *node
aliases map[string]bool
mapType reflect.Type
terrors []string
}
var (
mapItemType = reflect.TypeOf(MapItem{})
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
)
func newDecoder() *decoder {
d := &decoder{mapType: defaultMapType}
d.aliases = make(map[string]bool)
return d
}
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
if n.tag != "" {
tag = n.tag
}
value := n.value
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
} else {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
d.unmarshal(n, reflect.ValueOf(v))
if len(d.terrors) > terrlen {
issues := d.terrors[terrlen:]
d.terrors = d.terrors[:terrlen]
return &TypeError{issues}
}
return nil
})
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
return out, false, false
}
again := true
for again {
again = false
if out.Kind() == reflect.Ptr {
if out.IsNil() {
out.Set(reflect.New(out.Type().Elem()))
}
out = out.Elem()
again = true
}
if out.CanAddr() {
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
good = d.callUnmarshaler(n, u)
return out, true, good
}
}
}
return out, false, false
}
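// Illustrative sketch (not part of the original source): a type implementing
// the Unmarshaler interface that prepare/callUnmarshaler dispatch to. The
// duration wrapper and the string format it accepts are assumptions for this
// example only.
type durationExample struct {
	time.Duration
}

func (d *durationExample) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = parsed
	return nil
}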
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
switch n.kind {
case documentNode:
return d.document(n, out)
case aliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.kind {
case scalarNode:
good = d.scalar(n, out)
case mappingNode:
good = d.mapping(n, out)
case sequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
}
return good
}
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
return true
}
return false
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
}
if d.aliases[n.value] {
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
tag = yaml_STR_TAG
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
failf("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if resolved == nil {
if out.Kind() == reflect.Map && !out.CanAddr() {
resetMap(out)
} else {
out.Set(reflect.Zero(out.Type()))
}
return true
}
if s, ok := resolved.(string); ok && out.CanAddr() {
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
err := u.UnmarshalText([]byte(s))
if err != nil {
fail(err)
}
return true
}
}
switch out.Kind() {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
out.SetString(n.value)
good = true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else {
out.Set(reflect.ValueOf(resolved))
}
good = true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
good = true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
good = true
}
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
good = true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
good = true
case int64:
out.SetFloat(float64(resolved))
good = true
case uint64:
out.SetFloat(float64(resolved))
good = true
case float64:
out.SetFloat(resolved)
good = true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
}
}
if !good {
d.terror(n, tag, out)
}
return good
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
l := len(n.children)
var iface reflect.Value
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, l))
default:
d.terror(n, yaml_SEQ_TAG, out)
return false
}
et := out.Type().Elem()
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
out.Index(j).Set(e)
j++
}
}
out.Set(out.Slice(0, j))
if iface.IsValid() {
iface.Set(out)
}
return true
}
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Struct:
return d.mappingStruct(n, out)
case reflect.Slice:
return d.mappingSlice(n, out)
case reflect.Map:
// okay
case reflect.Interface:
if d.mapType.Kind() == reflect.Map {
iface := out
out = reflect.MakeMap(d.mapType)
iface.Set(out)
} else {
slicev := reflect.New(d.mapType).Elem()
if !d.mappingSlice(n, slicev) {
return false
}
out.Set(slicev)
return true
}
default:
d.terror(n, yaml_MAP_TAG, out)
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
mapType := d.mapType
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
d.mapType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
l := len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
failf("invalid map key: %#v", k.Interface())
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
}
}
}
d.mapType = mapType
return true
}
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
d.terror(n, yaml_MAP_TAG, out)
return false
}
mapType := d.mapType
d.mapType = outt
var slice []MapItem
var l = len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
item := MapItem{}
k := reflect.ValueOf(&item.Key).Elem()
if d.unmarshal(n.children[i], k) {
v := reflect.ValueOf(&item.Value).Elem()
if d.unmarshal(n.children[i+1], v) {
slice = append(slice, item)
}
}
}
out.Set(reflect.ValueOf(slice))
d.mapType = mapType
return true
}
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
name := settableValueOf("")
l := len(n.children)
var inlineMap reflect.Value
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
d.merge(n.children[i+1], out)
continue
}
if !d.unmarshal(ni, name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = out.FieldByIndex(info.Inline)
}
d.unmarshal(n.children[i+1], field)
} else if sinfo.InlineMap != -1 {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
}
}
return true
}
func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
func (d *decoder) merge(n *node, out reflect.Value) {
switch n.kind {
case mappingNode:
d.unmarshal(n, out)
case aliasNode:
an, ok := d.doc.anchors[n.value]
if ok && an.kind != mappingNode {
failWantMap()
}
d.unmarshal(n, out)
case sequenceNode:
// Step backwards as earlier nodes take precedence.
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
an, ok := d.doc.anchors[ni.value]
if ok && an.kind != mappingNode {
failWantMap()
}
} else if ni.kind != mappingNode {
failWantMap()
}
d.unmarshal(ni, out)
}
default:
failWantMap()
}
}
func isMerge(n *node) bool {
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
}
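// Illustrative sketch (standalone example program, not part of this file): the
// merge() handling above lets a "<<" key fold an anchored mapping into the
// current one, with keys written after the merge overriding the merged values.
// Assumes the public gopkg.in/yaml.v2 import path for this vendored package.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	data := []byte(`
defaults: &defaults
  adapter: postgres
  host: localhost
production:
  <<: *defaults
  host: db.example.com
`)
	var cfg map[string]map[string]string
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	// adapter is inherited from the anchor; the explicit host wins.
	fmt.Println(cfg["production"]["adapter"], cfg["production"]["host"])
}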

File diff suppressed because it is too large

View File

@ -1,306 +0,0 @@
package yaml
import (
"encoding"
"fmt"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
}
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}
func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
e.emit()
e.emitter.open_ended = false
e.must(yaml_stream_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() {
e.nilv()
return
}
iface := in.Interface()
if m, ok := iface.(Marshaler); ok {
v, err := m.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
in = reflect.ValueOf(v)
} else if m, ok := iface.(encoding.TextMarshaler); ok {
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
}
switch in.Kind() {
case reflect.Interface:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
e.slicev(tag, in)
}
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if in.Type() == durationType {
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
} else {
e.intv(tag, in)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) itemsv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
for _, item := range slice {
e.marshal("", reflect.ValueOf(item.Key))
e.marshal("", reflect.ValueOf(item.Value))
}
})
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = in.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
f()
e.must(yaml_mapping_end_event_initialize(&e.event))
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
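// Illustrative sketch (standalone example program, not part of this file): a
// plain string such as "1:30" matches the base 60 pattern above, so stringv
// below emits it double-quoted; an old YAML 1.1 parser would otherwise read it
// back as the number 90. Assumes the public gopkg.in/yaml.v2 import path.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{"eta": "1:30"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected to print the value quoted: eta: "1:30"
}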
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
} else {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
}
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// FIXME: Handle 64 bits here.
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}
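// Illustrative sketch (standalone example program, not part of this file):
// marshal() above checks for the Marshaler interface before falling back to
// reflection, so a type can substitute its own representation. Assumes the
// public gopkg.in/yaml.v2 import path.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Celsius renders itself as a formatted string instead of a bare float.
type Celsius float64

func (c Celsius) MarshalYAML() (interface{}, error) {
	return fmt.Sprintf("%.1f C", float64(c)), nil
}

func main() {
	out, err := yaml.Marshal(map[string]Celsius{"temp": 21.5})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: temp: 21.5 C
}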

File diff suppressed because it is too large

View File

@ -1,394 +0,0 @@
package yaml
import (
"io"
)
// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
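// Illustrative sketch (standalone example, not part of this file): the
// surrogate-pair arithmetic used above, applied to the UTF-16 code units
// 0xD83D 0xDE00, recovers U+1F600.
package main

import "fmt"

func main() {
	w1, w2 := rune(0xD83D), rune(0xDE00) // high and low surrogates
	value := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
	fmt.Printf("U+%X\n", value) // U+1F600
}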

View File

@ -1,203 +0,0 @@
package yaml
import (
"encoding/base64"
"math"
"strconv"
"strings"
"unicode/utf8"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", yaml_MERGE_TAG, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
// TODO This can easily be made faster and produce less garbage.
if strings.HasPrefix(tag, longTagPrefix) {
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
return true
}
return false
}
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, -int(intv)
} else {
return yaml_INT_TAG, -intv
}
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
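// Illustrative sketch (standalone example program, not part of this file):
// resolve() above is what gives untagged plain scalars their natural Go types
// when decoding into an interface{}. Assumes the public gopkg.in/yaml.v2
// import path.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	data := []byte("a: yes\nb: 0x1A\nc: .inf\nd: hello")
	var m map[string]interface{}
	if err := yaml.Unmarshal(data, &m); err != nil {
		panic(err)
	}
	for _, k := range []string{"a", "b", "c", "d"} {
		fmt.Printf("%s: %T %v\n", k, m[k], m[k])
	}
	// expected roughly: bool true, int 26, float64 +Inf, string hello
}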

File diff suppressed because it is too large

View File

@ -1,104 +0,0 @@
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
return bl
}
var ai, bi int
var an, bn int64
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
// keyFloat returns a float value for v if it is a number or bool,
// and reports whether it is one.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}
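// Illustrative sketch (standalone example program, not part of this file):
// the comparison above gives encoded map keys a natural order, so "a2" sorts
// before "a10" rather than after it lexicographically. Assumes the public
// gopkg.in/yaml.v2 import path.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"a10": 10, "a2": 2, "a1": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected key order: a1, a2, a10
}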

View File

@ -1,89 +0,0 @@
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.
// Read the next UTF-8 character.
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}

View File

@ -1,346 +0,0 @@
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/go-yaml/yaml
//
package yaml
import (
"errors"
"fmt"
"reflect"
"strings"
"sync"
)
// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
type MapSlice []MapItem
// MapItem is an item in a MapSlice.
type MapItem struct {
Key, Value interface{}
}
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to a type
// mismatch, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
defer handleErr(&err)
d := newDecoder()
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
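// Illustrative sketch (standalone example program, not part of this file):
// when a value cannot be decoded, Unmarshal keeps going and reports the
// failures through *TypeError, as described above. Assumes the public
// gopkg.in/yaml.v2 import path.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type T struct {
	A int
	B string
}

func main() {
	var t T
	err := yaml.Unmarshal([]byte("a: not-a-number\nb: ok"), &t)
	if te, ok := err.(*yaml.TypeError); ok {
		fmt.Println("type errors:", te.Errors) // lists the field that failed
	}
	fmt.Println("b still decoded:", t.B) // ok
}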
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Does not apply to zero valued structs.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int "a,omitempty"
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshal("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
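// Illustrative sketch (standalone example program, not part of this file):
// the "flow" flag documented above switches a nested value to flow style.
// Assumes the public gopkg.in/yaml.v2 import path.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Point struct {
	X, Y int
}

type Shape struct {
	Center Point `yaml:"center,flow"`
}

func main() {
	out, err := yaml.Marshal(&Shape{Center: Point{1, 2}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: center: {x: 1, y: 2}
}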
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
//return nil, errors.New("Option ,inline needs a struct value or map field")
return nil, errors.New("Option ,inline needs a struct value field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
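// Illustrative sketch (standalone example program, not part of this file): an
// ",inline" map with string keys, as accepted above, collects every key that
// does not match another struct field. Assumes the public gopkg.in/yaml.v2
// import path.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Config struct {
	Name  string                 `yaml:"name"`
	Extra map[string]interface{} `yaml:",inline"`
}

func main() {
	var c Config
	data := []byte("name: demo\nregion: us-east\nretries: 3")
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Name, c.Extra["region"], c.Extra["retries"]) // demo us-east 3
}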
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}

View File

@ -1,716 +0,0 @@
package yaml
import (
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for YAML_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
// [in,out] data A pointer to an application data specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source.
// [in] size The size of the buffer.
// [out] size_read The actual number of bytes read from the source.
//
// On success, the handler should return the number of bytes read and a nil
// error. Once the source is exhausted it should report io.EOF, optionally
// together with any final bytes that were read.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
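// Illustrative sketch (hypothetical helper, not part of this file): one way to
// satisfy the contract above, feeding the parser from an in-memory slice and
// reporting exhaustion with io.EOF. The parser normally installs its own
// handlers for []byte and io.Reader inputs.
func sliceReadHandler(input []byte) yaml_read_handler_t {
	pos := 0
	return func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
		if pos >= len(input) {
			return 0, io.EOF // no more bytes; the reader then marks parser.eof
		}
		n = copy(buffer, input[pos:])
		pos += n
		return n, nil
	}
}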
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
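// Illustrative helper (not part of the original file): the String method above lets
// parser states print as readable names instead of bare integers when tracing the
// state machine. debugState(yaml_PARSE_FLOW_NODE_STATE) returns
// "parser is in yaml_PARSE_FLOW_NODE_STATE".
func debugState(ps yaml_parser_state_t) string {
	return "parser is in " + ps.String()
}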
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_file io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
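// A yaml_parser_t is never constructed directly by application code; it is set up and
// driven by the yaml_parser_ family of functions, which the package's public API wraps.
// Assuming these are the gopkg.in/yaml.v2 sources, a minimal, hypothetical usage sketch
// of that public entry point looks like the commented program below (the config field
// is invented for illustration):
//
//	package main
//
//	import (
//		"fmt"
//
//		"gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var conf struct {
//			BindPort int `yaml:"bind_port"`
//		}
//		// Unmarshal builds a parser internally and runs the state machine above.
//		if err := yaml.Unmarshal([]byte("bind_port: 7000"), &conf); err != nil {
//			fmt.Println("yaml parse error:", err)
//			return
//		}
//		fmt.Println(conf.BindPort) // 7000
//	}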
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write @a size bytes of the
// @a buffer to the output.
//
// @param[in,out] data A pointer to an application data specified by
// yaml_emitter_set_output().
// @param[in] buffer The buffer with bytes to be written.
// @param[in] size The size of the buffer.
//
// @returns On success, the handler should return @c 1. If the handler failed,
// the returned value should be @c 0.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
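// A hedged sketch of a handler satisfying yaml_write_handler_t (illustrative only, not
// one of the package's own handlers): it copies each flushed chunk into a caller-owned
// buffer. It assumes "bytes" is imported.
func newBufferWriteHandler(out *bytes.Buffer) yaml_write_handler_t {
	return func(emitter *yaml_emitter_t, buffer []byte) error {
		// buffer holds whatever the emitter accumulated since the last flush.
		_, err := out.Write(buffer)
		return err
	}
}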
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_file io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // Is the output in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // Was the last character whitespace?
indention bool // Was the last character an indentation character (' ', '-', '?', ':')?
open_ended bool // Is an explicit document end required?
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Dumper stuff
opened bool // Has the stream already been opened?
closed bool // Has the stream already been closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // Has the node been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}
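// Like the parser, yaml_emitter_t is managed through the yaml_emitter_ family of
// functions and is normally reached via the public API. Assuming these are the
// gopkg.in/yaml.v2 sources, a minimal, hypothetical usage sketch (a separate program,
// for illustration only):
//
//	package main
//
//	import (
//		"fmt"
//
//		"gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		// Marshal drives the emitter state machine declared above.
//		out, err := yaml.Marshal(map[string]int{"bind_port": 7000})
//		if err != nil {
//			fmt.Println("yaml emit error:", err)
//			return
//		}
//		fmt.Print(string(out)) // bind_port: 7000
//	}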

View File

@ -1,173 +0,0 @@
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
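// Worked example for the hex helpers (illustrative only; it would live alongside them):
// decoding the two hex digits of an escape such as "\x1F" combines as_hex results.
// For b = []byte("1F"): as_hex(b, 0) == 1 and as_hex(b, 1) == 15, so the value is
// 1<<4 | 15 == 0x1F.
func decodeTwoHexDigits(b []byte, i int) int {
	return as_hex(b, i)<<4 | as_hex(b, i+1)
}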
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
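// Worked example for is_printable (illustrative only): "é" (U+00E9) is encoded in UTF-8
// as 0xC3 0xA9. Its lead byte is greater than 0xC2 and less than 0xED, so the fourth
// clause above reports it printable without inspecting the continuation byte.
func exampleEAcuteIsPrintable() bool {
	return is_printable([]byte{0xC3, 0xA9}, 0) // true
}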
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return ( // is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return ( // is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return ( // is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
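// Worked examples for width (illustrative only): 'a' (0x61) has the high bit clear, so
// its width is 1; the line separator U+2028 encodes as 0xE2 0x80 0xA8, and since
// 0xE2&0xF0 == 0xE0, width reports 3 for its lead byte.
func exampleWidths() (ascii, lineSeparator int) {
	return width('a'), width(0xE2) // 1, 3
}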

View File

@ -1,12 +1,19 @@
export PATH := $(GOPATH)/bin:$(PATH)
export GOPATH := $(shell pwd)/Godeps/_workspace:$(shell pwd):$(GOPATH)
export GO15VENDOREXPERIMENT := 1
all: build
all: fmt build
build: fmt frps frpc build_test
build: frps frpc build_test
build_test: echo_server http_server
# compile assets into binary file
assets:
go get -d github.com/rakyll/statik
@go install github.com/rakyll/statik
@rm -rf ./src/assets/statik
go generate ./src/...
fmt:
go fmt ./src/...
@go fmt ./test/echo_server.go
@ -14,10 +21,11 @@ fmt:
@go fmt ./test/func_test.go
frps:
go build -o bin/frps ./src/frp/cmd/frps
go build -o bin/frps ./src/cmd/frps
@cp -rf ./src/assets/static ./bin
frpc:
go build -o bin/frpc ./src/frp/cmd/frpc
go build -o bin/frpc ./src/cmd/frpc
echo_server:
go build -o test/bin/echo_server ./test/echo_server.go
@ -42,3 +50,6 @@ clean:
rm -f ./test/bin/echo_server
rm -f ./test/bin/http_server
cd ./test && ./clean_test.sh && cd -
save:
godep save ./src/...
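The new assets target above installs rakyll/statik and relies on go generate ./src/... to pack the dashboard's static files into the binary. A plausible sketch of the kind of file that generate step picks up is shown below; the package name and -src path are assumptions for illustration, not taken from this commit.
// assets.go (hypothetical): running `go generate` invokes statik, which writes a
// generated statik package embedding everything under ./static, so frps can serve
// its dashboard without loose files on disk.
package assets

//go:generate statik -src=./static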

View File

@ -1,13 +1,12 @@
export PATH := $(GOPATH)/bin:$(PATH)
export OLDGOPATH := $(GOPATH)
export GOPATH := $(shell pwd)/Godeps/_workspace:$(shell pwd):$(GOPATH)
export GO15VENDOREXPERIMENT := 1
all: build
build: gox app
gox:
GOPATH=$(OLDGOPATH) go get github.com/mitchellh/gox
go get github.com/mitchellh/gox
app:
gox -osarch "darwin/386 darwin/amd64 linux/386 linux/amd64 linux/arm windows/386 windows/amd64" ./src/...

Some files were not shown because too many files have changed in this diff.