mirror of
https://github.com/TwiN/gatus.git
synced 2024-11-27 18:33:31 +01:00
chore: Remove vendor folder
This commit is contained in:
parent
21f62f362f
commit
22925c9ffc
1
vendor/github.com/TwiN/deepmerge/.gitattributes
generated
vendored
1
vendor/github.com/TwiN/deepmerge/.gitattributes
generated
vendored
@ -1 +0,0 @@
|
|||||||
* text=auto eol=lf
|
|
13
vendor/github.com/TwiN/deepmerge/.gitignore
generated
vendored
13
vendor/github.com/TwiN/deepmerge/.gitignore
generated
vendored
@ -1,13 +0,0 @@
|
|||||||
# IDE
|
|
||||||
*.iml
|
|
||||||
.idea
|
|
||||||
.vscode
|
|
||||||
|
|
||||||
# OS
|
|
||||||
.DS_Store
|
|
||||||
|
|
||||||
# JS
|
|
||||||
node_modules
|
|
||||||
|
|
||||||
# Go
|
|
||||||
/vendor
|
|
21
vendor/github.com/TwiN/deepmerge/LICENSE
generated
vendored
21
vendor/github.com/TwiN/deepmerge/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2023 TwiN
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
135
vendor/github.com/TwiN/deepmerge/README.md
generated
vendored
135
vendor/github.com/TwiN/deepmerge/README.md
generated
vendored
@ -1,135 +0,0 @@
|
|||||||
# deepmerge
|
|
||||||
![test](https://github.com/TwiN/deepmerge/workflows/test/badge.svg?branch=master)
|
|
||||||
|
|
||||||
Go library for deep merging YAML or JSON files.
|
|
||||||
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
### YAML
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/TwiN/deepmerge"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
dst := `
|
|
||||||
debug: true
|
|
||||||
client:
|
|
||||||
insecure: true
|
|
||||||
users:
|
|
||||||
- id: 1
|
|
||||||
firstName: John
|
|
||||||
lastName: Doe
|
|
||||||
- id: 2
|
|
||||||
firstName: Jane
|
|
||||||
lastName: Doe`
|
|
||||||
src := `
|
|
||||||
client:
|
|
||||||
timeout: 5s
|
|
||||||
users:
|
|
||||||
- id: 3
|
|
||||||
firstName: Bob
|
|
||||||
lastName: Smith`
|
|
||||||
output, err := deepmerge.YAML([]byte(dst), []byte(src))
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
println(string(output))
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Output:
|
|
||||||
```yaml
|
|
||||||
client:
|
|
||||||
insecure: true
|
|
||||||
timeout: 5s
|
|
||||||
debug: true
|
|
||||||
users:
|
|
||||||
- firstName: John
|
|
||||||
id: 1
|
|
||||||
lastName: Doe
|
|
||||||
- firstName: Jane
|
|
||||||
id: 2
|
|
||||||
lastName: Doe
|
|
||||||
- firstName: Bob
|
|
||||||
id: 3
|
|
||||||
lastName: Smith
|
|
||||||
```
|
|
||||||
|
|
||||||
### JSON
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/TwiN/deepmerge"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
dst := `{
|
|
||||||
"debug": true,
|
|
||||||
"client": {
|
|
||||||
"insecure": true
|
|
||||||
},
|
|
||||||
"users": [
|
|
||||||
{
|
|
||||||
"id": 1,
|
|
||||||
"firstName": "John",
|
|
||||||
"lastName": "Doe"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": 2,
|
|
||||||
"firstName": "Jane",
|
|
||||||
"lastName": "Doe"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}`
|
|
||||||
src := `{
|
|
||||||
"client": {
|
|
||||||
"timeout": "5s"
|
|
||||||
},
|
|
||||||
"users": [
|
|
||||||
{
|
|
||||||
"id": 3,
|
|
||||||
"firstName": "Bob",
|
|
||||||
"lastName": "Smith"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}`
|
|
||||||
output, err := deepmerge.JSON([]byte(dst), []byte(src))
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
println(string(output))
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Output:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"client": {
|
|
||||||
"insecure": true,
|
|
||||||
"timeout": "5s"
|
|
||||||
},
|
|
||||||
"debug": true,
|
|
||||||
"users": [
|
|
||||||
{
|
|
||||||
"firstName": "John",
|
|
||||||
"id": 1,
|
|
||||||
"lastName": "Doe"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"firstName": "Jane",
|
|
||||||
"id": 2,
|
|
||||||
"lastName": "Doe"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"firstName": "Bob",
|
|
||||||
"id": 3,
|
|
||||||
"lastName": "Smith"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
10
vendor/github.com/TwiN/deepmerge/config.go
generated
vendored
10
vendor/github.com/TwiN/deepmerge/config.go
generated
vendored
@ -1,10 +0,0 @@
|
|||||||
package deepmerge
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
// PreventMultipleDefinitionsOfKeysWithPrimitiveValue causes the return of an error if dst and src define
|
|
||||||
// the same key and if said key has a value with a primitive type
|
|
||||||
// This does not apply to slices or maps.
|
|
||||||
//
|
|
||||||
// Defaults to true
|
|
||||||
PreventMultipleDefinitionsOfKeysWithPrimitiveValue bool
|
|
||||||
}
|
|
48
vendor/github.com/TwiN/deepmerge/deepmerge.go
generated
vendored
48
vendor/github.com/TwiN/deepmerge/deepmerge.go
generated
vendored
@ -1,48 +0,0 @@
|
|||||||
package deepmerge
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrKeyWithPrimitiveValueDefinedMoreThanOnce = errors.New("error due to parameter with value of primitive type: only maps and slices/arrays can be merged, which means you cannot have define the same key twice for parameters that are not maps or slices/arrays")
|
|
||||||
)
|
|
||||||
|
|
||||||
func DeepMerge(dst, src map[string]interface{}, config Config) error {
|
|
||||||
for srcKey, srcValue := range src {
|
|
||||||
if srcValueAsMap, ok := srcValue.(map[string]interface{}); ok { // handle maps
|
|
||||||
if dstValue, ok := dst[srcKey]; ok {
|
|
||||||
if dstValueAsMap, ok := dstValue.(map[string]interface{}); ok {
|
|
||||||
err := DeepMerge(dstValueAsMap, srcValueAsMap, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dst[srcKey] = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
err := DeepMerge(dst[srcKey].(map[string]interface{}), srcValueAsMap, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else if srcValueAsSlice, ok := srcValue.([]interface{}); ok { // handle slices
|
|
||||||
if dstValue, ok := dst[srcKey]; ok {
|
|
||||||
if dstValueAsSlice, ok := dstValue.([]interface{}); ok {
|
|
||||||
// If both src and dst are slices, we'll copy the elements from that src slice over to the dst slice
|
|
||||||
dst[srcKey] = append(dstValueAsSlice, srcValueAsSlice...)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dst[srcKey] = srcValueAsSlice
|
|
||||||
} else { // handle primitives
|
|
||||||
if config.PreventMultipleDefinitionsOfKeysWithPrimitiveValue {
|
|
||||||
if _, ok := dst[srcKey]; ok {
|
|
||||||
return ErrKeyWithPrimitiveValueDefinedMoreThanOnce
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dst[srcKey] = srcValue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
31
vendor/github.com/TwiN/deepmerge/json.go
generated
vendored
31
vendor/github.com/TwiN/deepmerge/json.go
generated
vendored
@ -1,31 +0,0 @@
|
|||||||
package deepmerge
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// JSON merges the contents of src into dst
|
|
||||||
func JSON(dst, src []byte, optionalConfig ...Config) ([]byte, error) {
|
|
||||||
var cfg Config
|
|
||||||
if len(optionalConfig) > 0 {
|
|
||||||
cfg = optionalConfig[0]
|
|
||||||
} else {
|
|
||||||
cfg = Config{PreventMultipleDefinitionsOfKeysWithPrimitiveValue: true}
|
|
||||||
}
|
|
||||||
var dstMap, srcMap map[string]interface{}
|
|
||||||
err := json.Unmarshal(dst, &dstMap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = json.Unmarshal(src, &srcMap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if dstMap == nil {
|
|
||||||
dstMap = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
if err = DeepMerge(dstMap, srcMap, cfg); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return json.Marshal(dstMap)
|
|
||||||
}
|
|
31
vendor/github.com/TwiN/deepmerge/yaml.go
generated
vendored
31
vendor/github.com/TwiN/deepmerge/yaml.go
generated
vendored
@ -1,31 +0,0 @@
|
|||||||
package deepmerge
|
|
||||||
|
|
||||||
import (
|
|
||||||
"gopkg.in/yaml.v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
// YAML merges the contents of src into dst
|
|
||||||
func YAML(dst, src []byte, optionalConfig ...Config) ([]byte, error) {
|
|
||||||
var cfg Config
|
|
||||||
if len(optionalConfig) > 0 {
|
|
||||||
cfg = optionalConfig[0]
|
|
||||||
} else {
|
|
||||||
cfg = Config{PreventMultipleDefinitionsOfKeysWithPrimitiveValue: true}
|
|
||||||
}
|
|
||||||
var dstMap, srcMap map[string]interface{}
|
|
||||||
err := yaml.Unmarshal(dst, &dstMap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
err = yaml.Unmarshal(src, &srcMap)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if dstMap == nil {
|
|
||||||
dstMap = make(map[string]interface{})
|
|
||||||
}
|
|
||||||
if err = DeepMerge(dstMap, srcMap, cfg); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return yaml.Marshal(dstMap)
|
|
||||||
}
|
|
1
vendor/github.com/TwiN/g8/.gitattributes
generated
vendored
1
vendor/github.com/TwiN/g8/.gitattributes
generated
vendored
@ -1 +0,0 @@
|
|||||||
* text=lf
|
|
2
vendor/github.com/TwiN/g8/.gitignore
generated
vendored
2
vendor/github.com/TwiN/g8/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
.idea
|
|
||||||
*.iml
|
|
21
vendor/github.com/TwiN/g8/LICENSE
generated
vendored
21
vendor/github.com/TwiN/g8/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2022 TwiN
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
287
vendor/github.com/TwiN/g8/README.md
generated
vendored
287
vendor/github.com/TwiN/g8/README.md
generated
vendored
@ -1,287 +0,0 @@
|
|||||||
# g8
|
|
||||||
|
|
||||||
![test](https://github.com/TwiN/g8/workflows/test/badge.svg?branch=master)
|
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/TwiN/g8)](https://goreportcard.com/report/github.com/TwiN/g8)
|
|
||||||
[![codecov](https://codecov.io/gh/TwiN/g8/branch/master/graph/badge.svg)](https://codecov.io/gh/TwiN/g8)
|
|
||||||
[![Go version](https://img.shields.io/github/go-mod/go-version/TwiN/g8.svg)](https://github.com/TwiN/g8)
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/github.com/TwiN/g8.svg)](https://pkg.go.dev/github.com/TwiN/g8)
|
|
||||||
[![Follow TwiN](https://img.shields.io/github/followers/TwiN?label=Follow&style=social)](https://github.com/TwiN)
|
|
||||||
|
|
||||||
g8, pronounced gate, is a simple Go library for protecting HTTP handlers.
|
|
||||||
|
|
||||||
Tired of constantly re-implementing a security layer for each application? Me too, that's why I made g8.
|
|
||||||
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
```console
|
|
||||||
go get -u github.com/TwiN/g8
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
Because the entire purpose of g8 is to NOT waste time configuring the layer of security, the primary emphasis is to
|
|
||||||
keep it as simple as possible.
|
|
||||||
|
|
||||||
|
|
||||||
### Simple
|
|
||||||
Just want a simple layer of security without the need for advanced permissions? This configuration is what you're
|
|
||||||
looking for.
|
|
||||||
|
|
||||||
```go
|
|
||||||
authorizationService := g8.NewAuthorizationService().WithToken("mytoken")
|
|
||||||
gate := g8.New().WithAuthorizationService(authorizationService)
|
|
||||||
|
|
||||||
router := http.NewServeMux()
|
|
||||||
router.Handle("/unprotected", yourHandler)
|
|
||||||
router.Handle("/protected", gate.Protect(yourHandler))
|
|
||||||
|
|
||||||
http.ListenAndServe(":8080", router)
|
|
||||||
```
|
|
||||||
|
|
||||||
The endpoint `/protected` is now only accessible if you pass the header `Authorization: Bearer mytoken`.
|
|
||||||
|
|
||||||
If you use `http.HandleFunc` instead of `http.Handle`, you may use `gate.ProtectFunc(yourHandler)` instead.
|
|
||||||
|
|
||||||
If you're not using the `Authorization` header, you can specify a custom token extractor.
|
|
||||||
This enables use cases like [Protecting a handler using session cookie](#protecting-a-handler-using-session-cookie)
|
|
||||||
|
|
||||||
|
|
||||||
### Advanced permissions
|
|
||||||
If you have tokens with more permissions than others, g8's permission system will make managing authorization a breeze.
|
|
||||||
|
|
||||||
Rather than registering tokens, think of it as registering clients, the only difference being that clients may be
|
|
||||||
configured with permissions while tokens cannot.
|
|
||||||
|
|
||||||
```go
|
|
||||||
authorizationService := g8.NewAuthorizationService().WithClient(g8.NewClient("mytoken").WithPermission("admin"))
|
|
||||||
gate := g8.New().WithAuthorizationService(authorizationService)
|
|
||||||
|
|
||||||
router := http.NewServeMux()
|
|
||||||
router.Handle("/unprotected", yourHandler)
|
|
||||||
router.Handle("/protected-with-admin", gate.ProtectWithPermissions(yourHandler, []string{"admin"}))
|
|
||||||
|
|
||||||
http.ListenAndServe(":8080", router)
|
|
||||||
```
|
|
||||||
|
|
||||||
The endpoint `/protected-with-admin` is now only accessible if you pass the header `Authorization: Bearer mytoken`,
|
|
||||||
because the client with the token `mytoken` has the permission `admin`. Note that the following handler would also be
|
|
||||||
accessible with that token:
|
|
||||||
```go
|
|
||||||
router.Handle("/protected", gate.Protect(yourHandler))
|
|
||||||
```
|
|
||||||
|
|
||||||
To clarify, both clients and tokens have access to handlers that aren't protected with extra permissions, and
|
|
||||||
essentially, tokens are registered as clients with no extra permissions in the background.
|
|
||||||
|
|
||||||
Creating a token like so:
|
|
||||||
```go
|
|
||||||
authorizationService := g8.NewAuthorizationService().WithToken("mytoken")
|
|
||||||
```
|
|
||||||
is the equivalent of creating the following client:
|
|
||||||
```go
|
|
||||||
authorizationService := g8.NewAuthorizationService().WithClient(g8.NewClient("mytoken"))
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### With client provider
|
|
||||||
A client provider's task is to retrieve a Client from an external source (e.g. a database) when provided with a token.
|
|
||||||
You should use a client provider when you have a lot of tokens and it wouldn't make sense to register all of them using
|
|
||||||
`AuthorizationService`'s `WithToken`/`WithTokens`/`WithClient`/`WithClients`.
|
|
||||||
|
|
||||||
Note that the provider is used as a fallback source. As such, if a token is explicitly registered using one of the 4
|
|
||||||
aforementioned functions, the client provider will not be used.
|
|
||||||
|
|
||||||
```go
|
|
||||||
clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
|
|
||||||
// We'll assume that the following function calls your database and returns a struct "User" that
|
|
||||||
// has the user's token as well as the permissions granted to said user
|
|
||||||
user := database.GetUserByToken(token)
|
|
||||||
if user != nil {
|
|
||||||
return g8.NewClient(user.Token).WithPermissions(user.Permissions)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
authorizationService := g8.NewAuthorizationService().WithClientProvider(clientProvider)
|
|
||||||
gate := g8.New().WithAuthorizationService(authorizationService)
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also configure the client provider to cache the output of the function you provide to retrieve clients by token:
|
|
||||||
```go
|
|
||||||
clientProvider := g8.NewClientProvider(...).WithCache(ttl, maxSize)
|
|
||||||
```
|
|
||||||
|
|
||||||
Since g8 leverages [TwiN/gocache](https://github.com/TwiN/gocache), you can also use gocache's
|
|
||||||
constants for configuring the TTL and the maximum size:
|
|
||||||
- Setting the TTL to `gocache.NoExpiration` (-1) will disable the TTL.
|
|
||||||
- Setting the maximum size to `gocache.NoMaxSize` (0) will disable the maximum cache size
|
|
||||||
|
|
||||||
If you're using a TTL and have a lot of tokens (100k+), you may want to use `clientProvider.StartJanitor()` to allow
|
|
||||||
the cache to passively delete expired entries. If you have to re-initialize the client provider after the janitor has
|
|
||||||
been started, make sure to stop the janitor first (`clientProvider.StopJanitor()`). This is because the janitor runs on
|
|
||||||
a separate goroutine, thus, if you were to re-create a client provider and re-assign it, the old client provider would
|
|
||||||
still exist in memory with the old cache. I'm only specifying this for completeness, because for the overwhelming
|
|
||||||
majority of people, the gate will be created on application start and never modified again until the application shuts
|
|
||||||
down, in which case, you don't even need to worry about stopping the janitor.
|
|
||||||
|
|
||||||
To avoid any misunderstandings, using a client provider is not mandatory. If you only have a few tokens and you can load
|
|
||||||
them on application start, you can just leverage `AuthorizationService`'s `WithToken`/`WithTokens`/`WithClient`/`WithClients`.
|
|
||||||
|
|
||||||
|
|
||||||
## AuthorizationService
|
|
||||||
As the previous examples may have hinted, there are several ways to create clients. The one thing they have
|
|
||||||
in common is that they all go through AuthorizationService, which is in charge of both managing clients and determining
|
|
||||||
whether a request should be blocked or allowed through.
|
|
||||||
|
|
||||||
| Function | Description |
|
|
||||||
|:-------------------|:---------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| WithToken | Creates a single static client with no extra permissions |
|
|
||||||
| WithTokens | Creates a slice of static clients with no extra permissions |
|
|
||||||
| WithClient | Creates a single static client |
|
|
||||||
| WithClients | Creates a slice of static clients |
|
|
||||||
| WithClientProvider | Creates a client provider which will allow a fallback to a dynamic source (e.g. to a database) when a static client is not found |
|
|
||||||
|
|
||||||
Except for `WithClientProvider`, every functions listed above can be called more than once.
|
|
||||||
As a result, you may safely perform actions like this:
|
|
||||||
```go
|
|
||||||
authorizationService := g8.NewAuthorizationService().
|
|
||||||
WithToken("123").
|
|
||||||
WithToken("456").
|
|
||||||
WithClient(g8.NewClient("789").WithPermission("admin"))
|
|
||||||
gate := g8.New().WithAuthorizationService(authorizationService)
|
|
||||||
```
|
|
||||||
|
|
||||||
Be aware that g8.Client supports a list of permissions as well. You may call `WithPermission` several times, or call
|
|
||||||
`WithPermissions` with a slice of permissions instead.
|
|
||||||
|
|
||||||
|
|
||||||
### Permissions
|
|
||||||
Unlike client permissions, handler permissions are requirements.
|
|
||||||
|
|
||||||
A client may have as many permissions as you want, but for said client to have access to a handler protected by
|
|
||||||
permissions, the client must have all permissions defined by said handler in order to have access to it.
|
|
||||||
|
|
||||||
In other words, a client with the permissions `create`, `read`, `update` and `delete` would have access to all of these handlers:
|
|
||||||
```go
|
|
||||||
gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("mytoken").WithPermissions([]string{"create", "read", "update", "delete"})))
|
|
||||||
router := http.NewServeMux()
|
|
||||||
router.Handle("/", gate.Protect(homeHandler)) // equivalent of gate.ProtectWithPermissions(homeHandler, []string{})
|
|
||||||
router.Handle("/create", gate.ProtectWithPermissions(createHandler, []string{"create"}))
|
|
||||||
router.Handle("/read", gate.ProtectWithPermissions(readHandler, []string{"read"}))
|
|
||||||
router.Handle("/update", gate.ProtectWithPermissions(updateHandler, []string{"update"}))
|
|
||||||
router.Handle("/delete", gate.ProtectWithPermissions(deleteHandler, []string{"delete"}))
|
|
||||||
router.Handle("/crud", gate.ProtectWithPermissions(crudHandler, []string{"create", "read", "update", "delete"}))
|
|
||||||
```
|
|
||||||
But it would not have access to the following handler, because while `mytoken` has the `read` permission, it does not
|
|
||||||
have the `backup` permission:
|
|
||||||
```go
|
|
||||||
router.Handle("/backup", gate.ProtectWithPermissions(&testHandler{}, []string{"read", "backup"}))
|
|
||||||
```
|
|
||||||
|
|
||||||
If you're using an HTTP library that supports middlewares like [mux](https://github.com/gorilla/mux), you can protect
|
|
||||||
an entire group of handlers instead using `gate.Protect` or `gate.PermissionMiddleware()`:
|
|
||||||
```go
|
|
||||||
router := mux.NewRouter()
|
|
||||||
|
|
||||||
userRouter := router.PathPrefix("/").Subrouter()
|
|
||||||
userRouter.Use(gate.Protect)
|
|
||||||
userRouter.HandleFunc("/api/v1/users/me", getUserProfile).Methods("GET")
|
|
||||||
userRouter.HandleFunc("/api/v1/users/me/friends", getUserFriends).Methods("GET")
|
|
||||||
userRouter.HandleFunc("/api/v1/users/me/email", updateUserEmail).Methods("PATCH")
|
|
||||||
|
|
||||||
adminRouter := router.PathPrefix("/").Subrouter()
|
|
||||||
adminRouter.Use(gate.PermissionMiddleware("admin"))
|
|
||||||
adminRouter.HandleFunc("/api/v1/users/{id}/ban", banUserByID).Methods("POST")
|
|
||||||
adminRouter.HandleFunc("/api/v1/users/{id}/delete", deleteUserByID).Methods("DELETE")
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Rate limiting
|
|
||||||
To add a rate limit of 100 requests per second:
|
|
||||||
```go
|
|
||||||
gate := g8.New().WithRateLimit(100)
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Accessing the token from the protected handlers
|
|
||||||
If you need to access the token from the handlers you are protecting with g8, you can retrieve it from the
|
|
||||||
request context by using the key `g8.TokenContextKey`:
|
|
||||||
```go
|
|
||||||
http.Handle("/handle", gate.ProtectFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
token, _ := r.Context().Value(g8.TokenContextKey).(string)
|
|
||||||
// ...
|
|
||||||
}))
|
|
||||||
```
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
### Protecting a handler using session cookie
|
|
||||||
If you want to only allow authenticated users to access a handler, you can use a custom token extractor function
|
|
||||||
combined with a client provider.
|
|
||||||
|
|
||||||
First, we'll create a function to extract the session ID from the session cookie. While a session ID does not
|
|
||||||
theoretically refer to a token, g8 uses the term `token` as a blanket term to refer to any string that can be used to
|
|
||||||
identify a client.
|
|
||||||
```go
|
|
||||||
customTokenExtractorFunc := func(request *http.Request) string {
|
|
||||||
sessionCookie, err := request.Cookie("session")
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return sessionCookie.Value
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Next, we need to create a client provider that will validate our token, which refers to the session ID in this case.
|
|
||||||
```go
|
|
||||||
clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
|
|
||||||
// We'll assume that the following function calls your database and validates whether the session is valid.
|
|
||||||
isSessionValid := database.CheckIfSessionIsValid(token)
|
|
||||||
if !isSessionValid {
|
|
||||||
return nil // Returning nil will cause the gate to return a 401 Unauthorized.
|
|
||||||
}
|
|
||||||
// You could also retrieve the user and their permissions if you wanted instead, but for this example,
|
|
||||||
// all we care about is confirming whether the session is valid or not.
|
|
||||||
return g8.NewClient(token)
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
Keep in mind that you can get really creative with the client provider above.
|
|
||||||
For instance, you could refresh the session's expiration time, which will allow the user to stay logged in for
|
|
||||||
as long as they're active.
|
|
||||||
|
|
||||||
You're also not limited to using something stateful like the example above. You could use a JWT and have your client
|
|
||||||
provider validate said JWT.
|
|
||||||
|
|
||||||
Finally, we can create the authorization service and the gate:
|
|
||||||
```go
|
|
||||||
authorizationService := g8.NewAuthorizationService().WithClientProvider(clientProvider)
|
|
||||||
gate := g8.New().WithAuthorizationService(authorizationService).WithCustomTokenExtractor(customTokenExtractorFunc)
|
|
||||||
```
|
|
||||||
|
|
||||||
If you need to access the token (session ID in this case) from the protected handlers, you can retrieve it from the
|
|
||||||
request context by using the key `g8.TokenContextKey`:
|
|
||||||
```go
|
|
||||||
http.Handle("/handle", gate.ProtectFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
sessionID, _ := r.Context().Value(g8.TokenContextKey).(string)
|
|
||||||
// ...
|
|
||||||
}))
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using a custom header
|
|
||||||
The logic is the same as the example above:
|
|
||||||
```go
|
|
||||||
customTokenExtractorFunc := func(request *http.Request) string {
|
|
||||||
return request.Header.Get("X-API-Token")
|
|
||||||
}
|
|
||||||
|
|
||||||
clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
|
|
||||||
// We'll assume that the following function calls your database and returns a struct "User" that
|
|
||||||
// has the user's token as well as the permissions granted to said user
|
|
||||||
user := database.GetUserByToken(token)
|
|
||||||
if user != nil {
|
|
||||||
return g8.NewClient(user.Token).WithPermissions(user.Permissions)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
authorizationService := g8.NewAuthorizationService().WithClientProvider(clientProvider)
|
|
||||||
gate := g8.New().WithAuthorizationService(authorizationService).WithCustomTokenExtractor(customTokenExtractorFunc)
|
|
||||||
```
|
|
122
vendor/github.com/TwiN/g8/authorization.go
generated
vendored
122
vendor/github.com/TwiN/g8/authorization.go
generated
vendored
@ -1,122 +0,0 @@
|
|||||||
package g8
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AuthorizationService is the service that manages client/token registry and client fallback as well as the service
|
|
||||||
// that determines whether a token meets the specific requirements to be authorized by a Gate or not.
|
|
||||||
type AuthorizationService struct {
|
|
||||||
clients map[string]*Client
|
|
||||||
clientProvider *ClientProvider
|
|
||||||
|
|
||||||
mutex sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAuthorizationService creates a new AuthorizationService
|
|
||||||
func NewAuthorizationService() *AuthorizationService {
|
|
||||||
return &AuthorizationService{
|
|
||||||
clients: make(map[string]*Client),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithToken is used to specify a single token for which authorization will be granted
|
|
||||||
//
|
|
||||||
// The client that will be created from this token will have access to all handlers that are not protected with a
|
|
||||||
// specific permission.
|
|
||||||
//
|
|
||||||
// In other words, if you were to do the following:
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithToken("12345"))
|
|
||||||
//
|
|
||||||
// The following handler would be accessible with the token 12345:
|
|
||||||
// router.Handle("/1st-handler", gate.Protect(yourHandler))
|
|
||||||
//
|
|
||||||
// But not this one would not be accessible with the token 12345:
|
|
||||||
// router.Handle("/2nd-handler", gate.ProtectWithPermissions(yourOtherHandler, []string{"admin"}))
|
|
||||||
//
|
|
||||||
// Calling this function multiple times will add multiple clients, though you may want to use WithTokens instead
|
|
||||||
// if you plan to add multiple clients
|
|
||||||
//
|
|
||||||
// If you wish to configure advanced permissions, consider using WithClient instead.
|
|
||||||
//
|
|
||||||
func (authorizationService *AuthorizationService) WithToken(token string) *AuthorizationService {
|
|
||||||
authorizationService.mutex.Lock()
|
|
||||||
authorizationService.clients[token] = NewClient(token)
|
|
||||||
authorizationService.mutex.Unlock()
|
|
||||||
return authorizationService
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithTokens is used to specify a slice of tokens for which authorization will be granted
|
|
||||||
func (authorizationService *AuthorizationService) WithTokens(tokens []string) *AuthorizationService {
|
|
||||||
authorizationService.mutex.Lock()
|
|
||||||
for _, token := range tokens {
|
|
||||||
authorizationService.clients[token] = NewClient(token)
|
|
||||||
}
|
|
||||||
authorizationService.mutex.Unlock()
|
|
||||||
return authorizationService
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithClient is used to specify a single client for which authorization will be granted
|
|
||||||
//
|
|
||||||
// When compared to WithToken, the advantage of using this function is that you may specify the client's
|
|
||||||
// permissions and thus, be a lot more granular with what endpoint a token has access to.
|
|
||||||
//
|
|
||||||
// In other words, if you were to do the following:
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("12345").WithPermission("mod")))
|
|
||||||
//
|
|
||||||
// The following handlers would be accessible with the token 12345:
|
|
||||||
// router.Handle("/1st-handler", gate.ProtectWithPermissions(yourHandler, []string{"mod"}))
|
|
||||||
// router.Handle("/2nd-handler", gate.Protect(yourOtherHandler))
|
|
||||||
//
|
|
||||||
// But not this one, because the user does not have the permission "admin":
|
|
||||||
// router.Handle("/3rd-handler", gate.ProtectWithPermissions(yetAnotherHandler, []string{"admin"}))
|
|
||||||
//
|
|
||||||
// Calling this function multiple times will add multiple clients, though you may want to use WithClients instead
|
|
||||||
// if you plan to add multiple clients
|
|
||||||
func (authorizationService *AuthorizationService) WithClient(client *Client) *AuthorizationService {
|
|
||||||
authorizationService.mutex.Lock()
|
|
||||||
authorizationService.clients[client.Token] = client
|
|
||||||
authorizationService.mutex.Unlock()
|
|
||||||
return authorizationService
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithClients is used to specify a slice of clients for which authorization will be granted
|
|
||||||
func (authorizationService *AuthorizationService) WithClients(clients []*Client) *AuthorizationService {
|
|
||||||
authorizationService.mutex.Lock()
|
|
||||||
for _, client := range clients {
|
|
||||||
authorizationService.clients[client.Token] = client
|
|
||||||
}
|
|
||||||
authorizationService.mutex.Unlock()
|
|
||||||
return authorizationService
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithClientProvider allows specifying a custom provider to fetch clients by token.
|
|
||||||
//
|
|
||||||
// For example, you can use it to fallback to making a call in your database when a request is made with a token that
|
|
||||||
// hasn't been specified via WithToken, WithTokens, WithClient or WithClients.
|
|
||||||
func (authorizationService *AuthorizationService) WithClientProvider(provider *ClientProvider) *AuthorizationService {
|
|
||||||
authorizationService.clientProvider = provider
|
|
||||||
return authorizationService
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAuthorized checks whether a client with a given token exists and has the permissions required.
|
|
||||||
//
|
|
||||||
// If permissionsRequired is nil or empty and a client with the given token exists, said client will have access to all
|
|
||||||
// handlers that are not protected by a given permission.
|
|
||||||
func (authorizationService *AuthorizationService) IsAuthorized(token string, permissionsRequired []string) bool {
|
|
||||||
if len(token) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
authorizationService.mutex.RLock()
|
|
||||||
client, _ := authorizationService.clients[token]
|
|
||||||
authorizationService.mutex.RUnlock()
|
|
||||||
// If there's no clients with the given token directly stored in the AuthorizationService, fall back to the
|
|
||||||
// client provider, if there's one configured.
|
|
||||||
if client == nil && authorizationService.clientProvider != nil {
|
|
||||||
client = authorizationService.clientProvider.GetClientByToken(token)
|
|
||||||
}
|
|
||||||
if client != nil {
|
|
||||||
return client.HasPermissions(permissionsRequired)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
58
vendor/github.com/TwiN/g8/client.go
generated
vendored
58
vendor/github.com/TwiN/g8/client.go
generated
vendored
@ -1,58 +0,0 @@
|
|||||||
package g8
|
|
||||||
|
|
||||||
// Client is a struct containing both a Token and a slice of extra Permissions that said token has.
type Client struct {
	// Token is the value used to authenticate with the API.
	Token string

	// Permissions is a slice of extra permissions that may be used for more granular access control.
	//
	// If you only wish to use Gate.Protect and Gate.ProtectFunc, you do not have to worry about this,
	// since permissions are only checked by Gate.ProtectWithPermissions and Gate.ProtectFuncWithPermissions.
	Permissions []string
}
|
|
||||||
|
|
||||||
// NewClient creates a Client with a given token
|
|
||||||
func NewClient(token string) *Client {
|
|
||||||
return &Client{
|
|
||||||
Token: token,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClientWithPermissions creates a Client with a slice of permissions
|
|
||||||
// Equivalent to using NewClient and WithPermissions
|
|
||||||
func NewClientWithPermissions(token string, permissions []string) *Client {
|
|
||||||
return NewClient(token).WithPermissions(permissions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPermissions adds a slice of permissions to a client
|
|
||||||
func (client *Client) WithPermissions(permissions []string) *Client {
|
|
||||||
client.Permissions = append(client.Permissions, permissions...)
|
|
||||||
return client
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPermission adds a permission to a client
|
|
||||||
func (client *Client) WithPermission(permission string) *Client {
|
|
||||||
client.Permissions = append(client.Permissions, permission)
|
|
||||||
return client
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasPermission checks whether a client has a given permission
|
|
||||||
func (client Client) HasPermission(permissionRequired string) bool {
|
|
||||||
for _, permission := range client.Permissions {
|
|
||||||
if permissionRequired == permission {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasPermissions checks whether a client has the all permissions passed
|
|
||||||
func (client Client) HasPermissions(permissionsRequired []string) bool {
|
|
||||||
for _, permissionRequired := range permissionsRequired {
|
|
||||||
if !client.HasPermission(permissionRequired) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
140
vendor/github.com/TwiN/g8/clientprovider.go
generated
vendored
140
vendor/github.com/TwiN/g8/clientprovider.go
generated
vendored
@ -1,140 +0,0 @@
|
|||||||
package g8
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/TwiN/gocache/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// ErrNoExpiration is the error returned by ClientProvider.StartCacheJanitor if there was an attempt to start the
	// janitor despite no expiration being configured.
	// The janitor's only job is to evict expired entries, so it is useless unless an expiration (TTL) is set.
	ErrNoExpiration = errors.New("no point starting the janitor if the TTL is set to not expire")

	// ErrCacheNotInitialized is the error returned by ClientProvider.StartCacheJanitor if there was an attempt to start
	// the janitor despite the cache not having been initialized using ClientProvider.WithCache.
	ErrCacheNotInitialized = errors.New("cannot start janitor because cache is not configured")
)
|
|
||||||
|
|
||||||
// ClientProvider has the task of retrieving a Client from an external source (e.g. a database) when provided with a
// token. It should be used when you have a lot of tokens, and it wouldn't make sense to register all of them using
// AuthorizationService's WithToken, WithTokens, WithClient or WithClients.
//
// Note that the provider is used as a fallback source. As such, if a token is explicitly registered using one of the 4
// aforementioned functions, the client provider will not be used by the AuthorizationService when a request is made
// with said token. It will, however, be called upon if a token that is not explicitly registered in
// AuthorizationService is sent alongside a request going through the Gate.
//
//	clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
//		// We'll assume that the following function calls your database and returns a struct "User" that
//		// has the user's token as well as the permissions granted to said user
//		user := database.GetUserByToken(token)
//		if user != nil {
//			return g8.NewClient(user.Token).WithPermissions(user.Permissions)
//		}
//		return nil
//	})
//	gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClientProvider(clientProvider))
type ClientProvider struct {
	// getClientByTokenFunc is the user-supplied function used to look a Client up by its token.
	getClientByTokenFunc func(token string) *Client

	// cache and ttl are only set when caching is enabled through WithCache.
	cache *gocache.Cache
	ttl   time.Duration
}
|
|
||||||
|
|
||||||
// NewClientProvider creates a ClientProvider
|
|
||||||
// The parameter that must be passed is a function that the provider will use to retrieve a client by a given token
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
|
|
||||||
// // We'll assume that the following function calls your database and returns a struct "User" that
|
|
||||||
// // has the user's token as well as the permissions granted to said user
|
|
||||||
// user := database.GetUserByToken(token)
|
|
||||||
// if user == nil {
|
|
||||||
// return nil
|
|
||||||
// }
|
|
||||||
// return g8.NewClient(user.Token).WithPermissions(user.Permissions)
|
|
||||||
// })
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClientProvider(clientProvider))
|
|
||||||
//
|
|
||||||
func NewClientProvider(getClientByTokenFunc func(token string) *Client) *ClientProvider {
|
|
||||||
return &ClientProvider{
|
|
||||||
getClientByTokenFunc: getClientByTokenFunc,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCache adds cache options to the ClientProvider.
|
|
||||||
//
|
|
||||||
// ttl is the time until the cache entry will expire. A TTL of gocache.NoExpiration (-1) means no expiration
|
|
||||||
// maxSize is the maximum amount of entries that can be in the cache at any given time.
|
|
||||||
// If a value of gocache.NoMaxSize (0) or less is provided for maxSize, there will be no maximum size.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
|
|
||||||
// // We'll assume that the following function calls your database and returns a struct "User" that
|
|
||||||
// // has the user's token as well as the permissions granted to said user
|
|
||||||
// user := database.GetUserByToken(token)
|
|
||||||
// if user != nil {
|
|
||||||
// return g8.NewClient(user.Token).WithPermissions(user.Permissions)
|
|
||||||
// }
|
|
||||||
// return nil
|
|
||||||
// })
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClientProvider(clientProvider.WithCache(time.Hour, 70000)))
|
|
||||||
//
|
|
||||||
func (provider *ClientProvider) WithCache(ttl time.Duration, maxSize int) *ClientProvider {
|
|
||||||
provider.cache = gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(maxSize)
|
|
||||||
provider.ttl = ttl
|
|
||||||
return provider
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartCacheJanitor starts the cache janitor, which passively deletes expired cache entries in the background.
|
|
||||||
//
|
|
||||||
// Not really necessary unless you have a lot of clients (100000+).
|
|
||||||
//
|
|
||||||
// Even without the janitor, active eviction will still happen (i.e. when GetClientByToken is called, but the cache
|
|
||||||
// entry for the given token has expired, the cache entry will be automatically deleted and re-fetched from the
|
|
||||||
// user-defined getClientByTokenFunc)
|
|
||||||
func (provider *ClientProvider) StartCacheJanitor() error {
|
|
||||||
if provider.cache == nil {
|
|
||||||
// Can't start the cache janitor if there's no cache
|
|
||||||
return ErrCacheNotInitialized
|
|
||||||
}
|
|
||||||
if provider.ttl != gocache.NoExpiration {
|
|
||||||
return provider.cache.StartJanitor()
|
|
||||||
}
|
|
||||||
return ErrNoExpiration
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopCacheJanitor stops the cache janitor
|
|
||||||
//
|
|
||||||
// Not required unless your application initializes multiple providers over the course of its lifecycle.
|
|
||||||
// In English, that means if you initialize a ClientProvider only once on application start and it stays up
|
|
||||||
// until your application shuts down, you don't need to call this function.
|
|
||||||
func (provider *ClientProvider) StopCacheJanitor() {
|
|
||||||
if provider.cache != nil {
|
|
||||||
provider.cache.StopJanitor()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetClientByToken retrieves a client by its token through the provided getClientByTokenFunc.
|
|
||||||
func (provider *ClientProvider) GetClientByToken(token string) *Client {
|
|
||||||
if provider.cache == nil {
|
|
||||||
return provider.getClientByTokenFunc(token)
|
|
||||||
}
|
|
||||||
if cachedClient, exists := provider.cache.Get(token); exists {
|
|
||||||
if cachedClient == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Safely typecast the client.
|
|
||||||
// Regardless of whether the typecast is successful or not, we return client since it'll be either client or
|
|
||||||
// nil. Technically, it should never be nil, but it's better to be safe than sorry.
|
|
||||||
client, _ := cachedClient.(*Client)
|
|
||||||
return client
|
|
||||||
}
|
|
||||||
client := provider.getClientByTokenFunc(token)
|
|
||||||
provider.cache.SetWithTTL(token, client, provider.ttl)
|
|
||||||
return client
|
|
||||||
}
|
|
238
vendor/github.com/TwiN/g8/gate.go
generated
vendored
238
vendor/github.com/TwiN/g8/gate.go
generated
vendored
@ -1,238 +0,0 @@
|
|||||||
package g8
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// AuthorizationHeader is the header in which g8 looks for the authorization bearer token.
	AuthorizationHeader = "Authorization"

	// DefaultUnauthorizedResponseBody is the default response body returned if a request was sent with a missing or invalid token.
	DefaultUnauthorizedResponseBody = "token is missing or invalid"

	// DefaultTooManyRequestsResponseBody is the default response body returned if a request exceeded the allowed rate limit.
	DefaultTooManyRequestsResponseBody = "too many requests"

	// TokenContextKey is the key used to store the token in the request context.
	TokenContextKey = "g8.token"
)
|
|
||||||
|
|
||||||
// Gate is the lock to the front door of your API, letting only those you allow through.
type Gate struct {
	// authorizationService decides whether a request is allowed through; if nil, authorization is not enforced.
	authorizationService *AuthorizationService
	// unauthorizedResponseBody is the body written alongside a 401 Unauthorized response.
	unauthorizedResponseBody []byte

	// customTokenExtractorFunc, if set, replaces the default bearer-token extraction from the Authorization header.
	customTokenExtractorFunc func(request *http.Request) string

	// rateLimiter, if set, limits the number of requests per second; if nil, rate limiting is not enforced.
	rateLimiter *RateLimiter
	// tooManyRequestsResponseBody is the body written alongside a 429 Too Many Requests response.
	tooManyRequestsResponseBody []byte
}
|
|
||||||
|
|
||||||
// Deprecated: use New instead.
|
|
||||||
func NewGate(authorizationService *AuthorizationService) *Gate {
|
|
||||||
return &Gate{
|
|
||||||
authorizationService: authorizationService,
|
|
||||||
unauthorizedResponseBody: []byte(DefaultUnauthorizedResponseBody),
|
|
||||||
tooManyRequestsResponseBody: []byte(DefaultTooManyRequestsResponseBody),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Gate.
|
|
||||||
func New() *Gate {
|
|
||||||
return &Gate{
|
|
||||||
unauthorizedResponseBody: []byte(DefaultUnauthorizedResponseBody),
|
|
||||||
tooManyRequestsResponseBody: []byte(DefaultTooManyRequestsResponseBody),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithAuthorizationService sets the authorization service to use.
|
|
||||||
//
|
|
||||||
// If there is no authorization service, Gate will not enforce authorization.
|
|
||||||
func (gate *Gate) WithAuthorizationService(authorizationService *AuthorizationService) *Gate {
|
|
||||||
gate.authorizationService = authorizationService
|
|
||||||
return gate
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCustomUnauthorizedResponseBody sets a custom response body when Gate determines that a request must be blocked
|
|
||||||
func (gate *Gate) WithCustomUnauthorizedResponseBody(unauthorizedResponseBody []byte) *Gate {
|
|
||||||
gate.unauthorizedResponseBody = unauthorizedResponseBody
|
|
||||||
return gate
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCustomTokenExtractor allows the specification of a custom function to extract a token from a request.
|
|
||||||
// If a custom token extractor is not specified, the token will be extracted from the Authorization header.
|
|
||||||
//
|
|
||||||
// For instance, if you're using a session cookie, you can extract the token from the cookie like so:
|
|
||||||
//
|
|
||||||
// authorizationService := g8.NewAuthorizationService()
|
|
||||||
// customTokenExtractorFunc := func(request *http.Request) string {
|
|
||||||
// sessionCookie, err := request.Cookie("session")
|
|
||||||
// if err != nil {
|
|
||||||
// return ""
|
|
||||||
// }
|
|
||||||
// return sessionCookie.Value
|
|
||||||
// }
|
|
||||||
// gate := g8.New().WithAuthorizationService(authorizationService).WithCustomTokenExtractor(customTokenExtractorFunc)
|
|
||||||
//
|
|
||||||
// You would normally use this with a client provider that matches whatever need you have.
|
|
||||||
// For example, if you're using a session cookie, your client provider would retrieve the user from the session ID
|
|
||||||
// extracted by this custom token extractor.
|
|
||||||
//
|
|
||||||
// Note that for the sake of convenience, the token extracted from the request is passed the protected handlers request
|
|
||||||
// context under the key TokenContextKey. This is especially useful if the token is in fact a session ID.
|
|
||||||
func (gate *Gate) WithCustomTokenExtractor(customTokenExtractorFunc func(request *http.Request) string) *Gate {
|
|
||||||
gate.customTokenExtractorFunc = customTokenExtractorFunc
|
|
||||||
return gate
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithRateLimit adds rate limiting to the Gate
|
|
||||||
//
|
|
||||||
// If you just want to use a gate for rate limiting purposes:
|
|
||||||
//
|
|
||||||
// gate := g8.New().WithRateLimit(50)
|
|
||||||
func (gate *Gate) WithRateLimit(maximumRequestsPerSecond int) *Gate {
|
|
||||||
gate.rateLimiter = NewRateLimiter(maximumRequestsPerSecond)
|
|
||||||
return gate
|
|
||||||
}
|
|
||||||
|
|
||||||
// Protect secures a handler, requiring requests going through to have a valid Authorization Bearer token.
|
|
||||||
// Unlike ProtectWithPermissions, Protect will allow access to any registered tokens, regardless of their permissions
|
|
||||||
// or lack thereof.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithToken("token"))
|
|
||||||
// router := http.NewServeMux()
|
|
||||||
// // Without protection
|
|
||||||
// router.Handle("/handle", yourHandler)
|
|
||||||
// // With protection
|
|
||||||
// router.Handle("/handle", gate.Protect(yourHandler))
|
|
||||||
//
|
|
||||||
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
|
|
||||||
func (gate *Gate) Protect(handler http.Handler) http.Handler {
|
|
||||||
return gate.ProtectWithPermissions(handler, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProtectWithPermissions secures a handler, requiring requests going through to have a valid Authorization Bearer token
|
|
||||||
// as well as a slice of permissions that must be met.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("token").WithPermission("ADMIN")))
|
|
||||||
// router := http.NewServeMux()
|
|
||||||
// // Without protection
|
|
||||||
// router.Handle("/handle", yourHandler)
|
|
||||||
// // With protection
|
|
||||||
// router.Handle("/handle", gate.ProtectWithPermissions(yourHandler, []string{"admin"}))
|
|
||||||
//
|
|
||||||
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
|
|
||||||
func (gate *Gate) ProtectWithPermissions(handler http.Handler, permissions []string) http.Handler {
|
|
||||||
return gate.ProtectFuncWithPermissions(func(writer http.ResponseWriter, request *http.Request) {
|
|
||||||
handler.ServeHTTP(writer, request)
|
|
||||||
}, permissions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProtectWithPermission does the same thing as ProtectWithPermissions, but for a single permission instead of a
|
|
||||||
// slice of permissions
|
|
||||||
//
|
|
||||||
// See ProtectWithPermissions for further documentation
|
|
||||||
func (gate *Gate) ProtectWithPermission(handler http.Handler, permission string) http.Handler {
|
|
||||||
return gate.ProtectFuncWithPermissions(func(writer http.ResponseWriter, request *http.Request) {
|
|
||||||
handler.ServeHTTP(writer, request)
|
|
||||||
}, []string{permission})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProtectFunc secures a handlerFunc, requiring requests going through to have a valid Authorization Bearer token.
|
|
||||||
// Unlike ProtectFuncWithPermissions, ProtectFunc will allow access to any registered tokens, regardless of their
|
|
||||||
// permissions or lack thereof.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithToken("token"))
|
|
||||||
// router := http.NewServeMux()
|
|
||||||
// // Without protection
|
|
||||||
// router.HandleFunc("/handle", yourHandlerFunc)
|
|
||||||
// // With protection
|
|
||||||
// router.HandleFunc("/handle", gate.ProtectFunc(yourHandlerFunc))
|
|
||||||
//
|
|
||||||
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
|
|
||||||
func (gate *Gate) ProtectFunc(handlerFunc http.HandlerFunc) http.HandlerFunc {
|
|
||||||
return gate.ProtectFuncWithPermissions(handlerFunc, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProtectFuncWithPermissions secures a handler, requiring requests going through to have a valid Authorization Bearer
|
|
||||||
// token as well as a slice of permissions that must be met.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
//
|
|
||||||
// gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("token").WithPermission("admin")))
|
|
||||||
// router := http.NewServeMux()
|
|
||||||
// // Without protection
|
|
||||||
// router.HandleFunc("/handle", yourHandlerFunc)
|
|
||||||
// // With protection
|
|
||||||
// router.HandleFunc("/handle", gate.ProtectFuncWithPermissions(yourHandlerFunc, []string{"admin"}))
|
|
||||||
//
|
|
||||||
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
|
|
||||||
func (gate *Gate) ProtectFuncWithPermissions(handlerFunc http.HandlerFunc, permissions []string) http.HandlerFunc {
|
|
||||||
return func(writer http.ResponseWriter, request *http.Request) {
|
|
||||||
if gate.rateLimiter != nil {
|
|
||||||
if !gate.rateLimiter.Try() {
|
|
||||||
writer.WriteHeader(http.StatusTooManyRequests)
|
|
||||||
_, _ = writer.Write(gate.tooManyRequestsResponseBody)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if gate.authorizationService != nil {
|
|
||||||
token := gate.ExtractTokenFromRequest(request)
|
|
||||||
if !gate.authorizationService.IsAuthorized(token, permissions) {
|
|
||||||
writer.WriteHeader(http.StatusUnauthorized)
|
|
||||||
_, _ = writer.Write(gate.unauthorizedResponseBody)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
request = request.WithContext(context.WithValue(request.Context(), TokenContextKey, token))
|
|
||||||
}
|
|
||||||
handlerFunc(writer, request)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProtectFuncWithPermission does the same thing as ProtectFuncWithPermissions, but for a single permission instead of a
|
|
||||||
// slice of permissions
|
|
||||||
//
|
|
||||||
// See ProtectFuncWithPermissions for further documentation
|
|
||||||
func (gate *Gate) ProtectFuncWithPermission(handlerFunc http.HandlerFunc, permission string) http.HandlerFunc {
|
|
||||||
return gate.ProtectFuncWithPermissions(handlerFunc, []string{permission})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractTokenFromRequest extracts a token from a request.
|
|
||||||
//
|
|
||||||
// By default, it extracts the bearer token from the AuthorizationHeader, but if a customTokenExtractorFunc is defined,
|
|
||||||
// it will use that instead.
|
|
||||||
//
|
|
||||||
// Note that this method is internally used by Protect, ProtectWithPermission, ProtectFunc and
|
|
||||||
// ProtectFuncWithPermissions, but it is exposed in case you need to use it directly.
|
|
||||||
func (gate *Gate) ExtractTokenFromRequest(request *http.Request) string {
|
|
||||||
if gate.customTokenExtractorFunc != nil {
|
|
||||||
// A custom token extractor function is defined, so we'll use it instead of the default token extraction logic
|
|
||||||
return gate.customTokenExtractorFunc(request)
|
|
||||||
}
|
|
||||||
return strings.TrimPrefix(request.Header.Get(AuthorizationHeader), "Bearer ")
|
|
||||||
}
|
|
||||||
|
|
||||||
// PermissionMiddleware is a middleware that behaves like ProtectWithPermission, but it is meant to be used
|
|
||||||
// as a middleware for libraries that support such a feature.
|
|
||||||
//
|
|
||||||
// For instance, if you are using github.com/gorilla/mux, you can use PermissionMiddleware like so:
|
|
||||||
//
|
|
||||||
// router := mux.NewRouter()
|
|
||||||
// router.Use(gate.PermissionMiddleware("admin"))
|
|
||||||
// router.Handle("/admin/handle", adminHandler)
|
|
||||||
//
|
|
||||||
// If you do not want to protect a router with a specific permission, you can use Gate.Protect instead.
|
|
||||||
func (gate *Gate) PermissionMiddleware(permissions ...string) func(http.Handler) http.Handler {
|
|
||||||
return func(next http.Handler) http.Handler {
|
|
||||||
return gate.ProtectWithPermissions(next, permissions)
|
|
||||||
}
|
|
||||||
}
|
|
42
vendor/github.com/TwiN/g8/ratelimiter.go
generated
vendored
42
vendor/github.com/TwiN/g8/ratelimiter.go
generated
vendored
@ -1,42 +0,0 @@
|
|||||||
package g8
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RateLimiter is a fixed rate limiter
type RateLimiter struct {
	// maximumExecutionsPerSecond is the quota granted for each one-second window.
	maximumExecutionsPerSecond int
	// executionsLeftInWindow is how many executions remain in the current window.
	executionsLeftInWindow int
	// windowStartTime marks the beginning of the current one-second window.
	windowStartTime time.Time
	// mutex guards the fields above, making Try safe for concurrent use.
	mutex sync.Mutex
}
|
|
||||||
|
|
||||||
// NewRateLimiter creates a RateLimiter
|
|
||||||
func NewRateLimiter(maximumExecutionsPerSecond int) *RateLimiter {
|
|
||||||
return &RateLimiter{
|
|
||||||
windowStartTime: time.Now(),
|
|
||||||
executionsLeftInWindow: maximumExecutionsPerSecond,
|
|
||||||
maximumExecutionsPerSecond: maximumExecutionsPerSecond,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try updates the number of executions if the rate limit quota hasn't been reached and returns whether the
|
|
||||||
// attempt was successful or not.
|
|
||||||
//
|
|
||||||
// Returns false if the execution was not successful (rate limit quota has been reached)
|
|
||||||
// Returns true if the execution was successful (rate limit quota has not been reached)
|
|
||||||
func (r *RateLimiter) Try() bool {
|
|
||||||
r.mutex.Lock()
|
|
||||||
defer r.mutex.Unlock()
|
|
||||||
if time.Now().Add(-time.Second).After(r.windowStartTime) {
|
|
||||||
r.windowStartTime = time.Now()
|
|
||||||
r.executionsLeftInWindow = r.maximumExecutionsPerSecond
|
|
||||||
}
|
|
||||||
if r.executionsLeftInWindow == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
r.executionsLeftInWindow--
|
|
||||||
return true
|
|
||||||
}
|
|
1
vendor/github.com/TwiN/gocache/v2/.gitattributes
generated
vendored
1
vendor/github.com/TwiN/gocache/v2/.gitattributes
generated
vendored
@ -1 +0,0 @@
|
|||||||
* text=lf
|
|
1
vendor/github.com/TwiN/gocache/v2/.gitignore
generated
vendored
1
vendor/github.com/TwiN/gocache/v2/.gitignore
generated
vendored
@ -1 +0,0 @@
|
|||||||
.idea
|
|
9
vendor/github.com/TwiN/gocache/v2/LICENSE.md
generated
vendored
9
vendor/github.com/TwiN/gocache/v2/LICENSE.md
generated
vendored
@ -1,9 +0,0 @@
|
|||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2022 TwiN
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
361
vendor/github.com/TwiN/gocache/v2/README.md
generated
vendored
361
vendor/github.com/TwiN/gocache/v2/README.md
generated
vendored
@ -1,361 +0,0 @@
|
|||||||
# gocache
|
|
||||||
![test](https://github.com/TwiN/gocache/workflows/test/badge.svg?branch=master)
|
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/TwiN/gocache)](https://goreportcard.com/report/github.com/TwiN/gocache)
|
|
||||||
[![codecov](https://codecov.io/gh/TwiN/gocache/branch/master/graph/badge.svg)](https://codecov.io/gh/TwiN/gocache)
|
|
||||||
[![Go version](https://img.shields.io/github/go-mod/go-version/TwiN/gocache.svg)](https://github.com/TwiN/gocache)
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/github.com/TwiN/gocache.svg)](https://pkg.go.dev/github.com/TwiN/gocache/v2)
|
|
||||||
[![Follow TwiN](https://img.shields.io/github/followers/TwiN?label=Follow&style=social)](https://github.com/TwiN)
|
|
||||||
|
|
||||||
gocache is an easy-to-use, high-performance, lightweight and thread-safe (goroutine-safe) in-memory key-value cache
|
|
||||||
with support for LRU and FIFO eviction policies as well as expiration, bulk operations and even retrieval of keys by pattern.
|
|
||||||
|
|
||||||
|
|
||||||
## Table of Contents
|
|
||||||
|
|
||||||
- [Features](#features)
|
|
||||||
- [Usage](#usage)
|
|
||||||
- [Initializing the cache](#initializing-the-cache)
|
|
||||||
- [Functions](#functions)
|
|
||||||
- [Examples](#examples)
|
|
||||||
- [Creating or updating an entry](#creating-or-updating-an-entry)
|
|
||||||
- [Getting an entry](#getting-an-entry)
|
|
||||||
- [Deleting an entry](#deleting-an-entry)
|
|
||||||
- [Complex example](#complex-example)
|
|
||||||
- [Persistence](#persistence)
|
|
||||||
- [Eviction](#eviction)
|
|
||||||
- [MaxSize](#maxsize)
|
|
||||||
- [MaxMemoryUsage](#maxmemoryusage)
|
|
||||||
- [Expiration](#expiration)
|
|
||||||
- [Performance](#performance)
|
|
||||||
- [Summary](#summary)
|
|
||||||
- [Results](#results)
|
|
||||||
- [FAQ](#faq)
|
|
||||||
- [How can I persist the data on application termination?](#how-can-i-persist-the-data-on-application-termination)
|
|
||||||
|
|
||||||
|
|
||||||
## Features
|
|
||||||
gocache supports the following cache eviction policies:
|
|
||||||
- First in first out (FIFO)
|
|
||||||
- Least recently used (LRU)
|
|
||||||
|
|
||||||
It also supports cache entry TTL, which is both active and passive. Active expiration means that if you attempt
|
|
||||||
to retrieve a cache key that has already expired, it will delete it on the spot and the behavior will be as if
|
|
||||||
the cache key didn't exist. As for passive expiration, there's a background task that will take care of deleting
|
|
||||||
expired keys.
|
|
||||||
|
|
||||||
It also includes what you'd expect from a cache, like GET/SET, bulk operations and get by pattern.
|
|
||||||
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
```
|
|
||||||
go get -u github.com/TwiN/gocache/v2
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Initializing the cache
|
|
||||||
```go
|
|
||||||
cache := gocache.NewCache().WithMaxSize(1000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
|
||||||
```
|
|
||||||
|
|
||||||
If you're planning on using expiration (`SetWithTTL` or `Expire`) and you want expired entries to be automatically deleted
|
|
||||||
in the background, make sure to start the janitor when you instantiate the cache:
|
|
||||||
|
|
||||||
```go
|
|
||||||
cache.StartJanitor()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Functions
|
|
||||||
| Function | Description |
|
|
||||||
|-----------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| WithMaxSize | Sets the max size of the cache. `gocache.NoMaxSize` means there is no limit. If not set, the default max size is `gocache.DefaultMaxSize`. |
|
|
||||||
| WithMaxMemoryUsage | Sets the max memory usage of the cache. `gocache.NoMaxMemoryUsage` means there is no limit. The default behavior is to not evict based on memory usage. |
|
|
||||||
| WithEvictionPolicy | Sets the eviction algorithm to be used when the cache reaches the max size. If not set, the default eviction policy is `gocache.FirstInFirstOut` (FIFO). |
|
|
||||||
| WithDefaultTTL | Sets the default TTL for each entry. |
|
|
||||||
| WithForceNilInterfaceOnNilPointer | Configures whether values with a nil pointer passed to write functions should be forcefully set to nil. Defaults to true. |
|
|
||||||
| StartJanitor | Starts the janitor, which is in charge of deleting expired cache entries in the background. |
|
|
||||||
| StopJanitor | Stops the janitor. |
|
|
||||||
| Set | Same as `SetWithTTL`, but using the default TTL (which is `gocache.NoExpiration`, unless configured otherwise). |
|
|
||||||
| SetWithTTL | Creates or updates a cache entry with the given key, value and expiration time. If the max size after the aforementioned operation is above the configured max size, the tail will be evicted. Depending on the eviction policy, the tail is defined as the oldest |
|
|
||||||
| SetAll | Same as `Set`, but in bulk. |
|
|
||||||
| SetAllWithTTL | Same as `SetWithTTL`, but in bulk. |
|
|
||||||
| Get | Gets a cache entry by its key. |
|
|
||||||
| GetByKeys | Gets a map of entries by their keys. The resulting map will contain all keys, even if some of the keys in the slice passed as parameter were not present in the cache. |
|
|
||||||
| GetAll | Gets all cache entries. |
|
|
||||||
| GetKeysByPattern | Retrieves a slice of keys that matches a given pattern. |
|
|
||||||
| Delete | Removes a key from the cache. |
|
|
||||||
| DeleteAll | Removes multiple keys from the cache. |
|
|
||||||
| DeleteKeysByPattern | Removes all keys that that matches a given pattern. |
|
|
||||||
| Count | Gets the size of the cache. This includes cache keys which may have already expired, but have not been removed yet. |
|
|
||||||
| Clear | Wipes the cache. |
|
|
||||||
| TTL | Gets the time until a cache key expires. |
|
|
||||||
| Expire | Sets the expiration time of an existing cache key. |
|
|
||||||
|
|
||||||
For further documentation, please refer to [Go Reference](https://pkg.go.dev/github.com/TwiN/gocache)
|
|
||||||
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
|
|
||||||
#### Creating or updating an entry
|
|
||||||
```go
|
|
||||||
cache.Set("key", "value")
|
|
||||||
cache.Set("key", 1)
|
|
||||||
cache.Set("key", struct{ Text string }{Test: "value"})
|
|
||||||
cache.SetWithTTL("key", []byte("value"), 24*time.Hour)
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Getting an entry
|
|
||||||
```go
|
|
||||||
value, exists := cache.Get("key")
|
|
||||||
```
|
|
||||||
You can also get multiple entries by using `cache.GetByKeys([]string{"key1", "key2"})`
|
|
||||||
|
|
||||||
#### Deleting an entry
|
|
||||||
```go
|
|
||||||
cache.Delete("key")
|
|
||||||
```
|
|
||||||
You can also delete multiple entries by using `cache.DeleteAll([]string{"key1", "key2"})`
|
|
||||||
|
|
||||||
#### Complex example
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/TwiN/gocache/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(10000)
|
|
||||||
cache.StartJanitor() // Passively manages expired entries
|
|
||||||
defer cache.StopJanitor()
|
|
||||||
|
|
||||||
cache.Set("key", "value")
|
|
||||||
cache.SetWithTTL("key-with-ttl", "value", 60*time.Minute)
|
|
||||||
cache.SetAll(map[string]any{"k1": "v1", "k2": "v2", "k3": "v3"})
|
|
||||||
|
|
||||||
fmt.Println("[Count] Cache size:", cache.Count())
|
|
||||||
|
|
||||||
value, exists := cache.Get("key")
|
|
||||||
fmt.Printf("[Get] key=key; value=%s; exists=%v\n", value, exists)
|
|
||||||
for key, value := range cache.GetByKeys([]string{"k1", "k2", "k3"}) {
|
|
||||||
fmt.Printf("[GetByKeys] key=%s; value=%s\n", key, value)
|
|
||||||
}
|
|
||||||
for _, key := range cache.GetKeysByPattern("key*", 0) {
|
|
||||||
fmt.Printf("[GetKeysByPattern] pattern=key*; key=%s\n", key)
|
|
||||||
}
|
|
||||||
|
|
||||||
cache.Expire("key", time.Hour)
|
|
||||||
time.Sleep(500*time.Millisecond)
|
|
||||||
timeUntilExpiration, _ := cache.TTL("key")
|
|
||||||
fmt.Println("[TTL] Number of minutes before 'key' expires:", int(timeUntilExpiration.Seconds()))
|
|
||||||
|
|
||||||
cache.Delete("key")
|
|
||||||
cache.DeleteAll([]string{"k1", "k2", "k3"})
|
|
||||||
|
|
||||||
cache.Clear()
|
|
||||||
fmt.Println("[Count] Cache size after clearing the cache:", cache.Count())
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
<details>
|
|
||||||
<summary>Output</summary>
|
|
||||||
|
|
||||||
```
|
|
||||||
[Count] Cache size: 5
|
|
||||||
[Get] key=key; value=value; exists=true
|
|
||||||
[GetByKeys] key=k1; value=v1
|
|
||||||
[GetByKeys] key=k2; value=v2
|
|
||||||
[GetByKeys] key=k3; value=v3
|
|
||||||
[GetKeysByPattern] pattern=key*; key=key-with-ttl
|
|
||||||
[GetKeysByPattern] pattern=key*; key=key
|
|
||||||
[TTL] Number of minutes before 'key' expires: 3599
|
|
||||||
[Count] Cache size after clearing the cache: 0
|
|
||||||
```
|
|
||||||
</details>
|
|
||||||
|
|
||||||
|
|
||||||
## Persistence
|
|
||||||
Prior to v2, gocache supported persistence out of the box.
|
|
||||||
|
|
||||||
After some thinking, I decided that persistence added too many dependencies, and given than this is a cache library
|
|
||||||
and most people wouldn't be interested in persistence, I decided to get rid of it.
|
|
||||||
|
|
||||||
That being said, you can use the `GetAll` and `SetAll` methods of `gocache.Cache` to implement persistence yourself.
|
|
||||||
|
|
||||||
|
|
||||||
## Eviction
|
|
||||||
### MaxSize
|
|
||||||
Eviction by MaxSize is the default behavior, and is also the most efficient.
|
|
||||||
|
|
||||||
The code below will create a cache that has a maximum size of 1000:
|
|
||||||
```go
|
|
||||||
cache := gocache.NewCache().WithMaxSize(1000)
|
|
||||||
```
|
|
||||||
This means that whenever an operation causes the total size of the cache to go above 1000, the tail will be evicted.
|
|
||||||
|
|
||||||
### MaxMemoryUsage
|
|
||||||
Eviction by MaxMemoryUsage is **disabled by default**, and is in alpha.
|
|
||||||
|
|
||||||
The code below will create a cache that has a maximum memory usage of 50MB:
|
|
||||||
```go
|
|
||||||
cache := gocache.NewCache().WithMaxSize(0).WithMaxMemoryUsage(50*gocache.Megabyte)
|
|
||||||
```
|
|
||||||
This means that whenever an operation causes the total memory usage of the cache to go above 50MB, one or more tails
|
|
||||||
will be evicted.
|
|
||||||
|
|
||||||
Unlike evictions caused by reaching the MaxSize, evictions triggered by MaxMemoryUsage may lead to multiple entries
|
|
||||||
being evicted in a row. The reason for this is that if, for instance, you had 100 entries of 0.1MB each and you suddenly added
|
|
||||||
a single entry of 10MB, 100 entries would need to be evicted to make enough space for that new big entry.
|
|
||||||
|
|
||||||
It's very important to keep in mind that eviction by MaxMemoryUsage is approximate.
|
|
||||||
|
|
||||||
**The only memory taken into consideration is the size of the cache, not the size of the entire application.**
|
|
||||||
If you pass along 100MB worth of data in a matter of seconds, even though the cache's memory usage will remain
|
|
||||||
under 50MB (or whatever you configure the MaxMemoryUsage to), the memory footprint generated by that 100MB will
|
|
||||||
still exist until the next GC cycle.
|
|
||||||
|
|
||||||
As previously mentioned, this is a work in progress, and here's a list of the things you should keep in mind:
|
|
||||||
- The memory usage of structs are a gross estimation and may not reflect the actual memory usage.
|
|
||||||
- Native types (string, int, bool, []byte, etc.) are the most accurate for calculating the memory usage.
|
|
||||||
- Adding an entry bigger than the configured MaxMemoryUsage will work, but it will evict all other entries.
|
|
||||||
|
|
||||||
|
|
||||||
## Expiration
|
|
||||||
There are two ways that the deletion of expired keys can take place:
|
|
||||||
- Active
|
|
||||||
- Passive
|
|
||||||
|
|
||||||
**Active deletion of expired keys** happens when an attempt is made to access the value of a cache entry that expired.
|
|
||||||
`Get`, `GetByKeys` and `GetAll` are the only functions that can trigger active deletion of expired keys.
|
|
||||||
|
|
||||||
**Passive deletion of expired keys** runs in the background and is managed by the janitor.
|
|
||||||
If you do not start the janitor, there will be no passive deletion of expired keys.
|
|
||||||
|
|
||||||
|
|
||||||
## Performance
|
|
||||||
### Summary
|
|
||||||
- **Set**: Both map and gocache have the same performance.
|
|
||||||
- **Get**: Map is faster than gocache.
|
|
||||||
|
|
||||||
This is because gocache keeps track of the head and the tail for eviction and expiration/TTL.
|
|
||||||
|
|
||||||
Ultimately, the difference is negligible.
|
|
||||||
|
|
||||||
We could add a way to disable eviction or disable expiration altogether just to match the map's performance,
|
|
||||||
but if you're looking into using a library like gocache, odds are, you want more than just a map.
|
|
||||||
|
|
||||||
|
|
||||||
### Results
|
|
||||||
| key | value |
|
|
||||||
|:-------|:---------|
|
|
||||||
| goos | windows |
|
|
||||||
| goarch | amd64 |
|
|
||||||
| cpu | i7-9700K |
|
|
||||||
| mem | 32G DDR4 |
|
|
||||||
|
|
||||||
```
|
|
||||||
// Normal map
|
|
||||||
BenchmarkMap_Get-8 49944228 24.2 ns/op 7 B/op 0 allocs/op
|
|
||||||
BenchmarkMap_Set/small_value-8 3939964 394.1 ns/op 188 B/op 2 allocs/op
|
|
||||||
BenchmarkMap_Set/medium_value-8 3868586 395.5 ns/op 191 B/op 2 allocs/op
|
|
||||||
BenchmarkMap_Set/large_value-8 3992138 385.3 ns/op 186 B/op 2 allocs/op
|
|
||||||
// Gocache
|
|
||||||
BenchmarkCache_Get/FirstInFirstOut-8 27907950 44.3 ns/op 7 B/op 0 allocs/op
|
|
||||||
BenchmarkCache_Get/LeastRecentlyUsed-8 28211396 44.2 ns/op 7 B/op 0 allocs/op
|
|
||||||
BenchmarkCache_Set/FirstInFirstOut_small_value-8 3139538 373.5 ns/op 185 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_Set/FirstInFirstOut_medium_value-8 3099516 378.6 ns/op 186 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_Set/FirstInFirstOut_large_value-8 3086776 386.7 ns/op 186 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_Set/LeastRecentlyUsed_small_value-8 3070555 379.0 ns/op 187 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_Set/LeastRecentlyUsed_medium_value-8 3056928 383.8 ns/op 187 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_Set/LeastRecentlyUsed_large_value-8 3108250 383.8 ns/op 186 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetUsingMaxMemoryUsage/medium_value-8 2773315 449.0 ns/op 210 B/op 4 allocs/op
|
|
||||||
BenchmarkCache_SetUsingMaxMemoryUsage/large_value-8 2731818 440.0 ns/op 211 B/op 4 allocs/op
|
|
||||||
BenchmarkCache_SetUsingMaxMemoryUsage/small_value-8 2659296 446.8 ns/op 213 B/op 4 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/100_small_value-8 4848658 248.8 ns/op 114 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/10000_small_value-8 4117632 293.7 ns/op 106 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/100000_small_value-8 3867402 313.0 ns/op 110 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/100_medium_value-8 4750057 250.1 ns/op 113 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/10000_medium_value-8 4143772 294.5 ns/op 106 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/100000_medium_value-8 3768883 313.2 ns/op 111 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/100_large_value-8 4822646 251.1 ns/op 114 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/10000_large_value-8 4154428 291.6 ns/op 106 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSize/100000_large_value-8 3897358 313.7 ns/op 110 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_small_value-8 4784180 254.2 ns/op 114 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_small_value-8 4067042 292.0 ns/op 106 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_small_value-8 3832760 313.8 ns/op 111 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_medium_value-8 4846706 252.2 ns/op 114 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_medium_value-8 4103817 292.5 ns/op 106 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_medium_value-8 3845623 315.1 ns/op 111 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_large_value-8 4744513 257.9 ns/op 114 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_large_value-8 3956316 299.5 ns/op 106 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_large_value-8 3876843 351.3 ns/op 110 B/op 3 allocs/op
|
|
||||||
BenchmarkCache_GetSetMultipleConcurrent-8 750088 1566.0 ns/op 128 B/op 8 allocs/op
|
|
||||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction/FirstInFirstOut-8 3836961 316.2 ns/op 80 B/op 1 allocs/op
|
|
||||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction/LeastRecentlyUsed-8 3846165 315.6 ns/op 80 B/op 1 allocs/op
|
|
||||||
BenchmarkCache_GetConcurrently/FirstInFirstOut-8 4830342 239.8 ns/op 8 B/op 1 allocs/op
|
|
||||||
BenchmarkCache_GetConcurrently/LeastRecentlyUsed-8 4895587 243.2 ns/op 8 B/op 1 allocs/op
|
|
||||||
(Trimmed "BenchmarkCache_" for readability)
|
|
||||||
WithForceNilInterfaceOnNilPointer/true_with_nil_struct_pointer-8 6901461 178.5 ns/op 7 B/op 1 allocs/op
|
|
||||||
WithForceNilInterfaceOnNilPointer/true-8 6629566 180.7 ns/op 7 B/op 1 allocs/op
|
|
||||||
WithForceNilInterfaceOnNilPointer/false_with_nil_struct_pointer-8 6282798 170.1 ns/op 7 B/op 1 allocs/op
|
|
||||||
WithForceNilInterfaceOnNilPointer/false-8 6741382 172.6 ns/op 7 B/op 1 allocs/op
|
|
||||||
WithForceNilInterfaceOnNilPointerWithConcurrency/true_with_nil_struct_pointer-8 4432951 258.0 ns/op 8 B/op 1 allocs/op
|
|
||||||
WithForceNilInterfaceOnNilPointerWithConcurrency/true-8 4676943 244.4 ns/op 8 B/op 1 allocs/op
|
|
||||||
WithForceNilInterfaceOnNilPointerWithConcurrency/false_with_nil_struct_pointer-8 4818418 239.6 ns/op 8 B/op 1 allocs/op
|
|
||||||
WithForceNilInterfaceOnNilPointerWithConcurrency/false-8 5025937 238.2 ns/op 8 B/op 1 allocs/op
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## FAQ
|
|
||||||
|
|
||||||
### How can I persist the data on application termination?
|
|
||||||
While creating your own auto save feature might come in handy, it may still lead to loss of data if the application
|
|
||||||
automatically saves every 10 minutes and your application crashes 9 minutes after the previous save.
|
|
||||||
|
|
||||||
To increase your odds of not losing any data, you can use Go's `signal` package, more specifically its `Notify` function
|
|
||||||
which allows listening for termination signals like SIGTERM and SIGINT. Once a termination signal is caught, you can
|
|
||||||
add the necessary logic for a graceful shutdown.
|
|
||||||
|
|
||||||
In the following example, the code that would usually be present in the `main` function is moved to a different function
|
|
||||||
named `Start` which is launched on a different goroutine so that listening for a termination signals is what blocks the
|
|
||||||
main goroutine instead:
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/TwiN/gocache/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
var cache = gocache.NewCache()
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
data := retrieveCacheEntriesUsingWhateverMeanYouUsedToPersistIt()
|
|
||||||
cache.SetAll(data)
|
|
||||||
// Start everything else on another goroutine to prevent blocking the main goroutine
|
|
||||||
go Start()
|
|
||||||
// Wait for termination signal
|
|
||||||
sig := make(chan os.Signal, 1)
|
|
||||||
done := make(chan bool, 1)
|
|
||||||
signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
|
|
||||||
go func() {
|
|
||||||
<-sig
|
|
||||||
log.Println("Received termination signal, attempting to gracefully shut down")
|
|
||||||
// Persist the cache entries
|
|
||||||
cacheEntries := cache.GetAll()
|
|
||||||
persistCacheEntriesHoweverYouWant(cacheEntries)
|
|
||||||
// Tell the main goroutine that we're done
|
|
||||||
done <- true
|
|
||||||
}()
|
|
||||||
<-done
|
|
||||||
log.Println("Shutting down")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that this won't protect you from a SIGKILL, as this signal cannot be caught.
|
|
108
vendor/github.com/TwiN/gocache/v2/entry.go
generated
vendored
108
vendor/github.com/TwiN/gocache/v2/entry.go
generated
vendored
@ -1,108 +0,0 @@
|
|||||||
package gocache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Entry is a cache entry
|
|
||||||
type Entry struct {
|
|
||||||
// Key is the name of the cache entry
|
|
||||||
Key string
|
|
||||||
|
|
||||||
// Value is the value of the cache entry
|
|
||||||
Value any
|
|
||||||
|
|
||||||
// RelevantTimestamp is the variable used to store either:
|
|
||||||
// - creation timestamp, if the Cache's EvictionPolicy is FirstInFirstOut
|
|
||||||
// - last access timestamp, if the Cache's EvictionPolicy is LeastRecentlyUsed
|
|
||||||
//
|
|
||||||
// Note that updating an existing entry will also update this value
|
|
||||||
RelevantTimestamp time.Time
|
|
||||||
|
|
||||||
// Expiration is the unix time in nanoseconds at which the entry will expire (-1 means no expiration)
|
|
||||||
Expiration int64
|
|
||||||
|
|
||||||
next *Entry
|
|
||||||
previous *Entry
|
|
||||||
}
|
|
||||||
|
|
||||||
// Accessed updates the Entry's RelevantTimestamp to now
|
|
||||||
func (entry *Entry) Accessed() {
|
|
||||||
entry.RelevantTimestamp = time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expired returns whether the Entry has expired
|
|
||||||
func (entry Entry) Expired() bool {
|
|
||||||
if entry.Expiration > 0 {
|
|
||||||
if time.Now().UnixNano() > entry.Expiration {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// SizeInBytes returns the size of an entry in bytes, approximately.
|
|
||||||
func (entry *Entry) SizeInBytes() int {
|
|
||||||
return toBytes(entry.Key) + toBytes(entry.Value) + 32
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBytes(value any) int {
|
|
||||||
switch value.(type) {
|
|
||||||
case string:
|
|
||||||
return int(unsafe.Sizeof(value)) + len(value.(string))
|
|
||||||
case int8, uint8, bool:
|
|
||||||
return int(unsafe.Sizeof(value)) + 1
|
|
||||||
case int16, uint16:
|
|
||||||
return int(unsafe.Sizeof(value)) + 2
|
|
||||||
case int32, uint32, float32, complex64:
|
|
||||||
return int(unsafe.Sizeof(value)) + 4
|
|
||||||
case int64, uint64, int, uint, float64, complex128:
|
|
||||||
return int(unsafe.Sizeof(value)) + 8
|
|
||||||
case []any:
|
|
||||||
size := 0
|
|
||||||
for _, v := range value.([]any) {
|
|
||||||
size += toBytes(v)
|
|
||||||
}
|
|
||||||
return int(unsafe.Sizeof(value)) + size
|
|
||||||
case []string:
|
|
||||||
size := 0
|
|
||||||
for _, v := range value.([]string) {
|
|
||||||
size += toBytes(v)
|
|
||||||
}
|
|
||||||
return int(unsafe.Sizeof(value)) + size
|
|
||||||
case []int8:
|
|
||||||
return int(unsafe.Sizeof(value)) + len(value.([]int8))
|
|
||||||
case []uint8:
|
|
||||||
return int(unsafe.Sizeof(value)) + len(value.([]uint8))
|
|
||||||
case []bool:
|
|
||||||
return int(unsafe.Sizeof(value)) + len(value.([]bool))
|
|
||||||
case []int16:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]int16)) * 2)
|
|
||||||
case []uint16:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint16)) * 2)
|
|
||||||
case []int32:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]int32)) * 4)
|
|
||||||
case []uint32:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint32)) * 4)
|
|
||||||
case []float32:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]float32)) * 4)
|
|
||||||
case []complex64:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]complex64)) * 4)
|
|
||||||
case []int64:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]int64)) * 8)
|
|
||||||
case []uint64:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint64)) * 8)
|
|
||||||
case []int:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]int)) * 8)
|
|
||||||
case []uint:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint)) * 8)
|
|
||||||
case []float64:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]float64)) * 8)
|
|
||||||
case []complex128:
|
|
||||||
return int(unsafe.Sizeof(value)) + (len(value.([]complex128)) * 8)
|
|
||||||
default:
|
|
||||||
return int(unsafe.Sizeof(value)) + len(fmt.Sprintf("%v", value))
|
|
||||||
}
|
|
||||||
}
|
|
599
vendor/github.com/TwiN/gocache/v2/gocache.go
generated
vendored
599
vendor/github.com/TwiN/gocache/v2/gocache.go
generated
vendored
@ -1,599 +0,0 @@
|
|||||||
package gocache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"reflect"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
Debug = false
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// NoMaxSize means that the cache has no maximum number of entries in the cache
|
|
||||||
// Setting Cache.maxSize to this value also means there will be no eviction
|
|
||||||
NoMaxSize = 0
|
|
||||||
|
|
||||||
// NoMaxMemoryUsage means that the cache has no maximum number of entries in the cache
|
|
||||||
NoMaxMemoryUsage = 0
|
|
||||||
|
|
||||||
// DefaultMaxSize is the max size set if no max size is specified
|
|
||||||
DefaultMaxSize = 100000
|
|
||||||
|
|
||||||
// NoExpiration is the value that must be used as TTL to specify that the given key should never expire
|
|
||||||
NoExpiration = -1
|
|
||||||
|
|
||||||
Kilobyte = 1024
|
|
||||||
Megabyte = 1024 * Kilobyte
|
|
||||||
Gigabyte = 1024 * Megabyte
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
ErrKeyDoesNotExist = errors.New("key does not exist") // Returned when a cache key does not exist
|
|
||||||
ErrKeyHasNoExpiration = errors.New("key has no expiration") // Returned when a cache key has no expiration
|
|
||||||
ErrJanitorAlreadyRunning = errors.New("janitor is already running") // Returned when the janitor has already been started
|
|
||||||
)
|
|
||||||
|
|
||||||
// Cache is the core struct of gocache which contains the data as well as all relevant configuration fields
|
|
||||||
//
|
|
||||||
// Do not instantiate this struct directly, use NewCache instead
|
|
||||||
type Cache struct {
|
|
||||||
// maxSize is the maximum amount of entries that can be in the cache at any given time
|
|
||||||
// By default, this is set to DefaultMaxSize
|
|
||||||
maxSize int
|
|
||||||
|
|
||||||
// maxMemoryUsage is the maximum amount of memory that can be taken up by the cache at any time
|
|
||||||
// By default, this is set to NoMaxMemoryUsage, meaning that the default behavior is to not evict
|
|
||||||
// based on maximum memory usage
|
|
||||||
maxMemoryUsage int
|
|
||||||
|
|
||||||
// evictionPolicy is the eviction policy
|
|
||||||
evictionPolicy EvictionPolicy
|
|
||||||
|
|
||||||
// defaultTTL is the default TTL for each entry
|
|
||||||
// Defaults to NoExpiration
|
|
||||||
defaultTTL time.Duration
|
|
||||||
|
|
||||||
// stats is the object that contains cache statistics/metrics
|
|
||||||
stats *Statistics
|
|
||||||
|
|
||||||
// entries is the content of the cache
|
|
||||||
entries map[string]*Entry
|
|
||||||
|
|
||||||
// mutex is the lock for making concurrent operations on the cache
|
|
||||||
mutex sync.RWMutex
|
|
||||||
|
|
||||||
// head is the cache entry at the head of the cache
|
|
||||||
head *Entry
|
|
||||||
|
|
||||||
// tail is the last cache node and also the next entry that will be evicted
|
|
||||||
tail *Entry
|
|
||||||
|
|
||||||
// stopJanitor is the channel used to stop the janitor
|
|
||||||
stopJanitor chan bool
|
|
||||||
|
|
||||||
// memoryUsage is the approximate memory usage of the cache (dataset only) in bytes
|
|
||||||
memoryUsage int
|
|
||||||
|
|
||||||
// forceNilInterfaceOnNilPointer determines whether all Set-like functions should set a value as nil if the
|
|
||||||
// interface passed has a nil value but not a nil type.
|
|
||||||
//
|
|
||||||
// By default, interfaces are only nil when both their type and value is nil.
|
|
||||||
// This means that when you pass a pointer to a nil value, the type of the interface
|
|
||||||
// will still show as nil, which means that if you don't cast the interface after
|
|
||||||
// retrieving it, a nil check will return that the value is not false.
|
|
||||||
forceNilInterfaceOnNilPointer bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxSize returns the maximum amount of keys that can be present in the cache before
|
|
||||||
// new entries trigger the eviction of the tail
|
|
||||||
func (cache *Cache) MaxSize() int {
|
|
||||||
return cache.maxSize
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxMemoryUsage returns the configured maxMemoryUsage of the cache
|
|
||||||
func (cache *Cache) MaxMemoryUsage() int {
|
|
||||||
return cache.maxMemoryUsage
|
|
||||||
}
|
|
||||||
|
|
||||||
// EvictionPolicy returns the EvictionPolicy of the Cache
|
|
||||||
func (cache *Cache) EvictionPolicy() EvictionPolicy {
|
|
||||||
return cache.evictionPolicy
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stats returns statistics from the cache
|
|
||||||
func (cache *Cache) Stats() Statistics {
|
|
||||||
cache.mutex.RLock()
|
|
||||||
stats := Statistics{
|
|
||||||
EvictedKeys: cache.stats.EvictedKeys,
|
|
||||||
ExpiredKeys: cache.stats.ExpiredKeys,
|
|
||||||
Hits: cache.stats.Hits,
|
|
||||||
Misses: cache.stats.Misses,
|
|
||||||
}
|
|
||||||
cache.mutex.RUnlock()
|
|
||||||
return stats
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemoryUsage returns the current memory usage of the cache's dataset in bytes
|
|
||||||
// If MaxMemoryUsage is set to NoMaxMemoryUsage, this will return 0
|
|
||||||
func (cache *Cache) MemoryUsage() int {
|
|
||||||
return cache.memoryUsage
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMaxSize sets the maximum amount of entries that can be in the cache at any given time
|
|
||||||
// A maxSize of 0 or less means infinite
|
|
||||||
func (cache *Cache) WithMaxSize(maxSize int) *Cache {
|
|
||||||
if maxSize < 0 {
|
|
||||||
maxSize = NoMaxSize
|
|
||||||
}
|
|
||||||
if maxSize != NoMaxSize && cache.Count() == 0 {
|
|
||||||
cache.entries = make(map[string]*Entry, maxSize)
|
|
||||||
}
|
|
||||||
cache.maxSize = maxSize
|
|
||||||
return cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithMaxMemoryUsage sets the maximum amount of memory that can be used by the cache at any given time
|
|
||||||
//
|
|
||||||
// NOTE: This is approximate.
|
|
||||||
//
|
|
||||||
// Setting this to NoMaxMemoryUsage will disable eviction by memory usage
|
|
||||||
func (cache *Cache) WithMaxMemoryUsage(maxMemoryUsageInBytes int) *Cache {
|
|
||||||
if maxMemoryUsageInBytes < 0 {
|
|
||||||
maxMemoryUsageInBytes = NoMaxMemoryUsage
|
|
||||||
}
|
|
||||||
cache.maxMemoryUsage = maxMemoryUsageInBytes
|
|
||||||
return cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithEvictionPolicy sets eviction algorithm.
|
|
||||||
//
|
|
||||||
// Defaults to FirstInFirstOut (FIFO)
|
|
||||||
func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache {
|
|
||||||
cache.evictionPolicy = policy
|
|
||||||
return cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDefaultTTL sets the default TTL for each entry (unless a different TTL is specified using SetWithTTL or SetAllWithTTL)
|
|
||||||
//
|
|
||||||
// Defaults to NoExpiration (-1)
|
|
||||||
func (cache *Cache) WithDefaultTTL(ttl time.Duration) *Cache {
|
|
||||||
if ttl > 1 {
|
|
||||||
cache.defaultTTL = ttl
|
|
||||||
}
|
|
||||||
return cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithForceNilInterfaceOnNilPointer sets whether all Set-like functions should set a value as nil if the
|
|
||||||
// interface passed has a nil value but not a nil type.
|
|
||||||
//
|
|
||||||
// In Go, an interface is only nil if both its type and value are nil, which means that a nil pointer
|
|
||||||
// (e.g. (*Struct)(nil)) will retain its attribution to the type, and the unmodified value returned from
|
|
||||||
// Cache.Get, for instance, would return false when compared with nil if this option is set to false.
|
|
||||||
//
|
|
||||||
// We can bypass this by detecting if the interface's value is nil and setting it to nil rather than
|
|
||||||
// a nil pointer, which will make the value returned from Cache.Get return true when compared with nil.
|
|
||||||
// This is exactly what passing true to WithForceNilInterfaceOnNilPointer does, and it's also the default behavior.
|
|
||||||
//
|
|
||||||
// Alternatively, you may pass false to WithForceNilInterfaceOnNilPointer, which will mean that you'll have
|
|
||||||
// to cast the value returned from Cache.Get to its original type to check for whether the pointer returned
|
|
||||||
// is nil or not.
|
|
||||||
//
|
|
||||||
// If set to true (default):
|
|
||||||
//
|
|
||||||
// cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(true)
|
|
||||||
// cache.Set("key", (*Struct)(nil))
|
|
||||||
// value, _ := cache.Get("key")
|
|
||||||
// // the following returns true, because the interface{} (any) was forcefully set to nil
|
|
||||||
// if value == nil {}
|
|
||||||
// // the following will panic, because the value has been casted to its type (which is nil)
|
|
||||||
// if value.(*Struct) == nil {}
|
|
||||||
//
|
|
||||||
// If set to false:
|
|
||||||
//
|
|
||||||
// cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(false)
|
|
||||||
// cache.Set("key", (*Struct)(nil))
|
|
||||||
// value, _ := cache.Get("key")
|
|
||||||
// // the following returns false, because the interface{} (any) returned has a non-nil type (*Struct)
|
|
||||||
// if value == nil {}
|
|
||||||
// // the following returns true, because the value has been cast to its type
|
|
||||||
// if value.(*Struct) == nil {}
|
|
||||||
//
|
|
||||||
// In other words, if set to true, you do not need to cast the value returned from the cache to
|
|
||||||
// check if the value is nil.
|
|
||||||
//
|
|
||||||
// Defaults to true
|
|
||||||
func (cache *Cache) WithForceNilInterfaceOnNilPointer(forceNilInterfaceOnNilPointer bool) *Cache {
|
|
||||||
cache.forceNilInterfaceOnNilPointer = forceNilInterfaceOnNilPointer
|
|
||||||
return cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCache creates a new Cache
|
|
||||||
//
|
|
||||||
// Should be used in conjunction with Cache.WithMaxSize, Cache.WithMaxMemoryUsage and/or Cache.WithEvictionPolicy
|
|
||||||
//
|
|
||||||
// gocache.NewCache().WithMaxSize(10000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
|
||||||
func NewCache() *Cache {
|
|
||||||
return &Cache{
|
|
||||||
maxSize: DefaultMaxSize,
|
|
||||||
evictionPolicy: FirstInFirstOut,
|
|
||||||
defaultTTL: NoExpiration,
|
|
||||||
stats: &Statistics{},
|
|
||||||
entries: make(map[string]*Entry),
|
|
||||||
mutex: sync.RWMutex{},
|
|
||||||
stopJanitor: nil,
|
|
||||||
forceNilInterfaceOnNilPointer: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set creates or updates a key with a given value
|
|
||||||
func (cache *Cache) Set(key string, value any) {
|
|
||||||
cache.SetWithTTL(key, value, cache.defaultTTL)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetWithTTL creates or updates a key with a given value and sets an expiration time (-1 is NoExpiration)
|
|
||||||
//
|
|
||||||
// The TTL provided must be greater than 0, or NoExpiration (-1). If a negative value that isn't -1 (NoExpiration) is
|
|
||||||
// provided, the entry will not be created if the key doesn't exist
|
|
||||||
func (cache *Cache) SetWithTTL(key string, value any, ttl time.Duration) {
|
|
||||||
// An interface is only nil if both its value and its type are nil, however, passing a nil pointer as an interface{}
|
|
||||||
// means that the interface itself is not nil, because the interface value is nil but not the type.
|
|
||||||
if cache.forceNilInterfaceOnNilPointer {
|
|
||||||
if value != nil && (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) {
|
|
||||||
value = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cache.mutex.Lock()
|
|
||||||
entry, ok := cache.get(key)
|
|
||||||
if !ok {
|
|
||||||
// A negative TTL that isn't -1 (NoExpiration) or 0 is an entry that will expire instantly,
|
|
||||||
// so might as well just not create it in the first place
|
|
||||||
if ttl != NoExpiration && ttl < 1 {
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Cache entry doesn't exist, so we have to create a new one
|
|
||||||
entry = &Entry{
|
|
||||||
Key: key,
|
|
||||||
Value: value,
|
|
||||||
RelevantTimestamp: time.Now(),
|
|
||||||
next: cache.head,
|
|
||||||
}
|
|
||||||
if cache.head == nil {
|
|
||||||
cache.tail = entry
|
|
||||||
} else {
|
|
||||||
cache.head.previous = entry
|
|
||||||
}
|
|
||||||
cache.head = entry
|
|
||||||
cache.entries[key] = entry
|
|
||||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
|
||||||
cache.memoryUsage += entry.SizeInBytes()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// A negative TTL that isn't -1 (NoExpiration) or 0 is an entry that will expire instantly,
|
|
||||||
// so might as well just delete it immediately instead of updating it
|
|
||||||
if ttl != NoExpiration && ttl < 1 {
|
|
||||||
cache.delete(key)
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
|
||||||
// Subtract the old entry from the cache's memoryUsage
|
|
||||||
cache.memoryUsage -= entry.SizeInBytes()
|
|
||||||
}
|
|
||||||
// Update existing entry's value
|
|
||||||
entry.Value = value
|
|
||||||
entry.RelevantTimestamp = time.Now()
|
|
||||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
|
||||||
// Add the memory usage of the new entry to the cache's memoryUsage
|
|
||||||
cache.memoryUsage += entry.SizeInBytes()
|
|
||||||
}
|
|
||||||
// Because we just updated the entry, we need to move it back to HEAD
|
|
||||||
cache.moveExistingEntryToHead(entry)
|
|
||||||
}
|
|
||||||
if ttl != NoExpiration {
|
|
||||||
entry.Expiration = time.Now().Add(ttl).UnixNano()
|
|
||||||
} else {
|
|
||||||
entry.Expiration = NoExpiration
|
|
||||||
}
|
|
||||||
// If the cache doesn't have a maxSize/maxMemoryUsage, then there's no point
|
|
||||||
// checking if we need to evict an entry, so we'll just return now
|
|
||||||
if cache.maxSize == NoMaxSize && cache.maxMemoryUsage == NoMaxMemoryUsage {
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// If there's a maxSize and the cache has more entries than the maxSize, evict
|
|
||||||
if cache.maxSize != NoMaxSize && len(cache.entries) > cache.maxSize {
|
|
||||||
cache.evict()
|
|
||||||
}
|
|
||||||
// If there's a maxMemoryUsage and the memoryUsage is above the maxMemoryUsage, evict
|
|
||||||
if cache.maxMemoryUsage != NoMaxMemoryUsage && cache.memoryUsage > cache.maxMemoryUsage {
|
|
||||||
for cache.memoryUsage > cache.maxMemoryUsage && len(cache.entries) > 0 {
|
|
||||||
cache.evict()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetAll creates or updates multiple values
|
|
||||||
func (cache *Cache) SetAll(entries map[string]any) {
|
|
||||||
cache.SetAllWithTTL(entries, cache.defaultTTL)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetAllWithTTL creates or updates multiple values
|
|
||||||
func (cache *Cache) SetAllWithTTL(entries map[string]any, ttl time.Duration) {
|
|
||||||
for key, value := range entries {
|
|
||||||
cache.SetWithTTL(key, value, ttl)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get retrieves an entry using the key passed as parameter
|
|
||||||
// If there is no such entry, the value returned will be nil and the boolean will be false
|
|
||||||
// If there is an entry, the value returned will be the value cached and the boolean will be true
|
|
||||||
func (cache *Cache) Get(key string) (any, bool) {
|
|
||||||
cache.mutex.Lock()
|
|
||||||
entry, ok := cache.get(key)
|
|
||||||
if !ok {
|
|
||||||
cache.stats.Misses++
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
if entry.Expired() {
|
|
||||||
cache.stats.ExpiredKeys++
|
|
||||||
cache.delete(key)
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
cache.stats.Hits++
|
|
||||||
if cache.evictionPolicy == LeastRecentlyUsed {
|
|
||||||
entry.Accessed()
|
|
||||||
if cache.head == entry {
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return entry.Value, true
|
|
||||||
}
|
|
||||||
// Because the eviction policy is LRU, we need to move the entry back to HEAD
|
|
||||||
cache.moveExistingEntryToHead(entry)
|
|
||||||
}
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return entry.Value, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetValue retrieves an entry using the key passed as parameter
|
|
||||||
// Unlike Get, this function only returns the value
|
|
||||||
func (cache *Cache) GetValue(key string) any {
|
|
||||||
value, _ := cache.Get(key)
|
|
||||||
return value
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetByKeys retrieves multiple entries using the keys passed as parameter
|
|
||||||
// All keys are returned in the map, regardless of whether they exist or not, however, entries that do not exist in the
|
|
||||||
// cache will return nil, meaning that there is no way of determining whether a key genuinely has the value nil, or
|
|
||||||
// whether it doesn't exist in the cache using only this function.
|
|
||||||
func (cache *Cache) GetByKeys(keys []string) map[string]any {
|
|
||||||
entries := make(map[string]any)
|
|
||||||
for _, key := range keys {
|
|
||||||
entries[key], _ = cache.Get(key)
|
|
||||||
}
|
|
||||||
return entries
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAll retrieves all cache entries
|
|
||||||
//
|
|
||||||
// If the eviction policy is LeastRecentlyUsed, note that unlike Get and GetByKeys, this does not update the last access
|
|
||||||
// timestamp. The reason for this is that since all cache entries will be accessed, updating the last access timestamp
|
|
||||||
// would provide very little benefit while harming the ability to accurately determine the next key that will be evicted
|
|
||||||
//
|
|
||||||
// You should probably avoid using this if you have a lot of entries.
|
|
||||||
//
|
|
||||||
// GetKeysByPattern is a good alternative if you want to retrieve entries that you do not have the key for, as it only
|
|
||||||
// retrieves the keys and does not trigger active eviction and has a parameter for setting a limit to the number of keys
|
|
||||||
// you wish to retrieve.
|
|
||||||
func (cache *Cache) GetAll() map[string]any {
|
|
||||||
entries := make(map[string]any)
|
|
||||||
cache.mutex.Lock()
|
|
||||||
for key, entry := range cache.entries {
|
|
||||||
if entry.Expired() {
|
|
||||||
cache.delete(key)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
entries[key] = entry.Value
|
|
||||||
}
|
|
||||||
cache.stats.Hits += uint64(len(entries))
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return entries
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetKeysByPattern retrieves a slice of keys that match a given pattern
|
|
||||||
// If the limit is set to 0, the entire cache will be searched for matching keys.
|
|
||||||
// If the limit is above 0, the search will stop once the specified number of matching keys have been found.
|
|
||||||
//
|
|
||||||
// e.g.
|
|
||||||
//
|
|
||||||
// cache.GetKeysByPattern("*some*", 0) will return all keys containing "some" in them
|
|
||||||
// cache.GetKeysByPattern("*some*", 5) will return 5 keys (or less) containing "some" in them
|
|
||||||
//
|
|
||||||
// Note that GetKeysByPattern does not trigger active evictions, nor does it count as accessing the entry (if LRU).
|
|
||||||
// The reason for that behavior is that these two (active eviction and access) only applies when you access the value
|
|
||||||
// of the cache entry, and this function only returns the keys.
|
|
||||||
func (cache *Cache) GetKeysByPattern(pattern string, limit int) []string {
|
|
||||||
var matchingKeys []string
|
|
||||||
cache.mutex.Lock()
|
|
||||||
for key, value := range cache.entries {
|
|
||||||
if value.Expired() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if MatchPattern(pattern, key) {
|
|
||||||
matchingKeys = append(matchingKeys, key)
|
|
||||||
if limit > 0 && len(matchingKeys) >= limit {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return matchingKeys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes a key from the cache
|
|
||||||
//
|
|
||||||
// Returns false if the key did not exist.
|
|
||||||
func (cache *Cache) Delete(key string) bool {
|
|
||||||
cache.mutex.Lock()
|
|
||||||
ok := cache.delete(key)
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteAll deletes multiple entries based on the keys passed as parameter
|
|
||||||
//
|
|
||||||
// Returns the number of keys deleted
|
|
||||||
func (cache *Cache) DeleteAll(keys []string) int {
|
|
||||||
numberOfKeysDeleted := 0
|
|
||||||
cache.mutex.Lock()
|
|
||||||
for _, key := range keys {
|
|
||||||
if cache.delete(key) {
|
|
||||||
numberOfKeysDeleted++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
return numberOfKeysDeleted
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteKeysByPattern deletes all entries matching a given key pattern and returns the number of entries deleted.
|
|
||||||
//
|
|
||||||
// Note that DeleteKeysByPattern does not trigger active evictions, nor does it count as accessing the entry (if LRU).
|
|
||||||
func (cache *Cache) DeleteKeysByPattern(pattern string) int {
|
|
||||||
return cache.DeleteAll(cache.GetKeysByPattern(pattern, 0))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the total amount of entries in the cache, regardless of whether they're expired or not
|
|
||||||
func (cache *Cache) Count() int {
|
|
||||||
cache.mutex.RLock()
|
|
||||||
count := len(cache.entries)
|
|
||||||
cache.mutex.RUnlock()
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear deletes all entries from the cache
|
|
||||||
func (cache *Cache) Clear() {
|
|
||||||
cache.mutex.Lock()
|
|
||||||
cache.entries = make(map[string]*Entry)
|
|
||||||
cache.memoryUsage = 0
|
|
||||||
cache.head = nil
|
|
||||||
cache.tail = nil
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TTL returns the time until the cache entry specified by the key passed as parameter
|
|
||||||
// will be deleted.
|
|
||||||
func (cache *Cache) TTL(key string) (time.Duration, error) {
|
|
||||||
cache.mutex.RLock()
|
|
||||||
entry, ok := cache.get(key)
|
|
||||||
cache.mutex.RUnlock()
|
|
||||||
if !ok {
|
|
||||||
return 0, ErrKeyDoesNotExist
|
|
||||||
}
|
|
||||||
if entry.Expiration == NoExpiration {
|
|
||||||
return 0, ErrKeyHasNoExpiration
|
|
||||||
}
|
|
||||||
timeUntilExpiration := time.Until(time.Unix(0, entry.Expiration))
|
|
||||||
if timeUntilExpiration < 0 {
|
|
||||||
// The key has already expired but hasn't been deleted yet.
|
|
||||||
// From the client's perspective, this means that the cache entry doesn't exist
|
|
||||||
return 0, ErrKeyDoesNotExist
|
|
||||||
}
|
|
||||||
return timeUntilExpiration, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Expire sets a key's expiration time
|
|
||||||
//
|
|
||||||
// A TTL of -1 means that the key will never expire
|
|
||||||
// A TTL of 0 means that the key will expire immediately
|
|
||||||
// If using LRU, note that this does not reset the position of the key
|
|
||||||
//
|
|
||||||
// Returns true if the cache key exists and has had its expiration time altered
|
|
||||||
func (cache *Cache) Expire(key string, ttl time.Duration) bool {
|
|
||||||
entry, ok := cache.get(key)
|
|
||||||
if !ok || entry.Expired() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if ttl != NoExpiration {
|
|
||||||
entry.Expiration = time.Now().Add(ttl).UnixNano()
|
|
||||||
} else {
|
|
||||||
entry.Expiration = NoExpiration
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// get retrieves an entry using the key passed as parameter, but unlike Get, it doesn't update the access time or
|
|
||||||
// move the position of the entry to the head
|
|
||||||
func (cache *Cache) get(key string) (*Entry, bool) {
|
|
||||||
entry, ok := cache.entries[key]
|
|
||||||
return entry, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cache *Cache) delete(key string) bool {
|
|
||||||
entry, ok := cache.entries[key]
|
|
||||||
if ok {
|
|
||||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
|
||||||
cache.memoryUsage -= entry.SizeInBytes()
|
|
||||||
}
|
|
||||||
cache.removeExistingEntryReferences(entry)
|
|
||||||
delete(cache.entries, key)
|
|
||||||
}
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// moveExistingEntryToHead replaces the current cache head for an existing entry
|
|
||||||
func (cache *Cache) moveExistingEntryToHead(entry *Entry) {
|
|
||||||
if !(entry == cache.head && entry == cache.tail) {
|
|
||||||
cache.removeExistingEntryReferences(entry)
|
|
||||||
}
|
|
||||||
if entry != cache.head {
|
|
||||||
entry.next = cache.head
|
|
||||||
entry.previous = nil
|
|
||||||
if cache.head != nil {
|
|
||||||
cache.head.previous = entry
|
|
||||||
}
|
|
||||||
cache.head = entry
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeExistingEntryReferences modifies the next and previous reference of an existing entry and re-links
|
|
||||||
// the next and previous entry accordingly, as well as the cache head or/and the cache tail if necessary.
|
|
||||||
// Note that it does not remove the entry from the cache, only the references.
|
|
||||||
func (cache *Cache) removeExistingEntryReferences(entry *Entry) {
|
|
||||||
if cache.tail == entry && cache.head == entry {
|
|
||||||
cache.tail = nil
|
|
||||||
cache.head = nil
|
|
||||||
} else if cache.tail == entry {
|
|
||||||
cache.tail = cache.tail.previous
|
|
||||||
} else if cache.head == entry {
|
|
||||||
cache.head = cache.head.next
|
|
||||||
}
|
|
||||||
if entry.previous != nil {
|
|
||||||
entry.previous.next = entry.next
|
|
||||||
}
|
|
||||||
if entry.next != nil {
|
|
||||||
entry.next.previous = entry.previous
|
|
||||||
}
|
|
||||||
entry.next = nil
|
|
||||||
entry.previous = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// evict removes the tail from the cache
|
|
||||||
func (cache *Cache) evict() {
|
|
||||||
if cache.tail == nil || len(cache.entries) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if cache.tail != nil {
|
|
||||||
oldTail := cache.tail
|
|
||||||
cache.removeExistingEntryReferences(oldTail)
|
|
||||||
delete(cache.entries, oldTail.Key)
|
|
||||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
|
||||||
cache.memoryUsage -= oldTail.SizeInBytes()
|
|
||||||
}
|
|
||||||
cache.stats.EvictedKeys++
|
|
||||||
}
|
|
||||||
}
|
|
146
vendor/github.com/TwiN/gocache/v2/janitor.go
generated
vendored
146
vendor/github.com/TwiN/gocache/v2/janitor.go
generated
vendored
@ -1,146 +0,0 @@
|
|||||||
package gocache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// JanitorShiftTarget is the target number of expired keys to find during passive clean up duty
|
|
||||||
// before pausing the passive expired keys eviction process
|
|
||||||
JanitorShiftTarget = 25
|
|
||||||
|
|
||||||
// JanitorMaxIterationsPerShift is the maximum number of nodes to traverse before pausing
|
|
||||||
//
|
|
||||||
// This is to prevent the janitor from traversing the entire cache, which could take a long time
|
|
||||||
// to complete depending on the size of the cache.
|
|
||||||
//
|
|
||||||
// By limiting it to a small number, we are effectively reducing the impact of passive eviction.
|
|
||||||
JanitorMaxIterationsPerShift = 1000
|
|
||||||
|
|
||||||
// JanitorMinShiftBackOff is the minimum interval between each iteration of steps
|
|
||||||
// defined by JanitorMaxIterationsPerShift
|
|
||||||
JanitorMinShiftBackOff = 50 * time.Millisecond
|
|
||||||
|
|
||||||
// JanitorMaxShiftBackOff is the maximum interval between each iteration of steps
|
|
||||||
// defined by JanitorMaxIterationsPerShift
|
|
||||||
JanitorMaxShiftBackOff = 500 * time.Millisecond
|
|
||||||
)
|
|
||||||
|
|
||||||
// StartJanitor starts the janitor on a different goroutine
|
|
||||||
// The janitor's job is to delete expired keys in the background, in other words, it takes care of passive eviction.
|
|
||||||
// It can be stopped by calling Cache.StopJanitor.
|
|
||||||
// If you do not start the janitor, expired keys will only be deleted when they are accessed through Get, GetByKeys, or
|
|
||||||
// GetAll.
|
|
||||||
func (cache *Cache) StartJanitor() error {
|
|
||||||
if cache.stopJanitor != nil {
|
|
||||||
return ErrJanitorAlreadyRunning
|
|
||||||
}
|
|
||||||
cache.stopJanitor = make(chan bool)
|
|
||||||
go func() {
|
|
||||||
// rather than starting from the tail on every run, we can try to start from the last traversed entry
|
|
||||||
var lastTraversedNode *Entry
|
|
||||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead := 0
|
|
||||||
backOff := JanitorMinShiftBackOff
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-time.After(backOff):
|
|
||||||
// Passive clean up duty
|
|
||||||
cache.mutex.Lock()
|
|
||||||
if cache.tail != nil {
|
|
||||||
start := time.Now()
|
|
||||||
steps := 0
|
|
||||||
expiredEntriesFound := 0
|
|
||||||
current := cache.tail
|
|
||||||
if lastTraversedNode != nil {
|
|
||||||
// Make sure the lastTraversedNode is still in the cache, otherwise we might be traversing nodes that were already deleted.
|
|
||||||
// Furthermore, we need to make sure that the entry from the cache has the same pointer as the lastTraversedNode
|
|
||||||
// to verify that there isn't just a new cache entry with the same key (i.e. in case lastTraversedNode got evicted)
|
|
||||||
if entryFromCache, isInCache := cache.get(lastTraversedNode.Key); isInCache && entryFromCache == lastTraversedNode {
|
|
||||||
current = lastTraversedNode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if current == cache.tail {
|
|
||||||
if Debug {
|
|
||||||
log.Printf("There are currently %d entries in the cache. The last walk resulted in finding %d expired keys", len(cache.entries), totalNumberOfExpiredKeysInPreviousRunFromTailToHead)
|
|
||||||
}
|
|
||||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead = 0
|
|
||||||
}
|
|
||||||
for current != nil {
|
|
||||||
// since we're walking from the tail to the head, we get the previous reference
|
|
||||||
var previous *Entry
|
|
||||||
steps++
|
|
||||||
if current.Expired() {
|
|
||||||
expiredEntriesFound++
|
|
||||||
// Because delete will remove the previous reference from the entry, we need to store the
|
|
||||||
// previous reference before we delete it
|
|
||||||
previous = current.previous
|
|
||||||
cache.delete(current.Key)
|
|
||||||
cache.stats.ExpiredKeys++
|
|
||||||
}
|
|
||||||
if current == cache.head {
|
|
||||||
lastTraversedNode = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// Travel to the current node's previous node only if no specific previous node has been specified
|
|
||||||
if previous != nil {
|
|
||||||
current = previous
|
|
||||||
} else {
|
|
||||||
current = current.previous
|
|
||||||
}
|
|
||||||
lastTraversedNode = current
|
|
||||||
if steps == JanitorMaxIterationsPerShift || expiredEntriesFound >= JanitorShiftTarget {
|
|
||||||
if expiredEntriesFound > 0 {
|
|
||||||
backOff = JanitorMinShiftBackOff
|
|
||||||
} else {
|
|
||||||
if backOff*2 <= JanitorMaxShiftBackOff {
|
|
||||||
backOff *= 2
|
|
||||||
} else {
|
|
||||||
backOff = JanitorMaxShiftBackOff
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if Debug {
|
|
||||||
log.Printf("traversed %d nodes and found %d expired entries in %s before stopping\n", steps, expiredEntriesFound, time.Since(start))
|
|
||||||
}
|
|
||||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead += expiredEntriesFound
|
|
||||||
} else {
|
|
||||||
if backOff*2 < JanitorMaxShiftBackOff {
|
|
||||||
backOff *= 2
|
|
||||||
} else {
|
|
||||||
backOff = JanitorMaxShiftBackOff
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cache.mutex.Unlock()
|
|
||||||
case <-cache.stopJanitor:
|
|
||||||
cache.stopJanitor <- true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
//if Debug {
|
|
||||||
// go func() {
|
|
||||||
// var m runtime.MemStats
|
|
||||||
// for {
|
|
||||||
// runtime.ReadMemStats(&m)
|
|
||||||
// log.Printf("Alloc=%vMB; HeapReleased=%vMB; Sys=%vMB; HeapInUse=%vMB; HeapObjects=%v; HeapObjectsFreed=%v; GC=%v; cache.memoryUsage=%vMB; cacheSize=%d\n", m.Alloc/1024/1024, m.HeapReleased/1024/1024, m.Sys/1024/1024, m.HeapInuse/1024/1024, m.HeapObjects, m.Frees, m.NumGC, cache.memoryUsage/1024/1024, cache.Count())
|
|
||||||
// time.Sleep(3 * time.Second)
|
|
||||||
// }
|
|
||||||
// }()
|
|
||||||
//}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopJanitor stops the janitor
|
|
||||||
func (cache *Cache) StopJanitor() {
|
|
||||||
if cache.stopJanitor != nil {
|
|
||||||
// Tell the janitor to stop, and then wait for the janitor to reply on the same channel that it's stopping
|
|
||||||
// This may seem a bit odd, but this allows us to avoid a data race condition when trying to set
|
|
||||||
// cache.stopJanitor to nil
|
|
||||||
cache.stopJanitor <- true
|
|
||||||
<-cache.stopJanitor
|
|
||||||
cache.stopJanitor = nil
|
|
||||||
}
|
|
||||||
}
|
|
12
vendor/github.com/TwiN/gocache/v2/pattern.go
generated
vendored
12
vendor/github.com/TwiN/gocache/v2/pattern.go
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
package gocache
|
|
||||||
|
|
||||||
import "path/filepath"
|
|
||||||
|
|
||||||
// MatchPattern checks whether a string matches a pattern
|
|
||||||
func MatchPattern(pattern, s string) bool {
|
|
||||||
if pattern == "*" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
matched, _ := filepath.Match(pattern, s)
|
|
||||||
return matched
|
|
||||||
}
|
|
33
vendor/github.com/TwiN/gocache/v2/policy.go
generated
vendored
33
vendor/github.com/TwiN/gocache/v2/policy.go
generated
vendored
@ -1,33 +0,0 @@
|
|||||||
package gocache
|
|
||||||
|
|
||||||
// EvictionPolicy is what dictates how evictions are handled
|
|
||||||
type EvictionPolicy string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// LeastRecentlyUsed is an eviction policy that causes the most recently accessed cache entry to be moved to the
|
|
||||||
// head of the cache. Effectively, this causes the cache entries that have not been accessed for some time to
|
|
||||||
// gradually move closer and closer to the tail, and since the tail is the entry that gets deleted when an eviction
|
|
||||||
// is required, it allows less used cache entries to be evicted while keeping recently accessed entries at or close
|
|
||||||
// to the head.
|
|
||||||
//
|
|
||||||
// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
|
|
||||||
// put 3 at the head and 1 at the tail:
|
|
||||||
// 3 (head) -> 2 -> 1 (tail)
|
|
||||||
// If the cache entry 1 was then accessed, 1 would become the head and 2 the tail:
|
|
||||||
// 1 (head) -> 3 -> 2 (tail)
|
|
||||||
// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (2) would then be evicted:
|
|
||||||
// 4 (head) -> 1 -> 3 (tail)
|
|
||||||
LeastRecentlyUsed EvictionPolicy = "LeastRecentlyUsed"
|
|
||||||
|
|
||||||
// FirstInFirstOut is an eviction policy that causes cache entries to be evicted in the same order that they are
|
|
||||||
// created.
|
|
||||||
//
|
|
||||||
// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
|
|
||||||
// put 3 at the head and 1 at the tail:
|
|
||||||
// 3 (head) -> 2 -> 1 (tail)
|
|
||||||
// If the cache entry 1 was then accessed, unlike with LeastRecentlyUsed, nothing would change:
|
|
||||||
// 3 (head) -> 2 -> 1 (tail)
|
|
||||||
// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (1) would then be evicted:
|
|
||||||
// 4 (head) -> 3 -> 2 (tail)
|
|
||||||
FirstInFirstOut EvictionPolicy = "FirstInFirstOut"
|
|
||||||
)
|
|
15
vendor/github.com/TwiN/gocache/v2/statistics.go
generated
vendored
15
vendor/github.com/TwiN/gocache/v2/statistics.go
generated
vendored
@ -1,15 +0,0 @@
|
|||||||
package gocache
|
|
||||||
|
|
||||||
type Statistics struct {
|
|
||||||
// EvictedKeys is the number of keys that were evicted
|
|
||||||
EvictedKeys uint64
|
|
||||||
|
|
||||||
// ExpiredKeys is the number of keys that were automatically deleted as a result of expiring
|
|
||||||
ExpiredKeys uint64
|
|
||||||
|
|
||||||
// Hits is the number of cache hits
|
|
||||||
Hits uint64
|
|
||||||
|
|
||||||
// Misses is the number of cache misses
|
|
||||||
Misses uint64
|
|
||||||
}
|
|
1
vendor/github.com/TwiN/health/.gitattributes
generated
vendored
1
vendor/github.com/TwiN/health/.gitattributes
generated
vendored
@ -1 +0,0 @@
|
|||||||
* text=auto eol=lf
|
|
2
vendor/github.com/TwiN/health/.gitignore
generated
vendored
2
vendor/github.com/TwiN/health/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
.idea
|
|
||||||
*.iml
|
|
21
vendor/github.com/TwiN/health/LICENSE
generated
vendored
21
vendor/github.com/TwiN/health/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2022 TwiN
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
5
vendor/github.com/TwiN/health/Makefile
generated
vendored
5
vendor/github.com/TwiN/health/Makefile
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
bench:
|
|
||||||
go test -bench . -race
|
|
||||||
|
|
||||||
test:
|
|
||||||
go test . -race
|
|
92
vendor/github.com/TwiN/health/README.md
generated
vendored
92
vendor/github.com/TwiN/health/README.md
generated
vendored
@ -1,92 +0,0 @@
|
|||||||
# health
|
|
||||||
![test](https://github.com/TwiN/health/workflows/test/badge.svg?branch=master)
|
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/TwiN/health)](https://goreportcard.com/report/github.com/TwiN/health)
|
|
||||||
[![codecov](https://codecov.io/gh/TwiN/health/branch/master/graph/badge.svg)](https://codecov.io/gh/TwiN/health)
|
|
||||||
[![Go version](https://img.shields.io/github/go-mod/go-version/TwiN/health.svg)](https://github.com/TwiN/health)
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/github.com/TwiN/health.svg)](https://pkg.go.dev/github.com/TwiN/health)
|
|
||||||
|
|
||||||
Health is a library used for creating a very simple health endpoint.
|
|
||||||
|
|
||||||
While implementing a health endpoint is very simple, I've grown tired of implementing
|
|
||||||
it over and over again.
|
|
||||||
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
```console
|
|
||||||
go get -u github.com/TwiN/health
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
To retrieve the handler, you must use `health.Handler()` and are expected to pass it to the router like so:
|
|
||||||
```go
|
|
||||||
router := http.NewServeMux()
|
|
||||||
router.Handle("/health", health.Handler())
|
|
||||||
server := &http.Server{
|
|
||||||
Addr: ":8080",
|
|
||||||
Handler: router,
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
By default, the handler will return `UP` when the status is up, and `DOWN` when the status is down.
|
|
||||||
If you prefer using JSON, however, you may initialize the health handler like so:
|
|
||||||
```go
|
|
||||||
router.Handle("/health", health.Handler().WithJSON(true))
|
|
||||||
```
|
|
||||||
The above will cause the response body to become `{"status":"UP"}` and `{"status":"DOWN"}` for both status respectively,
|
|
||||||
unless there is a reason, in which case a reason set to `because` would return `{"status":"UP", "reason":"because"}`
|
|
||||||
and `{"status":"DOWN", "reason":"because"}` respectively.
|
|
||||||
|
|
||||||
To set the health status to `DOWN` with a reason:
|
|
||||||
```go
|
|
||||||
health.SetUnhealthy("<enter reason here>")
|
|
||||||
```
|
|
||||||
The string passed will be automatically set as the reason.
|
|
||||||
|
|
||||||
In a similar fashion, to set the health status to `UP` and clear the reason:
|
|
||||||
```go
|
|
||||||
health.SetHealthy()
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
Alternatively, to set the status and the reason individually you can use `health.SetStatus(<status>)` where `<status>` is `health.Up`
|
|
||||||
or `health.Down`:
|
|
||||||
```go
|
|
||||||
health.SetStatus(health.Up)
|
|
||||||
health.SetStatus(health.Down)
|
|
||||||
```
|
|
||||||
As for the reason:
|
|
||||||
```go
|
|
||||||
health.SetReason("database is unreachable")
|
|
||||||
```
|
|
||||||
|
|
||||||
Generally speaking, you'd only want to include a reason if the status is `Down`, but you can do as you desire.
|
|
||||||
|
|
||||||
For the sake of convenience, you can also use `health.SetStatusAndReason(<status>, <reason>)` instead of doing
|
|
||||||
`health.SetStatus(<status>)` and `health.SetReason(<reason>)` separately.
|
|
||||||
|
|
||||||
|
|
||||||
### Complete example
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/TwiN/health"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
router := http.NewServeMux()
|
|
||||||
router.Handle("/health", health.Handler())
|
|
||||||
server := &http.Server{
|
|
||||||
Addr: "0.0.0.0:8080",
|
|
||||||
Handler: router,
|
|
||||||
ReadTimeout: 15 * time.Second,
|
|
||||||
WriteTimeout: 15 * time.Second,
|
|
||||||
IdleTimeout: 15 * time.Second,
|
|
||||||
}
|
|
||||||
server.ListenAndServe()
|
|
||||||
}
|
|
||||||
```
|
|
142
vendor/github.com/TwiN/health/health.go
generated
vendored
142
vendor/github.com/TwiN/health/health.go
generated
vendored
@ -1,142 +0,0 @@
|
|||||||
package health
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
handler = &healthHandler{
|
|
||||||
useJSON: false,
|
|
||||||
status: Up,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// responseBody is the body of the response returned by the health handler.
|
|
||||||
type responseBody struct {
|
|
||||||
Status string `json:"status"`
|
|
||||||
Reason string `json:"reason,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// healthHandler is the HTTP handler for serving the health endpoint
|
|
||||||
type healthHandler struct {
|
|
||||||
useJSON bool
|
|
||||||
|
|
||||||
status Status
|
|
||||||
reason string
|
|
||||||
|
|
||||||
mutex sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithJSON configures whether the handler should output a response in JSON or in raw text
|
|
||||||
//
|
|
||||||
// Defaults to false
|
|
||||||
func (h *healthHandler) WithJSON(v bool) *healthHandler {
|
|
||||||
h.useJSON = v
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServeHTTP serves the HTTP request for the health handler
|
|
||||||
func (h *healthHandler) ServeHTTP(writer http.ResponseWriter, _ *http.Request) {
|
|
||||||
statusCode, body, useJSON := h.getResponseStatusCodeAndBodyAndWhetherBodyUsesJSON()
|
|
||||||
if useJSON {
|
|
||||||
writer.Header().Set("Content-Type", "application/json")
|
|
||||||
}
|
|
||||||
writer.WriteHeader(statusCode)
|
|
||||||
_, _ = writer.Write(body)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *healthHandler) GetResponseStatusCodeAndBody() (statusCode int, body []byte) {
|
|
||||||
statusCode, body, _ = h.getResponseStatusCodeAndBodyAndWhetherBodyUsesJSON()
|
|
||||||
return statusCode, body
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *healthHandler) getResponseStatusCodeAndBodyAndWhetherBodyUsesJSON() (statusCode int, body []byte, useJSON bool) {
|
|
||||||
var status Status
|
|
||||||
var reason string
|
|
||||||
h.mutex.RLock()
|
|
||||||
status, reason, useJSON = h.status, h.reason, h.useJSON
|
|
||||||
h.mutex.RUnlock()
|
|
||||||
if status == Up {
|
|
||||||
statusCode = http.StatusOK
|
|
||||||
} else {
|
|
||||||
statusCode = http.StatusInternalServerError
|
|
||||||
}
|
|
||||||
if useJSON {
|
|
||||||
// We can safely ignore the error here because we know that both values are strings, therefore are supported encoders.
|
|
||||||
body, _ = json.Marshal(responseBody{Status: string(status), Reason: reason})
|
|
||||||
} else {
|
|
||||||
if len(reason) == 0 {
|
|
||||||
body = []byte(status)
|
|
||||||
} else {
|
|
||||||
body = []byte(string(status) + ": " + reason)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler retrieves the health handler
|
|
||||||
func Handler() *healthHandler {
|
|
||||||
return handler
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetStatus retrieves the current status returned by the health handler
|
|
||||||
func GetStatus() Status {
|
|
||||||
handler.mutex.RLock()
|
|
||||||
defer handler.mutex.RUnlock()
|
|
||||||
return handler.status
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStatus sets the status to be returned by the health handler
|
|
||||||
func SetStatus(status Status) {
|
|
||||||
handler.mutex.Lock()
|
|
||||||
handler.status = status
|
|
||||||
handler.mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetReason retrieves the current status returned by the health handler
|
|
||||||
func GetReason() string {
|
|
||||||
handler.mutex.RLock()
|
|
||||||
defer handler.mutex.RUnlock()
|
|
||||||
return handler.reason
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetReason sets a reason for the current status to be returned by the health handler
|
|
||||||
func SetReason(reason string) {
|
|
||||||
handler.mutex.Lock()
|
|
||||||
handler.reason = reason
|
|
||||||
handler.mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStatusAndReason sets the status and reason to be returned by the health handler
|
|
||||||
func SetStatusAndReason(status Status, reason string) {
|
|
||||||
handler.mutex.Lock()
|
|
||||||
handler.status = status
|
|
||||||
handler.reason = reason
|
|
||||||
handler.mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetStatusAndResetReason sets the status and resets the reason to a blank string
|
|
||||||
func SetStatusAndResetReason(status Status) {
|
|
||||||
handler.mutex.Lock()
|
|
||||||
handler.status = status
|
|
||||||
handler.reason = ""
|
|
||||||
handler.mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHealthy sets the status to Up and the reason to a blank string
|
|
||||||
func SetHealthy() {
|
|
||||||
SetStatusAndResetReason(Up)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetUnhealthy sets the status to Down and the reason to the string passed as parameter
|
|
||||||
//
|
|
||||||
// Unlike SetHealthy, this function enforces setting a reason, because it's good practice to give at least a bit
|
|
||||||
// of information as to why an application is unhealthy, and this library attempts to promote good practices.
|
|
||||||
func SetUnhealthy(reason string) {
|
|
||||||
handler.mutex.Lock()
|
|
||||||
handler.status = Down
|
|
||||||
handler.reason = reason
|
|
||||||
handler.mutex.Unlock()
|
|
||||||
}
|
|
8
vendor/github.com/TwiN/health/status.go
generated
vendored
8
vendor/github.com/TwiN/health/status.go
generated
vendored
@ -1,8 +0,0 @@
|
|||||||
package health
|
|
||||||
|
|
||||||
type Status string
|
|
||||||
|
|
||||||
var (
|
|
||||||
Down Status = "DOWN" // For when the application is unhealthy
|
|
||||||
Up Status = "UP" // For when the application is healthy
|
|
||||||
)
|
|
2
vendor/github.com/TwiN/whois/.gitignore
generated
vendored
2
vendor/github.com/TwiN/whois/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
.idea
|
|
||||||
bin
|
|
21
vendor/github.com/TwiN/whois/LICENSE
generated
vendored
21
vendor/github.com/TwiN/whois/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
|||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2022 TwiN
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
4
vendor/github.com/TwiN/whois/Makefile
generated
vendored
4
vendor/github.com/TwiN/whois/Makefile
generated
vendored
@ -1,4 +0,0 @@
|
|||||||
.PHONY: build-binaries
|
|
||||||
|
|
||||||
build-binaries:
|
|
||||||
./scripts/build.sh
|
|
81
vendor/github.com/TwiN/whois/README.md
generated
vendored
81
vendor/github.com/TwiN/whois/README.md
generated
vendored
@ -1,81 +0,0 @@
|
|||||||
# whois
|
|
||||||
![test](https://github.com/TwiN/whois/workflows/test/badge.svg?branch=master)
|
|
||||||
|
|
||||||
Lightweight library for retrieving WHOIS information on a domain.
|
|
||||||
|
|
||||||
It automatically retrieves the appropriate WHOIS server based on the domain's TLD by first querying IANA.
|
|
||||||
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
### As an executable
|
|
||||||
To install it:
|
|
||||||
```console
|
|
||||||
go install github.com/TwiN/whois/cmd/whois@latest
|
|
||||||
```
|
|
||||||
To run it:
|
|
||||||
```console
|
|
||||||
whois example.com
|
|
||||||
```
|
|
||||||
|
|
||||||
### As a library
|
|
||||||
```console
|
|
||||||
go get github.com/TwiN/whois
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Query
|
|
||||||
If all you want is the text a WHOIS server would return you, you can use the `Query` method of the `whois.Client` type:
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import "github.com/TwiN/whois"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
client := whois.NewClient()
|
|
||||||
output, err := client.Query("example.com")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
println(output)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### QueryAndParse
|
|
||||||
If you want specific pieces of information, you can use the `QueryAndParse` method of the `whois.Client` type:
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import "github.com/TwiN/whois"
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
client := whois.NewClient()
|
|
||||||
response, err := client.QueryAndParse("example.com")
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
println(response.ExpirationDate.String())
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Note that because there is no standardized format for WHOIS responses, this parsing may not be successful for every single TLD.
|
|
||||||
|
|
||||||
Currently, the only fields parsed are:
|
|
||||||
- `ExpirationDate`: The time.Time at which the domain will expire
|
|
||||||
- `DomainStatuses`: The statuses that the domain currently has (e.g. `clientTransferProhibited`)
|
|
||||||
- `NameServers`: The nameservers currently tied to the domain
|
|
||||||
|
|
||||||
If you'd like one or more other fields to be parsed, please don't be shy and create an issue or a pull request.
|
|
||||||
|
|
||||||
#### Caching referral WHOIS servers
|
|
||||||
The way that WHOIS scales is by having one "main" WHOIS server, namely `whois.iana.org:43`, refer to other WHOIS server
|
|
||||||
on a per-TLD basis.
|
|
||||||
|
|
||||||
In other word, let's say that you wanted to have the WHOIS information for `example.com`.
|
|
||||||
The first step would be to query `whois.iana.org:43` with `com`, which would return `whois.verisign-grs.com`.
|
|
||||||
Then, you would query `whois.verisign-grs.com:43` for the WHOIS information on `example.com`.
|
|
||||||
|
|
||||||
If you're querying a lot of servers, making two queries instead of one can be a little wasteful, hence `WithReferralCache(true)`:
|
|
||||||
```go
|
|
||||||
client := whois.NewClient().WithReferralCache(true)
|
|
||||||
```
|
|
||||||
The above will cache the referral WHOIS server for each TLD, so that you can directly query the appropriate WHOIS server
|
|
||||||
instead of first querying `whois.iana.org:43` for the referral.
|
|
||||||
|
|
130
vendor/github.com/TwiN/whois/whois.go
generated
vendored
130
vendor/github.com/TwiN/whois/whois.go
generated
vendored
@ -1,130 +0,0 @@
|
|||||||
package whois
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
ianaWHOISServerAddress = "whois.iana.org:43"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Client struct {
|
|
||||||
whoisServerAddress string
|
|
||||||
|
|
||||||
isCachingReferralWHOISServers bool
|
|
||||||
referralWHOISServersCache map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewClient() *Client {
|
|
||||||
return &Client{
|
|
||||||
whoisServerAddress: ianaWHOISServerAddress,
|
|
||||||
referralWHOISServersCache: make(map[string]string),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithReferralCache allows you to enable or disable the referral WHOIS server cache.
|
|
||||||
// While ianaWHOISServerAddress is the "entry point" for WHOIS queries, it sometimes has
|
|
||||||
// availability issues. One way to mitigate this is to cache the referral WHOIS server.
|
|
||||||
//
|
|
||||||
// This is disabled by default
|
|
||||||
func (c *Client) WithReferralCache(enabled bool) *Client {
|
|
||||||
c.isCachingReferralWHOISServers = enabled
|
|
||||||
if enabled {
|
|
||||||
// We'll set a couple of common ones right away to avoid unnecessary queries
|
|
||||||
c.referralWHOISServersCache = map[string]string{
|
|
||||||
"com": "whois.verisign-grs.com",
|
|
||||||
"black": "whois.nic.black",
|
|
||||||
"dev": "whois.nic.google",
|
|
||||||
"green": "whois.nic.green",
|
|
||||||
"io": "whois.nic.io",
|
|
||||||
"net": "whois.verisign-grs.com",
|
|
||||||
"org": "whois.publicinterestregistry.org",
|
|
||||||
"red": "whois.nic.red",
|
|
||||||
"sh": "whois.nic.sh",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) Query(domain string) (string, error) {
|
|
||||||
parts := strings.Split(domain, ".")
|
|
||||||
if c.isCachingReferralWHOISServers {
|
|
||||||
if cachedWHOISServer, ok := c.referralWHOISServersCache[domain]; ok {
|
|
||||||
return c.query(cachedWHOISServer, domain)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
output, err := c.query(c.whoisServerAddress, parts[len(parts)-1])
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if strings.Contains(output, "whois:") {
|
|
||||||
startIndex := strings.Index(output, "whois:") + 6
|
|
||||||
endIndex := strings.Index(output[startIndex:], "\n") + startIndex
|
|
||||||
whois := strings.TrimSpace(output[startIndex:endIndex])
|
|
||||||
if referOutput, err := c.query(whois+":43", domain); err == nil {
|
|
||||||
if c.isCachingReferralWHOISServers {
|
|
||||||
c.referralWHOISServersCache[domain] = whois + ":43"
|
|
||||||
}
|
|
||||||
return referOutput, nil
|
|
||||||
}
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return output, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Client) query(whoisServerAddress, domain string) (string, error) {
|
|
||||||
connection, err := net.DialTimeout("tcp", whoisServerAddress, 10*time.Second)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer connection.Close()
|
|
||||||
_ = connection.SetDeadline(time.Now().Add(5 * time.Second))
|
|
||||||
_, err = connection.Write([]byte(domain + "\r\n"))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
output, err := io.ReadAll(connection)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return string(output), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Response struct {
|
|
||||||
ExpirationDate time.Time
|
|
||||||
DomainStatuses []string
|
|
||||||
NameServers []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryAndParse tries to parse the response from the WHOIS server
|
|
||||||
// There is no standardized format for WHOIS responses, so this is an attempt at best.
|
|
||||||
//
|
|
||||||
// Being the selfish person that I am, I also only parse the fields that I need.
|
|
||||||
// If you need more fields, please open an issue or pull request.
|
|
||||||
func (c Client) QueryAndParse(domain string) (*Response, error) {
|
|
||||||
text, err := c.Query(domain)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
response := Response{}
|
|
||||||
for _, line := range strings.Split(text, "\n") {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
valueStartIndex := strings.Index(line, ":")
|
|
||||||
if valueStartIndex == -1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
key := strings.ToLower(strings.TrimSpace(line[:valueStartIndex]))
|
|
||||||
value := strings.TrimSpace(line[valueStartIndex+1:])
|
|
||||||
if response.ExpirationDate.Unix() != 0 && strings.Contains(key, "expir") && strings.Contains(key, "date") {
|
|
||||||
response.ExpirationDate, _ = time.Parse(time.RFC3339, strings.ToUpper(value))
|
|
||||||
} else if strings.Contains(key, "domain status") {
|
|
||||||
response.DomainStatuses = append(response.DomainStatuses, value)
|
|
||||||
} else if strings.Contains(key, "name server") {
|
|
||||||
response.NameServers = append(response.NameServers, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &response, nil
|
|
||||||
}
|
|
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
@ -1,20 +0,0 @@
|
|||||||
Copyright (C) 2013 Blake Mizerany
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
File diff suppressed because it is too large
Load Diff
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
@ -1,316 +0,0 @@
|
|||||||
// Package quantile computes approximate quantiles over an unbounded data
|
|
||||||
// stream within low memory and CPU bounds.
|
|
||||||
//
|
|
||||||
// A small amount of accuracy is traded to achieve the above properties.
|
|
||||||
//
|
|
||||||
// Multiple streams can be merged before calling Query to generate a single set
|
|
||||||
// of results. This is meaningful when the streams represent the same type of
|
|
||||||
// data. See Merge and Samples.
|
|
||||||
//
|
|
||||||
// For more detailed information about the algorithm used, see:
|
|
||||||
//
|
|
||||||
// Effective Computation of Biased Quantiles over Data Streams
|
|
||||||
//
|
|
||||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
|
||||||
package quantile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Sample holds an observed value and meta information for compression. JSON
|
|
||||||
// tags have been added for convenience.
|
|
||||||
type Sample struct {
|
|
||||||
Value float64 `json:",string"`
|
|
||||||
Width float64 `json:",string"`
|
|
||||||
Delta float64 `json:",string"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Samples represents a slice of samples. It implements sort.Interface.
|
|
||||||
type Samples []Sample
|
|
||||||
|
|
||||||
func (a Samples) Len() int { return len(a) }
|
|
||||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
|
||||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
||||||
|
|
||||||
type invariant func(s *stream, r float64) float64
|
|
||||||
|
|
||||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the lower ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewLowBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * r
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the higher ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewHighBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * (s.n - r)
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
|
||||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
|
||||||
// space and computation time. The targets map maps the desired quantiles to
|
|
||||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
|
||||||
// is guaranteed to be within (Quantile±Epsilon).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
|
||||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
|
||||||
// Convert map to slice to avoid slow iterations on a map.
|
|
||||||
// ƒ is called on the hot path, so converting the map to a slice
|
|
||||||
// beforehand results in significant CPU savings.
|
|
||||||
targets := targetMapToSlice(targetMap)
|
|
||||||
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
var m = math.MaxFloat64
|
|
||||||
var f float64
|
|
||||||
for _, t := range targets {
|
|
||||||
if t.quantile*s.n <= r {
|
|
||||||
f = (2 * t.epsilon * r) / t.quantile
|
|
||||||
} else {
|
|
||||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
|
||||||
}
|
|
||||||
if f < m {
|
|
||||||
m = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
type target struct {
|
|
||||||
quantile float64
|
|
||||||
epsilon float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
|
||||||
targets := make([]target, 0, len(targetMap))
|
|
||||||
|
|
||||||
for quantile, epsilon := range targetMap {
|
|
||||||
t := target{
|
|
||||||
quantile: quantile,
|
|
||||||
epsilon: epsilon,
|
|
||||||
}
|
|
||||||
targets = append(targets, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
return targets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
|
||||||
// design. Take care when using across multiple goroutines.
|
|
||||||
type Stream struct {
|
|
||||||
*stream
|
|
||||||
b Samples
|
|
||||||
sorted bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStream(ƒ invariant) *Stream {
|
|
||||||
x := &stream{ƒ: ƒ}
|
|
||||||
return &Stream{x, make(Samples, 0, 500), true}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert inserts v into the stream.
|
|
||||||
func (s *Stream) Insert(v float64) {
|
|
||||||
s.insert(Sample{Value: v, Width: 1})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) insert(sample Sample) {
|
|
||||||
s.b = append(s.b, sample)
|
|
||||||
s.sorted = false
|
|
||||||
if len(s.b) == cap(s.b) {
|
|
||||||
s.flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query returns the computed qth percentiles value. If s was created with
|
|
||||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
|
||||||
// will return an unspecified result.
|
|
||||||
func (s *Stream) Query(q float64) float64 {
|
|
||||||
if !s.flushed() {
|
|
||||||
// Fast path when there hasn't been enough data for a flush;
|
|
||||||
// this also yields better accuracy for small sets of data.
|
|
||||||
l := len(s.b)
|
|
||||||
if l == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i := int(math.Ceil(float64(l) * q))
|
|
||||||
if i > 0 {
|
|
||||||
i -= 1
|
|
||||||
}
|
|
||||||
s.maybeSort()
|
|
||||||
return s.b[i].Value
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.query(q)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges samples into the underlying streams samples. This is handy when
|
|
||||||
// merging multiple streams from separate threads, database shards, etc.
|
|
||||||
//
|
|
||||||
// ATTENTION: This method is broken and does not yield correct results. The
|
|
||||||
// underlying algorithm is not capable of merging streams correctly.
|
|
||||||
func (s *Stream) Merge(samples Samples) {
|
|
||||||
sort.Sort(samples)
|
|
||||||
s.stream.merge(samples)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
|
||||||
func (s *Stream) Reset() {
|
|
||||||
s.stream.reset()
|
|
||||||
s.b = s.b[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Samples returns stream samples held by s.
|
|
||||||
func (s *Stream) Samples() Samples {
|
|
||||||
if !s.flushed() {
|
|
||||||
return s.b
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.samples()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the total number of samples observed in the stream
|
|
||||||
// since initialization.
|
|
||||||
func (s *Stream) Count() int {
|
|
||||||
return len(s.b) + s.stream.count()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) flush() {
|
|
||||||
s.maybeSort()
|
|
||||||
s.stream.merge(s.b)
|
|
||||||
s.b = s.b[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) maybeSort() {
|
|
||||||
if !s.sorted {
|
|
||||||
s.sorted = true
|
|
||||||
sort.Sort(s.b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) flushed() bool {
|
|
||||||
return len(s.stream.l) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type stream struct {
|
|
||||||
n float64
|
|
||||||
l []Sample
|
|
||||||
ƒ invariant
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) reset() {
|
|
||||||
s.l = s.l[:0]
|
|
||||||
s.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) insert(v float64) {
|
|
||||||
s.merge(Samples{{v, 1, 0}})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) merge(samples Samples) {
|
|
||||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
|
||||||
// whole summaries. The paper doesn't mention merging summaries at
|
|
||||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
|
||||||
// do merges properly.
|
|
||||||
var r float64
|
|
||||||
i := 0
|
|
||||||
for _, sample := range samples {
|
|
||||||
for ; i < len(s.l); i++ {
|
|
||||||
c := s.l[i]
|
|
||||||
if c.Value > sample.Value {
|
|
||||||
// Insert at position i.
|
|
||||||
s.l = append(s.l, Sample{})
|
|
||||||
copy(s.l[i+1:], s.l[i:])
|
|
||||||
s.l[i] = Sample{
|
|
||||||
sample.Value,
|
|
||||||
sample.Width,
|
|
||||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
|
||||||
// TODO(beorn7): How to calculate delta correctly?
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
goto inserted
|
|
||||||
}
|
|
||||||
r += c.Width
|
|
||||||
}
|
|
||||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
|
||||||
i++
|
|
||||||
inserted:
|
|
||||||
s.n += sample.Width
|
|
||||||
r += sample.Width
|
|
||||||
}
|
|
||||||
s.compress()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) count() int {
|
|
||||||
return int(s.n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) query(q float64) float64 {
|
|
||||||
t := math.Ceil(q * s.n)
|
|
||||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
|
||||||
p := s.l[0]
|
|
||||||
var r float64
|
|
||||||
for _, c := range s.l[1:] {
|
|
||||||
r += p.Width
|
|
||||||
if r+c.Width+c.Delta > t {
|
|
||||||
return p.Value
|
|
||||||
}
|
|
||||||
p = c
|
|
||||||
}
|
|
||||||
return p.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) compress() {
|
|
||||||
if len(s.l) < 2 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x := s.l[len(s.l)-1]
|
|
||||||
xi := len(s.l) - 1
|
|
||||||
r := s.n - 1 - x.Width
|
|
||||||
|
|
||||||
for i := len(s.l) - 2; i >= 0; i-- {
|
|
||||||
c := s.l[i]
|
|
||||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
|
||||||
x.Width += c.Width
|
|
||||||
s.l[xi] = x
|
|
||||||
// Remove element at i.
|
|
||||||
copy(s.l[i:], s.l[i+1:])
|
|
||||||
s.l = s.l[:len(s.l)-1]
|
|
||||||
xi -= 1
|
|
||||||
} else {
|
|
||||||
x = c
|
|
||||||
xi = i
|
|
||||||
}
|
|
||||||
r -= c.Width
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) samples() Samples {
|
|
||||||
samples := make(Samples, len(s.l))
|
|
||||||
copy(samples, s.l)
|
|
||||||
return samples
|
|
||||||
}
|
|
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
Copyright (c) 2016 Caleb Spare
|
|
||||||
|
|
||||||
MIT License
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
69
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
69
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@ -1,69 +0,0 @@
|
|||||||
# xxhash
|
|
||||||
|
|
||||||
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
|
||||||
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
|
||||||
|
|
||||||
xxhash is a Go implementation of the 64-bit
|
|
||||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
|
||||||
high-quality hashing algorithm that is much faster than anything in the Go
|
|
||||||
standard library.
|
|
||||||
|
|
||||||
This package provides a straightforward API:
|
|
||||||
|
|
||||||
```
|
|
||||||
func Sum64(b []byte) uint64
|
|
||||||
func Sum64String(s string) uint64
|
|
||||||
type Digest struct{ ... }
|
|
||||||
func New() *Digest
|
|
||||||
```
|
|
||||||
|
|
||||||
The `Digest` type implements hash.Hash64. Its key methods are:
|
|
||||||
|
|
||||||
```
|
|
||||||
func (*Digest) Write([]byte) (int, error)
|
|
||||||
func (*Digest) WriteString(string) (int, error)
|
|
||||||
func (*Digest) Sum64() uint64
|
|
||||||
```
|
|
||||||
|
|
||||||
This implementation provides a fast pure-Go implementation and an even faster
|
|
||||||
assembly implementation for amd64.
|
|
||||||
|
|
||||||
## Compatibility
|
|
||||||
|
|
||||||
This package is in a module and the latest code is in version 2 of the module.
|
|
||||||
You need a version of Go with at least "minimal module compatibility" to use
|
|
||||||
github.com/cespare/xxhash/v2:
|
|
||||||
|
|
||||||
* 1.9.7+ for Go 1.9
|
|
||||||
* 1.10.3+ for Go 1.10
|
|
||||||
* Go 1.11 or later
|
|
||||||
|
|
||||||
I recommend using the latest release of Go.
|
|
||||||
|
|
||||||
## Benchmarks
|
|
||||||
|
|
||||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
|
||||||
implementations of Sum64.
|
|
||||||
|
|
||||||
| input size | purego | asm |
|
|
||||||
| --- | --- | --- |
|
|
||||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
|
||||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
|
||||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
|
||||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
|
||||||
|
|
||||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
|
||||||
the following commands under Go 1.11.2:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
|
||||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
|
||||||
```
|
|
||||||
|
|
||||||
## Projects using this package
|
|
||||||
|
|
||||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
|
||||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
|
||||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
|
||||||
- [FreeCache](https://github.com/coocood/freecache)
|
|
||||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
|
235
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
235
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@ -1,235 +0,0 @@
|
|||||||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
|
||||||
// at http://cyan4973.github.io/xxHash/.
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"math/bits"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
prime1 uint64 = 11400714785074694791
|
|
||||||
prime2 uint64 = 14029467366897019727
|
|
||||||
prime3 uint64 = 1609587929392839161
|
|
||||||
prime4 uint64 = 9650029242287828579
|
|
||||||
prime5 uint64 = 2870177450012600261
|
|
||||||
)
|
|
||||||
|
|
||||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
|
||||||
// possible in the Go code is worth a small (but measurable) performance boost
|
|
||||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
|
||||||
// convenience in the Go code in a few places where we need to intentionally
|
|
||||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
|
||||||
// result overflows a uint64).
|
|
||||||
var (
|
|
||||||
prime1v = prime1
|
|
||||||
prime2v = prime2
|
|
||||||
prime3v = prime3
|
|
||||||
prime4v = prime4
|
|
||||||
prime5v = prime5
|
|
||||||
)
|
|
||||||
|
|
||||||
// Digest implements hash.Hash64.
|
|
||||||
type Digest struct {
|
|
||||||
v1 uint64
|
|
||||||
v2 uint64
|
|
||||||
v3 uint64
|
|
||||||
v4 uint64
|
|
||||||
total uint64
|
|
||||||
mem [32]byte
|
|
||||||
n int // how much of mem is used
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
|
||||||
func New() *Digest {
|
|
||||||
var d Digest
|
|
||||||
d.Reset()
|
|
||||||
return &d
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset clears the Digest's state so that it can be reused.
|
|
||||||
func (d *Digest) Reset() {
|
|
||||||
d.v1 = prime1v + prime2
|
|
||||||
d.v2 = prime2
|
|
||||||
d.v3 = 0
|
|
||||||
d.v4 = -prime1v
|
|
||||||
d.total = 0
|
|
||||||
d.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size always returns 8 bytes.
|
|
||||||
func (d *Digest) Size() int { return 8 }
|
|
||||||
|
|
||||||
// BlockSize always returns 32 bytes.
|
|
||||||
func (d *Digest) BlockSize() int { return 32 }
|
|
||||||
|
|
||||||
// Write adds more data to d. It always returns len(b), nil.
|
|
||||||
func (d *Digest) Write(b []byte) (n int, err error) {
|
|
||||||
n = len(b)
|
|
||||||
d.total += uint64(n)
|
|
||||||
|
|
||||||
if d.n+n < 32 {
|
|
||||||
// This new data doesn't even fill the current block.
|
|
||||||
copy(d.mem[d.n:], b)
|
|
||||||
d.n += n
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.n > 0 {
|
|
||||||
// Finish off the partial block.
|
|
||||||
copy(d.mem[d.n:], b)
|
|
||||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
|
||||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
|
||||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
|
||||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
|
||||||
b = b[32-d.n:]
|
|
||||||
d.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(b) >= 32 {
|
|
||||||
// One or more full blocks left.
|
|
||||||
nw := writeBlocks(d, b)
|
|
||||||
b = b[nw:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store any remaining partial block.
|
|
||||||
copy(d.mem[:], b)
|
|
||||||
d.n = len(b)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum appends the current hash to b and returns the resulting slice.
|
|
||||||
func (d *Digest) Sum(b []byte) []byte {
|
|
||||||
s := d.Sum64()
|
|
||||||
return append(
|
|
||||||
b,
|
|
||||||
byte(s>>56),
|
|
||||||
byte(s>>48),
|
|
||||||
byte(s>>40),
|
|
||||||
byte(s>>32),
|
|
||||||
byte(s>>24),
|
|
||||||
byte(s>>16),
|
|
||||||
byte(s>>8),
|
|
||||||
byte(s),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum64 returns the current hash.
|
|
||||||
func (d *Digest) Sum64() uint64 {
|
|
||||||
var h uint64
|
|
||||||
|
|
||||||
if d.total >= 32 {
|
|
||||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
|
||||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
|
||||||
h = mergeRound(h, v1)
|
|
||||||
h = mergeRound(h, v2)
|
|
||||||
h = mergeRound(h, v3)
|
|
||||||
h = mergeRound(h, v4)
|
|
||||||
} else {
|
|
||||||
h = d.v3 + prime5
|
|
||||||
}
|
|
||||||
|
|
||||||
h += d.total
|
|
||||||
|
|
||||||
i, end := 0, d.n
|
|
||||||
for ; i+8 <= end; i += 8 {
|
|
||||||
k1 := round(0, u64(d.mem[i:i+8]))
|
|
||||||
h ^= k1
|
|
||||||
h = rol27(h)*prime1 + prime4
|
|
||||||
}
|
|
||||||
if i+4 <= end {
|
|
||||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
|
||||||
h = rol23(h)*prime2 + prime3
|
|
||||||
i += 4
|
|
||||||
}
|
|
||||||
for i < end {
|
|
||||||
h ^= uint64(d.mem[i]) * prime5
|
|
||||||
h = rol11(h) * prime1
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
h ^= h >> 33
|
|
||||||
h *= prime2
|
|
||||||
h ^= h >> 29
|
|
||||||
h *= prime3
|
|
||||||
h ^= h >> 32
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
magic = "xxh\x06"
|
|
||||||
marshaledSize = len(magic) + 8*5 + 32
|
|
||||||
)
|
|
||||||
|
|
||||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
|
||||||
func (d *Digest) MarshalBinary() ([]byte, error) {
|
|
||||||
b := make([]byte, 0, marshaledSize)
|
|
||||||
b = append(b, magic...)
|
|
||||||
b = appendUint64(b, d.v1)
|
|
||||||
b = appendUint64(b, d.v2)
|
|
||||||
b = appendUint64(b, d.v3)
|
|
||||||
b = appendUint64(b, d.v4)
|
|
||||||
b = appendUint64(b, d.total)
|
|
||||||
b = append(b, d.mem[:d.n]...)
|
|
||||||
b = b[:len(b)+len(d.mem)-d.n]
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
|
||||||
func (d *Digest) UnmarshalBinary(b []byte) error {
|
|
||||||
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
|
||||||
return errors.New("xxhash: invalid hash state identifier")
|
|
||||||
}
|
|
||||||
if len(b) != marshaledSize {
|
|
||||||
return errors.New("xxhash: invalid hash state size")
|
|
||||||
}
|
|
||||||
b = b[len(magic):]
|
|
||||||
b, d.v1 = consumeUint64(b)
|
|
||||||
b, d.v2 = consumeUint64(b)
|
|
||||||
b, d.v3 = consumeUint64(b)
|
|
||||||
b, d.v4 = consumeUint64(b)
|
|
||||||
b, d.total = consumeUint64(b)
|
|
||||||
copy(d.mem[:], b)
|
|
||||||
d.n = int(d.total % uint64(len(d.mem)))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendUint64(b []byte, x uint64) []byte {
|
|
||||||
var a [8]byte
|
|
||||||
binary.LittleEndian.PutUint64(a[:], x)
|
|
||||||
return append(b, a[:]...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func consumeUint64(b []byte) ([]byte, uint64) {
|
|
||||||
x := u64(b)
|
|
||||||
return b[8:], x
|
|
||||||
}
|
|
||||||
|
|
||||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
|
||||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
|
||||||
|
|
||||||
func round(acc, input uint64) uint64 {
|
|
||||||
acc += input * prime2
|
|
||||||
acc = rol31(acc)
|
|
||||||
acc *= prime1
|
|
||||||
return acc
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeRound(acc, val uint64) uint64 {
|
|
||||||
val = round(0, val)
|
|
||||||
acc ^= val
|
|
||||||
acc = acc*prime1 + prime4
|
|
||||||
return acc
|
|
||||||
}
|
|
||||||
|
|
||||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
|
||||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
|
||||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
|
||||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
|
||||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
|
||||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
|
||||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
|
||||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
|
13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
generated
vendored
13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
generated
vendored
@ -1,13 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
// +build gc
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64 computes the 64-bit xxHash digest of b.
|
|
||||||
//
|
|
||||||
//go:noescape
|
|
||||||
func Sum64(b []byte) uint64
|
|
||||||
|
|
||||||
//go:noescape
|
|
||||||
func writeBlocks(d *Digest, b []byte) int
|
|
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
@ -1,215 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
// +build gc
|
|
||||||
// +build !purego
|
|
||||||
|
|
||||||
#include "textflag.h"
|
|
||||||
|
|
||||||
// Register allocation:
|
|
||||||
// AX h
|
|
||||||
// SI pointer to advance through b
|
|
||||||
// DX n
|
|
||||||
// BX loop end
|
|
||||||
// R8 v1, k1
|
|
||||||
// R9 v2
|
|
||||||
// R10 v3
|
|
||||||
// R11 v4
|
|
||||||
// R12 tmp
|
|
||||||
// R13 prime1v
|
|
||||||
// R14 prime2v
|
|
||||||
// DI prime4v
|
|
||||||
|
|
||||||
// round reads from and advances the buffer pointer in SI.
|
|
||||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
|
||||||
#define round(r) \
|
|
||||||
MOVQ (SI), R12 \
|
|
||||||
ADDQ $8, SI \
|
|
||||||
IMULQ R14, R12 \
|
|
||||||
ADDQ R12, r \
|
|
||||||
ROLQ $31, r \
|
|
||||||
IMULQ R13, r
|
|
||||||
|
|
||||||
// mergeRound applies a merge round on the two registers acc and val.
|
|
||||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
|
||||||
#define mergeRound(acc, val) \
|
|
||||||
IMULQ R14, val \
|
|
||||||
ROLQ $31, val \
|
|
||||||
IMULQ R13, val \
|
|
||||||
XORQ val, acc \
|
|
||||||
IMULQ R13, acc \
|
|
||||||
ADDQ DI, acc
|
|
||||||
|
|
||||||
// func Sum64(b []byte) uint64
|
|
||||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
|
||||||
// Load fixed primes.
|
|
||||||
MOVQ ·prime1v(SB), R13
|
|
||||||
MOVQ ·prime2v(SB), R14
|
|
||||||
MOVQ ·prime4v(SB), DI
|
|
||||||
|
|
||||||
// Load slice.
|
|
||||||
MOVQ b_base+0(FP), SI
|
|
||||||
MOVQ b_len+8(FP), DX
|
|
||||||
LEAQ (SI)(DX*1), BX
|
|
||||||
|
|
||||||
// The first loop limit will be len(b)-32.
|
|
||||||
SUBQ $32, BX
|
|
||||||
|
|
||||||
// Check whether we have at least one block.
|
|
||||||
CMPQ DX, $32
|
|
||||||
JLT noBlocks
|
|
||||||
|
|
||||||
// Set up initial state (v1, v2, v3, v4).
|
|
||||||
MOVQ R13, R8
|
|
||||||
ADDQ R14, R8
|
|
||||||
MOVQ R14, R9
|
|
||||||
XORQ R10, R10
|
|
||||||
XORQ R11, R11
|
|
||||||
SUBQ R13, R11
|
|
||||||
|
|
||||||
// Loop until SI > BX.
|
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
MOVQ R8, AX
|
|
||||||
ROLQ $1, AX
|
|
||||||
MOVQ R9, R12
|
|
||||||
ROLQ $7, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R10, R12
|
|
||||||
ROLQ $12, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R11, R12
|
|
||||||
ROLQ $18, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
|
|
||||||
mergeRound(AX, R8)
|
|
||||||
mergeRound(AX, R9)
|
|
||||||
mergeRound(AX, R10)
|
|
||||||
mergeRound(AX, R11)
|
|
||||||
|
|
||||||
JMP afterBlocks
|
|
||||||
|
|
||||||
noBlocks:
|
|
||||||
MOVQ ·prime5v(SB), AX
|
|
||||||
|
|
||||||
afterBlocks:
|
|
||||||
ADDQ DX, AX
|
|
||||||
|
|
||||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
|
||||||
ADDQ $24, BX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JG fourByte
|
|
||||||
|
|
||||||
wordLoop:
|
|
||||||
// Calculate k1.
|
|
||||||
MOVQ (SI), R8
|
|
||||||
ADDQ $8, SI
|
|
||||||
IMULQ R14, R8
|
|
||||||
ROLQ $31, R8
|
|
||||||
IMULQ R13, R8
|
|
||||||
|
|
||||||
XORQ R8, AX
|
|
||||||
ROLQ $27, AX
|
|
||||||
IMULQ R13, AX
|
|
||||||
ADDQ DI, AX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE wordLoop
|
|
||||||
|
|
||||||
fourByte:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JG singles
|
|
||||||
|
|
||||||
MOVL (SI), R8
|
|
||||||
ADDQ $4, SI
|
|
||||||
IMULQ R13, R8
|
|
||||||
XORQ R8, AX
|
|
||||||
|
|
||||||
ROLQ $23, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
ADDQ ·prime3v(SB), AX
|
|
||||||
|
|
||||||
singles:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JGE finalize
|
|
||||||
|
|
||||||
singlesLoop:
|
|
||||||
MOVBQZX (SI), R12
|
|
||||||
ADDQ $1, SI
|
|
||||||
IMULQ ·prime5v(SB), R12
|
|
||||||
XORQ R12, AX
|
|
||||||
|
|
||||||
ROLQ $11, AX
|
|
||||||
IMULQ R13, AX
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JL singlesLoop
|
|
||||||
|
|
||||||
finalize:
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $33, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $29, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
IMULQ ·prime3v(SB), AX
|
|
||||||
MOVQ AX, R12
|
|
||||||
SHRQ $32, R12
|
|
||||||
XORQ R12, AX
|
|
||||||
|
|
||||||
MOVQ AX, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
|
||||||
// the d pointer.
|
|
||||||
|
|
||||||
// func writeBlocks(d *Digest, b []byte) int
|
|
||||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
|
||||||
// Load fixed primes needed for round.
|
|
||||||
MOVQ ·prime1v(SB), R13
|
|
||||||
MOVQ ·prime2v(SB), R14
|
|
||||||
|
|
||||||
// Load slice.
|
|
||||||
MOVQ b_base+8(FP), SI
|
|
||||||
MOVQ b_len+16(FP), DX
|
|
||||||
LEAQ (SI)(DX*1), BX
|
|
||||||
SUBQ $32, BX
|
|
||||||
|
|
||||||
// Load vN from d.
|
|
||||||
MOVQ d+0(FP), AX
|
|
||||||
MOVQ 0(AX), R8 // v1
|
|
||||||
MOVQ 8(AX), R9 // v2
|
|
||||||
MOVQ 16(AX), R10 // v3
|
|
||||||
MOVQ 24(AX), R11 // v4
|
|
||||||
|
|
||||||
// We don't need to check the loop condition here; this function is
|
|
||||||
// always called with at least one block of data to process.
|
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
// Copy vN back to d.
|
|
||||||
MOVQ R8, 0(AX)
|
|
||||||
MOVQ R9, 8(AX)
|
|
||||||
MOVQ R10, 16(AX)
|
|
||||||
MOVQ R11, 24(AX)
|
|
||||||
|
|
||||||
// The number of bytes written is SI minus the old base pointer.
|
|
||||||
SUBQ b_base+8(FP), SI
|
|
||||||
MOVQ SI, ret+32(FP)
|
|
||||||
|
|
||||||
RET
|
|
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
@ -1,76 +0,0 @@
|
|||||||
// +build !amd64 appengine !gc purego
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64 computes the 64-bit xxHash digest of b.
|
|
||||||
func Sum64(b []byte) uint64 {
|
|
||||||
// A simpler version would be
|
|
||||||
// d := New()
|
|
||||||
// d.Write(b)
|
|
||||||
// return d.Sum64()
|
|
||||||
// but this is faster, particularly for small inputs.
|
|
||||||
|
|
||||||
n := len(b)
|
|
||||||
var h uint64
|
|
||||||
|
|
||||||
if n >= 32 {
|
|
||||||
v1 := prime1v + prime2
|
|
||||||
v2 := prime2
|
|
||||||
v3 := uint64(0)
|
|
||||||
v4 := -prime1v
|
|
||||||
for len(b) >= 32 {
|
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
|
||||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
|
||||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
|
||||||
b = b[32:len(b):len(b)]
|
|
||||||
}
|
|
||||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
|
||||||
h = mergeRound(h, v1)
|
|
||||||
h = mergeRound(h, v2)
|
|
||||||
h = mergeRound(h, v3)
|
|
||||||
h = mergeRound(h, v4)
|
|
||||||
} else {
|
|
||||||
h = prime5
|
|
||||||
}
|
|
||||||
|
|
||||||
h += uint64(n)
|
|
||||||
|
|
||||||
i, end := 0, len(b)
|
|
||||||
for ; i+8 <= end; i += 8 {
|
|
||||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
|
||||||
h ^= k1
|
|
||||||
h = rol27(h)*prime1 + prime4
|
|
||||||
}
|
|
||||||
if i+4 <= end {
|
|
||||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
|
||||||
h = rol23(h)*prime2 + prime3
|
|
||||||
i += 4
|
|
||||||
}
|
|
||||||
for ; i < end; i++ {
|
|
||||||
h ^= uint64(b[i]) * prime5
|
|
||||||
h = rol11(h) * prime1
|
|
||||||
}
|
|
||||||
|
|
||||||
h ^= h >> 33
|
|
||||||
h *= prime2
|
|
||||||
h ^= h >> 29
|
|
||||||
h *= prime3
|
|
||||||
h ^= h >> 32
|
|
||||||
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeBlocks(d *Digest, b []byte) int {
|
|
||||||
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
|
||||||
n := len(b)
|
|
||||||
for len(b) >= 32 {
|
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
|
||||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
|
||||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
|
||||||
b = b[32:len(b):len(b)]
|
|
||||||
}
|
|
||||||
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
|
||||||
return n - len(b)
|
|
||||||
}
|
|
15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
@ -1,15 +0,0 @@
|
|||||||
// +build appengine
|
|
||||||
|
|
||||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
// Sum64String computes the 64-bit xxHash digest of s.
|
|
||||||
func Sum64String(s string) uint64 {
|
|
||||||
return Sum64([]byte(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteString adds more data to d. It always returns len(s), nil.
|
|
||||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
|
||||||
return d.Write([]byte(s))
|
|
||||||
}
|
|
57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@ -1,57 +0,0 @@
|
|||||||
// +build !appengine
|
|
||||||
|
|
||||||
// This file encapsulates usage of unsafe.
|
|
||||||
// xxhash_safe.go contains the safe implementations.
|
|
||||||
|
|
||||||
package xxhash
|
|
||||||
|
|
||||||
import (
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// In the future it's possible that compiler optimizations will make these
|
|
||||||
// XxxString functions unnecessary by realizing that calls such as
|
|
||||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
|
||||||
// If that happens, even if we keep these functions they can be replaced with
|
|
||||||
// the trivial safe code.
|
|
||||||
|
|
||||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
|
||||||
//
|
|
||||||
// var b []byte
|
|
||||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
|
||||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
|
||||||
// bh.Len = len(s)
|
|
||||||
// bh.Cap = len(s)
|
|
||||||
//
|
|
||||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
|
||||||
// weight to this sequence of expressions that any function that uses it will
|
|
||||||
// not be inlined. Instead, the functions below use a different unsafe
|
|
||||||
// conversion designed to minimize the inliner weight and allow both to be
|
|
||||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
|
||||||
// inlined.
|
|
||||||
//
|
|
||||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
|
||||||
|
|
||||||
// Sum64String computes the 64-bit xxHash digest of s.
|
|
||||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
|
||||||
func Sum64String(s string) uint64 {
|
|
||||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
|
||||||
return Sum64(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteString adds more data to d. It always returns len(s), nil.
|
|
||||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
|
||||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
|
||||||
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
|
||||||
// d.Write always returns len(s), nil.
|
|
||||||
// Ignoring the return output and returning these fixed values buys a
|
|
||||||
// savings of 6 in the inliner's cost model.
|
|
||||||
return len(s), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
|
||||||
// of the first two words is the same as the layout of a string.
|
|
||||||
type sliceHeader struct {
|
|
||||||
s string
|
|
||||||
cap int
|
|
||||||
}
|
|
202
vendor/github.com/coreos/go-oidc/v3/LICENSE
generated
vendored
202
vendor/github.com/coreos/go-oidc/v3/LICENSE
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright {yyyy} {name of copyright owner}
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
|
|
5
vendor/github.com/coreos/go-oidc/v3/NOTICE
generated
vendored
5
vendor/github.com/coreos/go-oidc/v3/NOTICE
generated
vendored
@ -1,5 +0,0 @@
|
|||||||
CoreOS Project
|
|
||||||
Copyright 2014 CoreOS, Inc
|
|
||||||
|
|
||||||
This product includes software developed at CoreOS, Inc.
|
|
||||||
(http://www.coreos.com/).
|
|
16
vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
generated
vendored
16
vendor/github.com/coreos/go-oidc/v3/oidc/jose.go
generated
vendored
@ -1,16 +0,0 @@
|
|||||||
package oidc
|
|
||||||
|
|
||||||
// JOSE asymmetric signing algorithm values as defined by RFC 7518
|
|
||||||
//
|
|
||||||
// see: https://tools.ietf.org/html/rfc7518#section-3.1
|
|
||||||
const (
|
|
||||||
RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
|
|
||||||
RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
|
|
||||||
RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
|
|
||||||
ES256 = "ES256" // ECDSA using P-256 and SHA-256
|
|
||||||
ES384 = "ES384" // ECDSA using P-384 and SHA-384
|
|
||||||
ES512 = "ES512" // ECDSA using P-521 and SHA-512
|
|
||||||
PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
|
|
||||||
PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
|
|
||||||
PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
|
|
||||||
)
|
|
248
vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
generated
vendored
248
vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
generated
vendored
@ -1,248 +0,0 @@
|
|||||||
package oidc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/rsa"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
jose "github.com/go-jose/go-jose/v3"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StaticKeySet is a verifier that validates JWT against a static set of public keys.
|
|
||||||
type StaticKeySet struct {
|
|
||||||
// PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and
|
|
||||||
// *ecdsa.PublicKey.
|
|
||||||
PublicKeys []crypto.PublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifySignature compares the signature against a static set of public keys.
|
|
||||||
func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
|
|
||||||
jws, err := jose.ParseSigned(jwt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("parsing jwt: %v", err)
|
|
||||||
}
|
|
||||||
for _, pub := range s.PublicKeys {
|
|
||||||
switch pub.(type) {
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("invalid public key type provided: %T", pub)
|
|
||||||
}
|
|
||||||
payload, err := jws.Verify(pub)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return payload, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("no public keys able to verify jwt")
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
|
|
||||||
// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
|
|
||||||
// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
|
|
||||||
// exposed for providers that don't support discovery or to prevent round trips to the
|
|
||||||
// discovery URL.
|
|
||||||
//
|
|
||||||
// The returned KeySet is a long lived verifier that caches keys based on any
|
|
||||||
// keys change. Reuse a common remote key set instead of creating new ones as needed.
|
|
||||||
func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
|
|
||||||
return newRemoteKeySet(ctx, jwksURL, time.Now)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
|
|
||||||
if now == nil {
|
|
||||||
now = time.Now
|
|
||||||
}
|
|
||||||
return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
|
|
||||||
// a jwks_uri endpoint.
|
|
||||||
type RemoteKeySet struct {
|
|
||||||
jwksURL string
|
|
||||||
ctx context.Context
|
|
||||||
now func() time.Time
|
|
||||||
|
|
||||||
// guard all other fields
|
|
||||||
mu sync.RWMutex
|
|
||||||
|
|
||||||
// inflight suppresses parallel execution of updateKeys and allows
|
|
||||||
// multiple goroutines to wait for its result.
|
|
||||||
inflight *inflight
|
|
||||||
|
|
||||||
// A set of cached keys.
|
|
||||||
cachedKeys []jose.JSONWebKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// inflight is used to wait on some in-flight request from multiple goroutines.
|
|
||||||
type inflight struct {
|
|
||||||
doneCh chan struct{}
|
|
||||||
|
|
||||||
keys []jose.JSONWebKey
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func newInflight() *inflight {
|
|
||||||
return &inflight{doneCh: make(chan struct{})}
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait returns a channel that multiple goroutines can receive on. Once it returns
|
|
||||||
// a value, the inflight request is done and result() can be inspected.
|
|
||||||
func (i *inflight) wait() <-chan struct{} {
|
|
||||||
return i.doneCh
|
|
||||||
}
|
|
||||||
|
|
||||||
// done can only be called by a single goroutine. It records the result of the
|
|
||||||
// inflight request and signals other goroutines that the result is safe to
|
|
||||||
// inspect.
|
|
||||||
func (i *inflight) done(keys []jose.JSONWebKey, err error) {
|
|
||||||
i.keys = keys
|
|
||||||
i.err = err
|
|
||||||
close(i.doneCh)
|
|
||||||
}
|
|
||||||
|
|
||||||
// result cannot be called until the wait() channel has returned a value.
|
|
||||||
func (i *inflight) result() ([]jose.JSONWebKey, error) {
|
|
||||||
return i.keys, i.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// paresdJWTKey is a context key that allows common setups to avoid parsing the
|
|
||||||
// JWT twice. It holds a *jose.JSONWebSignature value.
|
|
||||||
var parsedJWTKey contextKey
|
|
||||||
|
|
||||||
// VerifySignature validates a payload against a signature from the jwks_uri.
|
|
||||||
//
|
|
||||||
// Users MUST NOT call this method directly and should use an IDTokenVerifier
|
|
||||||
// instead. This method skips critical validations such as 'alg' values and is
|
|
||||||
// only exported to implement the KeySet interface.
|
|
||||||
func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
|
|
||||||
jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
|
|
||||||
if !ok {
|
|
||||||
var err error
|
|
||||||
jws, err = jose.ParseSigned(jwt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return r.verify(ctx, jws)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
|
|
||||||
// We don't support JWTs signed with multiple signatures.
|
|
||||||
keyID := ""
|
|
||||||
for _, sig := range jws.Signatures {
|
|
||||||
keyID = sig.Header.KeyID
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := r.keysFromCache()
|
|
||||||
for _, key := range keys {
|
|
||||||
if keyID == "" || key.KeyID == keyID {
|
|
||||||
if payload, err := jws.Verify(&key); err == nil {
|
|
||||||
return payload, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the kid doesn't match, check for new keys from the remote. This is the
|
|
||||||
// strategy recommended by the spec.
|
|
||||||
//
|
|
||||||
// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
|
|
||||||
keys, err := r.keysFromRemote(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("fetching keys %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, key := range keys {
|
|
||||||
if keyID == "" || key.KeyID == keyID {
|
|
||||||
if payload, err := jws.Verify(&key); err == nil {
|
|
||||||
return payload, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, errors.New("failed to verify id token signature")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) {
|
|
||||||
r.mu.RLock()
|
|
||||||
defer r.mu.RUnlock()
|
|
||||||
return r.cachedKeys
|
|
||||||
}
|
|
||||||
|
|
||||||
// keysFromRemote syncs the key set from the remote set, records the values in the
|
|
||||||
// cache, and returns the key set.
|
|
||||||
func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
|
|
||||||
// Need to lock to inspect the inflight request field.
|
|
||||||
r.mu.Lock()
|
|
||||||
// If there's not a current inflight request, create one.
|
|
||||||
if r.inflight == nil {
|
|
||||||
r.inflight = newInflight()
|
|
||||||
|
|
||||||
// This goroutine has exclusive ownership over the current inflight
|
|
||||||
// request. It releases the resource by nil'ing the inflight field
|
|
||||||
// once the goroutine is done.
|
|
||||||
go func() {
|
|
||||||
// Sync keys and finish inflight when that's done.
|
|
||||||
keys, err := r.updateKeys()
|
|
||||||
|
|
||||||
r.inflight.done(keys, err)
|
|
||||||
|
|
||||||
// Lock to update the keys and indicate that there is no longer an
|
|
||||||
// inflight request.
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
r.cachedKeys = keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Free inflight so a different request can run.
|
|
||||||
r.inflight = nil
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
inflight := r.inflight
|
|
||||||
r.mu.Unlock()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, ctx.Err()
|
|
||||||
case <-inflight.wait():
|
|
||||||
return inflight.result()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {
|
|
||||||
req, err := http.NewRequest("GET", r.jwksURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: can't create request: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := doRequest(r.ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: get keys failed %v", err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
|
|
||||||
}
|
|
||||||
|
|
||||||
var keySet jose.JSONWebKeySet
|
|
||||||
err = unmarshalResp(resp, body, &keySet)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
|
|
||||||
}
|
|
||||||
return keySet.Keys, nil
|
|
||||||
}
|
|
522
vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
generated
vendored
522
vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
generated
vendored
@ -1,522 +0,0 @@
|
|||||||
// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
|
|
||||||
package oidc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/sha256"
|
|
||||||
"crypto/sha512"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"io/ioutil"
|
|
||||||
"mime"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
|
|
||||||
ScopeOpenID = "openid"
|
|
||||||
|
|
||||||
// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
|
|
||||||
// OAuth2 refresh tokens.
|
|
||||||
//
|
|
||||||
// Support for this scope differs between OpenID Connect providers. For instance
|
|
||||||
// Google rejects it, favoring appending "access_type=offline" as part of the
|
|
||||||
// authorization request instead.
|
|
||||||
//
|
|
||||||
// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
|
|
||||||
ScopeOfflineAccess = "offline_access"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errNoAtHash = errors.New("id token did not have an access token hash")
|
|
||||||
errInvalidAtHash = errors.New("access token hash does not match value in ID token")
|
|
||||||
)
|
|
||||||
|
|
||||||
type contextKey int
|
|
||||||
|
|
||||||
var issuerURLKey contextKey
|
|
||||||
|
|
||||||
// ClientContext returns a new Context that carries the provided HTTP client.
|
|
||||||
//
|
|
||||||
// This method sets the same context key used by the golang.org/x/oauth2 package,
|
|
||||||
// so the returned context works for that package too.
|
|
||||||
//
|
|
||||||
// myClient := &http.Client{}
|
|
||||||
// ctx := oidc.ClientContext(parentContext, myClient)
|
|
||||||
//
|
|
||||||
// // This will use the custom client
|
|
||||||
// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
|
|
||||||
//
|
|
||||||
func ClientContext(ctx context.Context, client *http.Client) context.Context {
|
|
||||||
return context.WithValue(ctx, oauth2.HTTPClient, client)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cloneContext copies a context's bag-of-values into a new context that isn't
|
|
||||||
// associated with its cancellation. This is used to initialize remote keys sets
|
|
||||||
// which run in the background and aren't associated with the initial context.
|
|
||||||
func cloneContext(ctx context.Context) context.Context {
|
|
||||||
cp := context.Background()
|
|
||||||
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
|
||||||
cp = ClientContext(cp, c)
|
|
||||||
}
|
|
||||||
return cp
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsecureIssuerURLContext allows discovery to work when the issuer_url reported
|
|
||||||
// by upstream is mismatched with the discovery URL. This is meant for integration
|
|
||||||
// with off-spec providers such as Azure.
|
|
||||||
//
|
|
||||||
// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
|
|
||||||
// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
|
|
||||||
//
|
|
||||||
// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
|
|
||||||
//
|
|
||||||
// // Provider will be discovered with the discoveryBaseURL, but use issuerURL
|
|
||||||
// // for future issuer validation.
|
|
||||||
// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
|
|
||||||
//
|
|
||||||
// This is insecure because validating the correct issuer is critical for multi-tenant
|
|
||||||
// proivders. Any overrides here MUST be carefully reviewed.
|
|
||||||
func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
|
|
||||||
return context.WithValue(ctx, issuerURLKey, issuerURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
|
|
||||||
client := http.DefaultClient
|
|
||||||
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
|
||||||
client = c
|
|
||||||
}
|
|
||||||
return client.Do(req.WithContext(ctx))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Provider represents an OpenID Connect server's configuration.
|
|
||||||
type Provider struct {
|
|
||||||
issuer string
|
|
||||||
authURL string
|
|
||||||
tokenURL string
|
|
||||||
userInfoURL string
|
|
||||||
algorithms []string
|
|
||||||
|
|
||||||
// Raw claims returned by the server.
|
|
||||||
rawClaims []byte
|
|
||||||
|
|
||||||
remoteKeySet KeySet
|
|
||||||
}
|
|
||||||
|
|
||||||
type providerJSON struct {
|
|
||||||
Issuer string `json:"issuer"`
|
|
||||||
AuthURL string `json:"authorization_endpoint"`
|
|
||||||
TokenURL string `json:"token_endpoint"`
|
|
||||||
JWKSURL string `json:"jwks_uri"`
|
|
||||||
UserInfoURL string `json:"userinfo_endpoint"`
|
|
||||||
Algorithms []string `json:"id_token_signing_alg_values_supported"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// supportedAlgorithms is a list of algorithms explicitly supported by this
|
|
||||||
// package. If a provider supports other algorithms, such as HS256 or none,
|
|
||||||
// those values won't be passed to the IDTokenVerifier.
|
|
||||||
var supportedAlgorithms = map[string]bool{
|
|
||||||
RS256: true,
|
|
||||||
RS384: true,
|
|
||||||
RS512: true,
|
|
||||||
ES256: true,
|
|
||||||
ES384: true,
|
|
||||||
ES512: true,
|
|
||||||
PS256: true,
|
|
||||||
PS384: true,
|
|
||||||
PS512: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProviderConfig allows creating providers when discovery isn't supported. It's
|
|
||||||
// generally easier to use NewProvider directly.
|
|
||||||
type ProviderConfig struct {
|
|
||||||
// IssuerURL is the identity of the provider, and the string it uses to sign
|
|
||||||
// ID tokens with. For example "https://accounts.google.com". This value MUST
|
|
||||||
// match ID tokens exactly.
|
|
||||||
IssuerURL string
|
|
||||||
// AuthURL is the endpoint used by the provider to support the OAuth 2.0
|
|
||||||
// authorization endpoint.
|
|
||||||
AuthURL string
|
|
||||||
// TokenURL is the endpoint used by the provider to support the OAuth 2.0
|
|
||||||
// token endpoint.
|
|
||||||
TokenURL string
|
|
||||||
// UserInfoURL is the endpoint used by the provider to support the OpenID
|
|
||||||
// Connect UserInfo flow.
|
|
||||||
//
|
|
||||||
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
|
|
||||||
UserInfoURL string
|
|
||||||
// JWKSURL is the endpoint used by the provider to advertise public keys to
|
|
||||||
// verify issued ID tokens. This endpoint is polled as new keys are made
|
|
||||||
// available.
|
|
||||||
JWKSURL string
|
|
||||||
|
|
||||||
// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
|
|
||||||
// ID tokens. If not provided, this defaults to the algorithms advertised by
|
|
||||||
// the JWK endpoint, then the set of algorithms supported by this package.
|
|
||||||
Algorithms []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewProvider initializes a provider from a set of endpoints, rather than
|
|
||||||
// through discovery.
|
|
||||||
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
|
|
||||||
return &Provider{
|
|
||||||
issuer: p.IssuerURL,
|
|
||||||
authURL: p.AuthURL,
|
|
||||||
tokenURL: p.TokenURL,
|
|
||||||
userInfoURL: p.UserInfoURL,
|
|
||||||
algorithms: p.Algorithms,
|
|
||||||
remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
|
|
||||||
//
|
|
||||||
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
|
|
||||||
// or "https://login.salesforce.com".
|
|
||||||
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
|
|
||||||
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
|
|
||||||
req, err := http.NewRequest("GET", wellKnown, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
resp, err := doRequest(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
|
||||||
}
|
|
||||||
|
|
||||||
var p providerJSON
|
|
||||||
err = unmarshalResp(resp, body, &p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string)
|
|
||||||
if !skipIssuerValidation {
|
|
||||||
issuerURL = issuer
|
|
||||||
}
|
|
||||||
if p.Issuer != issuerURL && !skipIssuerValidation {
|
|
||||||
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
|
|
||||||
}
|
|
||||||
var algs []string
|
|
||||||
for _, a := range p.Algorithms {
|
|
||||||
if supportedAlgorithms[a] {
|
|
||||||
algs = append(algs, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &Provider{
|
|
||||||
issuer: issuerURL,
|
|
||||||
authURL: p.AuthURL,
|
|
||||||
tokenURL: p.TokenURL,
|
|
||||||
userInfoURL: p.UserInfoURL,
|
|
||||||
algorithms: algs,
|
|
||||||
rawClaims: body,
|
|
||||||
remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Claims unmarshals raw fields returned by the server during discovery.
|
|
||||||
//
|
|
||||||
// var claims struct {
|
|
||||||
// ScopesSupported []string `json:"scopes_supported"`
|
|
||||||
// ClaimsSupported []string `json:"claims_supported"`
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// if err := provider.Claims(&claims); err != nil {
|
|
||||||
// // handle unmarshaling error
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// For a list of fields defined by the OpenID Connect spec see:
|
|
||||||
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
|
|
||||||
func (p *Provider) Claims(v interface{}) error {
|
|
||||||
if p.rawClaims == nil {
|
|
||||||
return errors.New("oidc: claims not set")
|
|
||||||
}
|
|
||||||
return json.Unmarshal(p.rawClaims, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
|
|
||||||
func (p *Provider) Endpoint() oauth2.Endpoint {
|
|
||||||
return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UserInfo represents the OpenID Connect userinfo claims.
|
|
||||||
type UserInfo struct {
|
|
||||||
Subject string `json:"sub"`
|
|
||||||
Profile string `json:"profile"`
|
|
||||||
Email string `json:"email"`
|
|
||||||
EmailVerified bool `json:"email_verified"`
|
|
||||||
|
|
||||||
claims []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type userInfoRaw struct {
|
|
||||||
Subject string `json:"sub"`
|
|
||||||
Profile string `json:"profile"`
|
|
||||||
Email string `json:"email"`
|
|
||||||
// Handle providers that return email_verified as a string
|
|
||||||
// https://forums.aws.amazon.com/thread.jspa?messageID=949441󧳁 and
|
|
||||||
// https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11
|
|
||||||
EmailVerified stringAsBool `json:"email_verified"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Claims unmarshals the raw JSON object claims into the provided object.
|
|
||||||
func (u *UserInfo) Claims(v interface{}) error {
|
|
||||||
if u.claims == nil {
|
|
||||||
return errors.New("oidc: claims not set")
|
|
||||||
}
|
|
||||||
return json.Unmarshal(u.claims, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UserInfo uses the token source to query the provider's user info endpoint.
|
|
||||||
func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
|
|
||||||
if p.userInfoURL == "" {
|
|
||||||
return nil, errors.New("oidc: user info endpoint is not supported by this provider")
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", p.userInfoURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: create GET request: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
token, err := tokenSource.Token()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: get access token: %v", err)
|
|
||||||
}
|
|
||||||
token.SetAuthHeader(req)
|
|
||||||
|
|
||||||
resp, err := doRequest(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
|
||||||
}
|
|
||||||
|
|
||||||
ct := resp.Header.Get("Content-Type")
|
|
||||||
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
|
||||||
if parseErr == nil && mediaType == "application/jwt" {
|
|
||||||
payload, err := p.remoteKeySet.VerifySignature(ctx, string(body))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
|
|
||||||
}
|
|
||||||
body = payload
|
|
||||||
}
|
|
||||||
|
|
||||||
var userInfo userInfoRaw
|
|
||||||
if err := json.Unmarshal(body, &userInfo); err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
|
|
||||||
}
|
|
||||||
return &UserInfo{
|
|
||||||
Subject: userInfo.Subject,
|
|
||||||
Profile: userInfo.Profile,
|
|
||||||
Email: userInfo.Email,
|
|
||||||
EmailVerified: bool(userInfo.EmailVerified),
|
|
||||||
claims: body,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDToken is an OpenID Connect extension that provides a predictable representation
|
|
||||||
// of an authorization event.
|
|
||||||
//
|
|
||||||
// The ID Token only holds fields OpenID Connect requires. To access additional
|
|
||||||
// claims returned by the server, use the Claims method.
|
|
||||||
type IDToken struct {
|
|
||||||
// The URL of the server which issued this token. OpenID Connect
|
|
||||||
// requires this value always be identical to the URL used for
|
|
||||||
// initial discovery.
|
|
||||||
//
|
|
||||||
// Note: Because of a known issue with Google Accounts' implementation
|
|
||||||
// this value may differ when using Google.
|
|
||||||
//
|
|
||||||
// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
|
|
||||||
Issuer string
|
|
||||||
|
|
||||||
// The client ID, or set of client IDs, that this token is issued for. For
|
|
||||||
// common uses, this is the client that initialized the auth flow.
|
|
||||||
//
|
|
||||||
// This package ensures the audience contains an expected value.
|
|
||||||
Audience []string
|
|
||||||
|
|
||||||
// A unique string which identifies the end user.
|
|
||||||
Subject string
|
|
||||||
|
|
||||||
// Expiry of the token. Ths package will not process tokens that have
|
|
||||||
// expired unless that validation is explicitly turned off.
|
|
||||||
Expiry time.Time
|
|
||||||
// When the token was issued by the provider.
|
|
||||||
IssuedAt time.Time
|
|
||||||
|
|
||||||
// Initial nonce provided during the authentication redirect.
|
|
||||||
//
|
|
||||||
// This package does NOT provided verification on the value of this field
|
|
||||||
// and it's the user's responsibility to ensure it contains a valid value.
|
|
||||||
Nonce string
|
|
||||||
|
|
||||||
// at_hash claim, if set in the ID token. Callers can verify an access token
|
|
||||||
// that corresponds to the ID token using the VerifyAccessToken method.
|
|
||||||
AccessTokenHash string
|
|
||||||
|
|
||||||
// signature algorithm used for ID token, needed to compute a verification hash of an
|
|
||||||
// access token
|
|
||||||
sigAlgorithm string
|
|
||||||
|
|
||||||
// Raw payload of the id_token.
|
|
||||||
claims []byte
|
|
||||||
|
|
||||||
// Map of distributed claim names to claim sources
|
|
||||||
distributedClaims map[string]claimSource
|
|
||||||
}
|
|
||||||
|
|
||||||
// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
|
|
||||||
//
|
|
||||||
// idToken, err := idTokenVerifier.Verify(rawIDToken)
|
|
||||||
// if err != nil {
|
|
||||||
// // handle error
|
|
||||||
// }
|
|
||||||
// var claims struct {
|
|
||||||
// Email string `json:"email"`
|
|
||||||
// EmailVerified bool `json:"email_verified"`
|
|
||||||
// }
|
|
||||||
// if err := idToken.Claims(&claims); err != nil {
|
|
||||||
// // handle error
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
func (i *IDToken) Claims(v interface{}) error {
|
|
||||||
if i.claims == nil {
|
|
||||||
return errors.New("oidc: claims not set")
|
|
||||||
}
|
|
||||||
return json.Unmarshal(i.claims, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyAccessToken verifies that the hash of the access token that corresponds to the iD token
|
|
||||||
// matches the hash in the id token. It returns an error if the hashes don't match.
|
|
||||||
// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
|
|
||||||
// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
|
|
||||||
func (i *IDToken) VerifyAccessToken(accessToken string) error {
|
|
||||||
if i.AccessTokenHash == "" {
|
|
||||||
return errNoAtHash
|
|
||||||
}
|
|
||||||
var h hash.Hash
|
|
||||||
switch i.sigAlgorithm {
|
|
||||||
case RS256, ES256, PS256:
|
|
||||||
h = sha256.New()
|
|
||||||
case RS384, ES384, PS384:
|
|
||||||
h = sha512.New384()
|
|
||||||
case RS512, ES512, PS512:
|
|
||||||
h = sha512.New()
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
|
|
||||||
}
|
|
||||||
h.Write([]byte(accessToken)) // hash documents that Write will never return an error
|
|
||||||
sum := h.Sum(nil)[:h.Size()/2]
|
|
||||||
actual := base64.RawURLEncoding.EncodeToString(sum)
|
|
||||||
if actual != i.AccessTokenHash {
|
|
||||||
return errInvalidAtHash
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type idToken struct {
|
|
||||||
Issuer string `json:"iss"`
|
|
||||||
Subject string `json:"sub"`
|
|
||||||
Audience audience `json:"aud"`
|
|
||||||
Expiry jsonTime `json:"exp"`
|
|
||||||
IssuedAt jsonTime `json:"iat"`
|
|
||||||
NotBefore *jsonTime `json:"nbf"`
|
|
||||||
Nonce string `json:"nonce"`
|
|
||||||
AtHash string `json:"at_hash"`
|
|
||||||
ClaimNames map[string]string `json:"_claim_names"`
|
|
||||||
ClaimSources map[string]claimSource `json:"_claim_sources"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type claimSource struct {
|
|
||||||
Endpoint string `json:"endpoint"`
|
|
||||||
AccessToken string `json:"access_token"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type stringAsBool bool
|
|
||||||
|
|
||||||
func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
|
|
||||||
switch string(b) {
|
|
||||||
case "true", `"true"`:
|
|
||||||
*sb = true
|
|
||||||
case "false", `"false"`:
|
|
||||||
*sb = false
|
|
||||||
default:
|
|
||||||
return errors.New("invalid value for boolean")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type audience []string
|
|
||||||
|
|
||||||
func (a *audience) UnmarshalJSON(b []byte) error {
|
|
||||||
var s string
|
|
||||||
if json.Unmarshal(b, &s) == nil {
|
|
||||||
*a = audience{s}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var auds []string
|
|
||||||
if err := json.Unmarshal(b, &auds); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*a = auds
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type jsonTime time.Time
|
|
||||||
|
|
||||||
func (j *jsonTime) UnmarshalJSON(b []byte) error {
|
|
||||||
var n json.Number
|
|
||||||
if err := json.Unmarshal(b, &n); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var unix int64
|
|
||||||
|
|
||||||
if t, err := n.Int64(); err == nil {
|
|
||||||
unix = t
|
|
||||||
} else {
|
|
||||||
f, err := n.Float64()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
unix = int64(f)
|
|
||||||
}
|
|
||||||
*j = jsonTime(time.Unix(unix, 0))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
|
|
||||||
err := json.Unmarshal(body, &v)
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
ct := r.Header.Get("Content-Type")
|
|
||||||
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
|
||||||
if parseErr == nil && mediaType == "application/json" {
|
|
||||||
return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
|
|
||||||
}
|
|
344
vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
generated
vendored
344
vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
generated
vendored
@ -1,344 +0,0 @@
|
|||||||
package oidc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
jose "github.com/go-jose/go-jose/v3"
|
|
||||||
"golang.org/x/oauth2"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
issuerGoogleAccounts = "https://accounts.google.com"
|
|
||||||
issuerGoogleAccountsNoScheme = "accounts.google.com"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TokenExpiredError indicates that Verify failed because the token was expired. This
|
|
||||||
// error does NOT indicate that the token is not also invalid for other reasons. Other
|
|
||||||
// checks might have failed if the expiration check had not failed.
|
|
||||||
type TokenExpiredError struct {
|
|
||||||
// Expiry is the time when the token expired.
|
|
||||||
Expiry time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *TokenExpiredError) Error() string {
|
|
||||||
return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry)
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeySet is a set of publc JSON Web Keys that can be used to validate the signature
|
|
||||||
// of JSON web tokens. This is expected to be backed by a remote key set through
|
|
||||||
// provider metadata discovery or an in-memory set of keys delivered out-of-band.
|
|
||||||
type KeySet interface {
|
|
||||||
// VerifySignature parses the JSON web token, verifies the signature, and returns
|
|
||||||
// the raw payload. Header and claim fields are validated by other parts of the
|
|
||||||
// package. For example, the KeySet does not need to check values such as signature
|
|
||||||
// algorithm, issuer, and audience since the IDTokenVerifier validates these values
|
|
||||||
// independently.
|
|
||||||
//
|
|
||||||
// If VerifySignature makes HTTP requests to verify the token, it's expected to
|
|
||||||
// use any HTTP client associated with the context through ClientContext.
|
|
||||||
VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IDTokenVerifier provides verification for ID Tokens.
|
|
||||||
type IDTokenVerifier struct {
|
|
||||||
keySet KeySet
|
|
||||||
config *Config
|
|
||||||
issuer string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
|
|
||||||
//
|
|
||||||
// It's easier to use provider discovery to construct an IDTokenVerifier than creating
|
|
||||||
// one directly. This method is intended to be used with provider that don't support
|
|
||||||
// metadata discovery, or avoiding round trips when the key set URL is already known.
|
|
||||||
//
|
|
||||||
// This constructor can be used to create a verifier directly using the issuer URL and
|
|
||||||
// JSON Web Key Set URL without using discovery:
|
|
||||||
//
|
|
||||||
// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
|
|
||||||
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
|
|
||||||
//
|
|
||||||
// Or a static key set (e.g. for testing):
|
|
||||||
//
|
|
||||||
// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
|
|
||||||
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
|
|
||||||
//
|
|
||||||
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
|
|
||||||
return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config is the configuration for an IDTokenVerifier.
|
|
||||||
type Config struct {
|
|
||||||
// Expected audience of the token. For a majority of the cases this is expected to be
|
|
||||||
// the ID of the client that initialized the login flow. It may occasionally differ if
|
|
||||||
// the provider supports the authorizing party (azp) claim.
|
|
||||||
//
|
|
||||||
// If not provided, users must explicitly set SkipClientIDCheck.
|
|
||||||
ClientID string
|
|
||||||
// If specified, only this set of algorithms may be used to sign the JWT.
|
|
||||||
//
|
|
||||||
// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
|
|
||||||
// defaults to the set of algorithms the provider supports. Otherwise this values
|
|
||||||
// defaults to RS256.
|
|
||||||
SupportedSigningAlgs []string
|
|
||||||
|
|
||||||
// If true, no ClientID check performed. Must be true if ClientID field is empty.
|
|
||||||
SkipClientIDCheck bool
|
|
||||||
// If true, token expiry is not checked.
|
|
||||||
SkipExpiryCheck bool
|
|
||||||
|
|
||||||
// SkipIssuerCheck is intended for specialized cases where the the caller wishes to
|
|
||||||
// defer issuer validation. When enabled, callers MUST independently verify the Token's
|
|
||||||
// Issuer is a known good value.
|
|
||||||
//
|
|
||||||
// Mismatched issuers often indicate client mis-configuration. If mismatches are
|
|
||||||
// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
|
|
||||||
// this option.
|
|
||||||
SkipIssuerCheck bool
|
|
||||||
|
|
||||||
// Time function to check Token expiry. Defaults to time.Now
|
|
||||||
Now func() time.Time
|
|
||||||
|
|
||||||
// InsecureSkipSignatureCheck causes this package to skip JWT signature validation.
|
|
||||||
// It's intended for special cases where providers (such as Azure), use the "none"
|
|
||||||
// algorithm.
|
|
||||||
//
|
|
||||||
// This option can only be enabled safely when the ID Token is received directly
|
|
||||||
// from the provider after the token exchange.
|
|
||||||
//
|
|
||||||
// This option MUST NOT be used when receiving an ID Token from sources other
|
|
||||||
// than the token endpoint.
|
|
||||||
InsecureSkipSignatureCheck bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
|
|
||||||
func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
|
|
||||||
if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
|
|
||||||
// Make a copy so we don't modify the config values.
|
|
||||||
cp := &Config{}
|
|
||||||
*cp = *config
|
|
||||||
cp.SupportedSigningAlgs = p.algorithms
|
|
||||||
config = cp
|
|
||||||
}
|
|
||||||
return NewVerifier(p.issuer, p.remoteKeySet, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseJWT(p string) ([]byte, error) {
|
|
||||||
parts := strings.Split(p, ".")
|
|
||||||
if len(parts) < 2 {
|
|
||||||
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
|
|
||||||
}
|
|
||||||
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
|
|
||||||
}
|
|
||||||
return payload, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func contains(sli []string, ele string) bool {
|
|
||||||
for _, s := range sli {
|
|
||||||
if s == ele {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the Claims from the distributed JWT token
|
|
||||||
func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
|
|
||||||
req, err := http.NewRequest("GET", src.Endpoint, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("malformed request: %v", err)
|
|
||||||
}
|
|
||||||
if src.AccessToken != "" {
|
|
||||||
req.Header.Set("Authorization", "Bearer "+src.AccessToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := doRequest(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
token, err := verifier.Verify(ctx, string(body))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("malformed response body: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return token.claims, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
|
|
||||||
// any additional checks depending on the Config, and returns the payload.
|
|
||||||
//
|
|
||||||
// Verify does NOT do nonce validation, which is the callers responsibility.
|
|
||||||
//
|
|
||||||
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
|
|
||||||
//
|
|
||||||
// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
|
|
||||||
// if err != nil {
|
|
||||||
// // handle error
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// // Extract the ID Token from oauth2 token.
|
|
||||||
// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
|
|
||||||
// if !ok {
|
|
||||||
// // handle error
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// token, err := verifier.Verify(ctx, rawIDToken)
|
|
||||||
//
|
|
||||||
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
|
|
||||||
// Throw out tokens with invalid claims before trying to verify the token. This lets
|
|
||||||
// us do cheap checks before possibly re-syncing keys.
|
|
||||||
payload, err := parseJWT(rawIDToken)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
|
||||||
}
|
|
||||||
var token idToken
|
|
||||||
if err := json.Unmarshal(payload, &token); err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
distributedClaims := make(map[string]claimSource)
|
|
||||||
|
|
||||||
//step through the token to map claim names to claim sources"
|
|
||||||
for cn, src := range token.ClaimNames {
|
|
||||||
if src == "" {
|
|
||||||
return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
|
|
||||||
}
|
|
||||||
s, ok := token.ClaimSources[src]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("oidc: source does not exist")
|
|
||||||
}
|
|
||||||
distributedClaims[cn] = s
|
|
||||||
}
|
|
||||||
|
|
||||||
t := &IDToken{
|
|
||||||
Issuer: token.Issuer,
|
|
||||||
Subject: token.Subject,
|
|
||||||
Audience: []string(token.Audience),
|
|
||||||
Expiry: time.Time(token.Expiry),
|
|
||||||
IssuedAt: time.Time(token.IssuedAt),
|
|
||||||
Nonce: token.Nonce,
|
|
||||||
AccessTokenHash: token.AtHash,
|
|
||||||
claims: payload,
|
|
||||||
distributedClaims: distributedClaims,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check issuer.
|
|
||||||
if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
|
|
||||||
// Google sometimes returns "accounts.google.com" as the issuer claim instead of
|
|
||||||
// the required "https://accounts.google.com". Detect this case and allow it only
|
|
||||||
// for Google.
|
|
||||||
//
|
|
||||||
// We will not add hooks to let other providers go off spec like this.
|
|
||||||
if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
|
|
||||||
return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
|
|
||||||
//
|
|
||||||
// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
|
|
||||||
if !v.config.SkipClientIDCheck {
|
|
||||||
if v.config.ClientID != "" {
|
|
||||||
if !contains(t.Audience, v.config.ClientID) {
|
|
||||||
return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If a SkipExpiryCheck is false, make sure token is not expired.
|
|
||||||
if !v.config.SkipExpiryCheck {
|
|
||||||
now := time.Now
|
|
||||||
if v.config.Now != nil {
|
|
||||||
now = v.config.Now
|
|
||||||
}
|
|
||||||
nowTime := now()
|
|
||||||
|
|
||||||
if t.Expiry.Before(nowTime) {
|
|
||||||
return nil, &TokenExpiredError{Expiry: t.Expiry}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If nbf claim is provided in token, ensure that it is indeed in the past.
|
|
||||||
if token.NotBefore != nil {
|
|
||||||
nbfTime := time.Time(*token.NotBefore)
|
|
||||||
// Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew.
|
|
||||||
// https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153
|
|
||||||
leeway := 5 * time.Minute
|
|
||||||
|
|
||||||
if nowTime.Add(leeway).Before(nbfTime) {
|
|
||||||
return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.config.InsecureSkipSignatureCheck {
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
jws, err := jose.ParseSigned(rawIDToken)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch len(jws.Signatures) {
|
|
||||||
case 0:
|
|
||||||
return nil, fmt.Errorf("oidc: id token not signed")
|
|
||||||
case 1:
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
sig := jws.Signatures[0]
|
|
||||||
supportedSigAlgs := v.config.SupportedSigningAlgs
|
|
||||||
if len(supportedSigAlgs) == 0 {
|
|
||||||
supportedSigAlgs = []string{RS256}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !contains(supportedSigAlgs, sig.Header.Algorithm) {
|
|
||||||
return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
|
|
||||||
}
|
|
||||||
|
|
||||||
t.sigAlgorithm = sig.Header.Algorithm
|
|
||||||
|
|
||||||
ctx = context.WithValue(ctx, parsedJWTKey, jws)
|
|
||||||
gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to verify signature: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure that the payload returned by the square actually matches the payload parsed earlier.
|
|
||||||
if !bytes.Equal(gotPayload, payload) {
|
|
||||||
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
|
|
||||||
}
|
|
||||||
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Nonce returns an auth code option which requires the ID Token created by the
|
|
||||||
// OpenID Connect provider to contain the specified nonce.
|
|
||||||
func Nonce(nonce string) oauth2.AuthCodeOption {
|
|
||||||
return oauth2.SetAuthURLParam("nonce", nonce)
|
|
||||||
}
|
|
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
@ -1,15 +0,0 @@
|
|||||||
ISC License
|
|
||||||
|
|
||||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
|
||||||
|
|
||||||
Permission to use, copy, modify, and/or distribute this software for any
|
|
||||||
purpose with or without fee is hereby granted, provided that the above
|
|
||||||
copyright notice and this permission notice appear in all copies.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
@ -1,145 +0,0 @@
|
|||||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
|
||||||
//
|
|
||||||
// Permission to use, copy, modify, and distribute this software for any
|
|
||||||
// purpose with or without fee is hereby granted, provided that the above
|
|
||||||
// copyright notice and this permission notice appear in all copies.
|
|
||||||
//
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
|
|
||||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
|
||||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
|
||||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
|
||||||
// tag is deprecated and thus should not be used.
|
|
||||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
|
||||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
|
||||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
|
||||||
// not access to the unsafe package is available.
|
|
||||||
UnsafeDisabled = false
|
|
||||||
|
|
||||||
// ptrSize is the size of a pointer on the current arch.
|
|
||||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
|
||||||
)
|
|
||||||
|
|
||||||
type flag uintptr
|
|
||||||
|
|
||||||
var (
|
|
||||||
// flagRO indicates whether the value field of a reflect.Value
|
|
||||||
// is read-only.
|
|
||||||
flagRO flag
|
|
||||||
|
|
||||||
// flagAddr indicates whether the address of the reflect.Value's
|
|
||||||
// value may be taken.
|
|
||||||
flagAddr flag
|
|
||||||
)
|
|
||||||
|
|
||||||
// flagKindMask holds the bits that make up the kind
|
|
||||||
// part of the flags field. In all the supported versions,
|
|
||||||
// it is in the lower 5 bits.
|
|
||||||
const flagKindMask = flag(0x1f)
|
|
||||||
|
|
||||||
// Different versions of Go have used different
|
|
||||||
// bit layouts for the flags type. This table
|
|
||||||
// records the known combinations.
|
|
||||||
var okFlags = []struct {
|
|
||||||
ro, addr flag
|
|
||||||
}{{
|
|
||||||
// From Go 1.4 to 1.5
|
|
||||||
ro: 1 << 5,
|
|
||||||
addr: 1 << 7,
|
|
||||||
}, {
|
|
||||||
// Up to Go tip.
|
|
||||||
ro: 1<<5 | 1<<6,
|
|
||||||
addr: 1 << 8,
|
|
||||||
}}
|
|
||||||
|
|
||||||
var flagValOffset = func() uintptr {
|
|
||||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
|
||||||
if !ok {
|
|
||||||
panic("reflect.Value has no flag field")
|
|
||||||
}
|
|
||||||
return field.Offset
|
|
||||||
}()
|
|
||||||
|
|
||||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
|
||||||
func flagField(v *reflect.Value) *flag {
|
|
||||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
|
||||||
}
|
|
||||||
|
|
||||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
|
||||||
// the typical safety restrictions preventing access to unaddressable and
|
|
||||||
// unexported data. It works by digging the raw pointer to the underlying
|
|
||||||
// value out of the protected value and generating a new unprotected (unsafe)
|
|
||||||
// reflect.Value to it.
|
|
||||||
//
|
|
||||||
// This allows us to check for implementations of the Stringer and error
|
|
||||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
|
||||||
// inaccessible values such as unexported struct fields.
|
|
||||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
|
||||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
flagFieldPtr := flagField(&v)
|
|
||||||
*flagFieldPtr &^= flagRO
|
|
||||||
*flagFieldPtr |= flagAddr
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanity checks against future reflect package changes
|
|
||||||
// to the type or semantics of the Value.flag field.
|
|
||||||
func init() {
|
|
||||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
|
||||||
if !ok {
|
|
||||||
panic("reflect.Value has no flag field")
|
|
||||||
}
|
|
||||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
|
||||||
panic("reflect.Value flag field has changed kind")
|
|
||||||
}
|
|
||||||
type t0 int
|
|
||||||
var t struct {
|
|
||||||
A t0
|
|
||||||
// t0 will have flagEmbedRO set.
|
|
||||||
t0
|
|
||||||
// a will have flagStickyRO set
|
|
||||||
a t0
|
|
||||||
}
|
|
||||||
vA := reflect.ValueOf(t).FieldByName("A")
|
|
||||||
va := reflect.ValueOf(t).FieldByName("a")
|
|
||||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
|
||||||
|
|
||||||
// Infer flagRO from the difference between the flags
|
|
||||||
// for the (otherwise identical) fields in t.
|
|
||||||
flagPublic := *flagField(&vA)
|
|
||||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
|
||||||
flagRO = flagPublic ^ flagWithRO
|
|
||||||
|
|
||||||
// Infer flagAddr from the difference between a value
|
|
||||||
// taken from a pointer and not.
|
|
||||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
|
||||||
flagNoPtr := *flagField(&vA)
|
|
||||||
flagPtr := *flagField(&vPtrA)
|
|
||||||
flagAddr = flagNoPtr ^ flagPtr
|
|
||||||
|
|
||||||
// Check that the inferred flags tally with one of the known versions.
|
|
||||||
for _, f := range okFlags {
|
|
||||||
if flagRO == f.ro && flagAddr == f.addr {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic("reflect.Value read-only flag has changed semantics")
|
|
||||||
}
|
|
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
@ -1,38 +0,0 @@
|
|||||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
|
||||||
//
|
|
||||||
// Permission to use, copy, modify, and distribute this software for any
|
|
||||||
// purpose with or without fee is hereby granted, provided that the above
|
|
||||||
// copyright notice and this permission notice appear in all copies.
|
|
||||||
//
|
|
||||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
|
|
||||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
|
||||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
|
||||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
|
||||||
// tag is deprecated and thus should not be used.
|
|
||||||
// +build js appengine safe disableunsafe !go1.4
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import "reflect"
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
|
||||||
// not access to the unsafe package is available.
|
|
||||||
UnsafeDisabled = true
|
|
||||||
)
|
|
||||||
|
|
||||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
|
||||||
// that bypasses the typical safety restrictions preventing access to
|
|
||||||
// unaddressable and unexported data. However, doing this relies on access to
|
|
||||||
// the unsafe package. This is a stub version which simply returns the passed
|
|
||||||
// reflect.Value when the unsafe package is not available.
|
|
||||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
|
||||||
return v
|
|
||||||
}
|
|
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
@ -1,341 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
|
||||||
// the technique used in the fmt package.
|
|
||||||
var (
|
|
||||||
panicBytes = []byte("(PANIC=")
|
|
||||||
plusBytes = []byte("+")
|
|
||||||
iBytes = []byte("i")
|
|
||||||
trueBytes = []byte("true")
|
|
||||||
falseBytes = []byte("false")
|
|
||||||
interfaceBytes = []byte("(interface {})")
|
|
||||||
commaNewlineBytes = []byte(",\n")
|
|
||||||
newlineBytes = []byte("\n")
|
|
||||||
openBraceBytes = []byte("{")
|
|
||||||
openBraceNewlineBytes = []byte("{\n")
|
|
||||||
closeBraceBytes = []byte("}")
|
|
||||||
asteriskBytes = []byte("*")
|
|
||||||
colonBytes = []byte(":")
|
|
||||||
colonSpaceBytes = []byte(": ")
|
|
||||||
openParenBytes = []byte("(")
|
|
||||||
closeParenBytes = []byte(")")
|
|
||||||
spaceBytes = []byte(" ")
|
|
||||||
pointerChainBytes = []byte("->")
|
|
||||||
nilAngleBytes = []byte("<nil>")
|
|
||||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
|
||||||
maxShortBytes = []byte("<max>")
|
|
||||||
circularBytes = []byte("<already shown>")
|
|
||||||
circularShortBytes = []byte("<shown>")
|
|
||||||
invalidAngleBytes = []byte("<invalid>")
|
|
||||||
openBracketBytes = []byte("[")
|
|
||||||
closeBracketBytes = []byte("]")
|
|
||||||
percentBytes = []byte("%")
|
|
||||||
precisionBytes = []byte(".")
|
|
||||||
openAngleBytes = []byte("<")
|
|
||||||
closeAngleBytes = []byte(">")
|
|
||||||
openMapBytes = []byte("map[")
|
|
||||||
closeMapBytes = []byte("]")
|
|
||||||
lenEqualsBytes = []byte("len=")
|
|
||||||
capEqualsBytes = []byte("cap=")
|
|
||||||
)
|
|
||||||
|
|
||||||
// hexDigits is used to map a decimal value to a hex digit.
|
|
||||||
var hexDigits = "0123456789abcdef"
|
|
||||||
|
|
||||||
// catchPanic handles any panics that might occur during the handleMethods
|
|
||||||
// calls.
|
|
||||||
func catchPanic(w io.Writer, v reflect.Value) {
|
|
||||||
if err := recover(); err != nil {
|
|
||||||
w.Write(panicBytes)
|
|
||||||
fmt.Fprintf(w, "%v", err)
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleMethods attempts to call the Error and String methods on the underlying
|
|
||||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
|
||||||
//
|
|
||||||
// It handles panics in any called methods by catching and displaying the error
|
|
||||||
// as the formatted value.
|
|
||||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
|
||||||
// We need an interface to check if the type implements the error or
|
|
||||||
// Stringer interface. However, the reflect package won't give us an
|
|
||||||
// interface on certain things like unexported struct fields in order
|
|
||||||
// to enforce visibility rules. We use unsafe, when it's available,
|
|
||||||
// to bypass these restrictions since this package does not mutate the
|
|
||||||
// values.
|
|
||||||
if !v.CanInterface() {
|
|
||||||
if UnsafeDisabled {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
v = unsafeReflectValue(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Choose whether or not to do error and Stringer interface lookups against
|
|
||||||
// the base type or a pointer to the base type depending on settings.
|
|
||||||
// Technically calling one of these methods with a pointer receiver can
|
|
||||||
// mutate the value, however, types which choose to satisify an error or
|
|
||||||
// Stringer interface with a pointer receiver should not be mutating their
|
|
||||||
// state inside these interface methods.
|
|
||||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
|
||||||
v = unsafeReflectValue(v)
|
|
||||||
}
|
|
||||||
if v.CanAddr() {
|
|
||||||
v = v.Addr()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is it an error or Stringer?
|
|
||||||
switch iface := v.Interface().(type) {
|
|
||||||
case error:
|
|
||||||
defer catchPanic(w, v)
|
|
||||||
if cs.ContinueOnMethod {
|
|
||||||
w.Write(openParenBytes)
|
|
||||||
w.Write([]byte(iface.Error()))
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
w.Write(spaceBytes)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Write([]byte(iface.Error()))
|
|
||||||
return true
|
|
||||||
|
|
||||||
case fmt.Stringer:
|
|
||||||
defer catchPanic(w, v)
|
|
||||||
if cs.ContinueOnMethod {
|
|
||||||
w.Write(openParenBytes)
|
|
||||||
w.Write([]byte(iface.String()))
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
w.Write(spaceBytes)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
w.Write([]byte(iface.String()))
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// printBool outputs a boolean value as true or false to Writer w.
|
|
||||||
func printBool(w io.Writer, val bool) {
|
|
||||||
if val {
|
|
||||||
w.Write(trueBytes)
|
|
||||||
} else {
|
|
||||||
w.Write(falseBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// printInt outputs a signed integer value to Writer w.
|
|
||||||
func printInt(w io.Writer, val int64, base int) {
|
|
||||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// printUint outputs an unsigned integer value to Writer w.
|
|
||||||
func printUint(w io.Writer, val uint64, base int) {
|
|
||||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// printFloat outputs a floating point value using the specified precision,
|
|
||||||
// which is expected to be 32 or 64bit, to Writer w.
|
|
||||||
func printFloat(w io.Writer, val float64, precision int) {
|
|
||||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// printComplex outputs a complex value using the specified float precision
|
|
||||||
// for the real and imaginary parts to Writer w.
|
|
||||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
|
||||||
r := real(c)
|
|
||||||
w.Write(openParenBytes)
|
|
||||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
|
||||||
i := imag(c)
|
|
||||||
if i >= 0 {
|
|
||||||
w.Write(plusBytes)
|
|
||||||
}
|
|
||||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
|
||||||
w.Write(iBytes)
|
|
||||||
w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
|
||||||
// prefix to Writer w.
|
|
||||||
func printHexPtr(w io.Writer, p uintptr) {
|
|
||||||
// Null pointer.
|
|
||||||
num := uint64(p)
|
|
||||||
if num == 0 {
|
|
||||||
w.Write(nilAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
|
||||||
buf := make([]byte, 18)
|
|
||||||
|
|
||||||
// It's simpler to construct the hex string right to left.
|
|
||||||
base := uint64(16)
|
|
||||||
i := len(buf) - 1
|
|
||||||
for num >= base {
|
|
||||||
buf[i] = hexDigits[num%base]
|
|
||||||
num /= base
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
buf[i] = hexDigits[num]
|
|
||||||
|
|
||||||
// Add '0x' prefix.
|
|
||||||
i--
|
|
||||||
buf[i] = 'x'
|
|
||||||
i--
|
|
||||||
buf[i] = '0'
|
|
||||||
|
|
||||||
// Strip unused leading bytes.
|
|
||||||
buf = buf[i:]
|
|
||||||
w.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
|
||||||
// elements to be sorted.
|
|
||||||
type valuesSorter struct {
|
|
||||||
values []reflect.Value
|
|
||||||
strings []string // either nil or same len and values
|
|
||||||
cs *ConfigState
|
|
||||||
}
|
|
||||||
|
|
||||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
|
||||||
// surrogate keys on which the data should be sorted. It uses flags in
|
|
||||||
// ConfigState to decide if and how to populate those surrogate keys.
|
|
||||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
|
||||||
vs := &valuesSorter{values: values, cs: cs}
|
|
||||||
if canSortSimply(vs.values[0].Kind()) {
|
|
||||||
return vs
|
|
||||||
}
|
|
||||||
if !cs.DisableMethods {
|
|
||||||
vs.strings = make([]string, len(values))
|
|
||||||
for i := range vs.values {
|
|
||||||
b := bytes.Buffer{}
|
|
||||||
if !handleMethods(cs, &b, vs.values[i]) {
|
|
||||||
vs.strings = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
vs.strings[i] = b.String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if vs.strings == nil && cs.SpewKeys {
|
|
||||||
vs.strings = make([]string, len(values))
|
|
||||||
for i := range vs.values {
|
|
||||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return vs
|
|
||||||
}
|
|
||||||
|
|
||||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
|
||||||
// directly, or whether it should be considered for sorting by surrogate keys
|
|
||||||
// (if the ConfigState allows it).
|
|
||||||
func canSortSimply(kind reflect.Kind) bool {
|
|
||||||
// This switch parallels valueSortLess, except for the default case.
|
|
||||||
switch kind {
|
|
||||||
case reflect.Bool:
|
|
||||||
return true
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
return true
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
return true
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return true
|
|
||||||
case reflect.String:
|
|
||||||
return true
|
|
||||||
case reflect.Uintptr:
|
|
||||||
return true
|
|
||||||
case reflect.Array:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of values in the slice. It is part of the
|
|
||||||
// sort.Interface implementation.
|
|
||||||
func (s *valuesSorter) Len() int {
|
|
||||||
return len(s.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap swaps the values at the passed indices. It is part of the
|
|
||||||
// sort.Interface implementation.
|
|
||||||
func (s *valuesSorter) Swap(i, j int) {
|
|
||||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
|
||||||
if s.strings != nil {
|
|
||||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// valueSortLess returns whether the first value should sort before the second
|
|
||||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
|
||||||
// implementation.
|
|
||||||
func valueSortLess(a, b reflect.Value) bool {
|
|
||||||
switch a.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
return !a.Bool() && b.Bool()
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
return a.Int() < b.Int()
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
return a.Uint() < b.Uint()
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return a.Float() < b.Float()
|
|
||||||
case reflect.String:
|
|
||||||
return a.String() < b.String()
|
|
||||||
case reflect.Uintptr:
|
|
||||||
return a.Uint() < b.Uint()
|
|
||||||
case reflect.Array:
|
|
||||||
// Compare the contents of both arrays.
|
|
||||||
l := a.Len()
|
|
||||||
for i := 0; i < l; i++ {
|
|
||||||
av := a.Index(i)
|
|
||||||
bv := b.Index(i)
|
|
||||||
if av.Interface() == bv.Interface() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return valueSortLess(av, bv)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return a.String() < b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Less returns whether the value at index i should sort before the
|
|
||||||
// value at index j. It is part of the sort.Interface implementation.
|
|
||||||
func (s *valuesSorter) Less(i, j int) bool {
|
|
||||||
if s.strings == nil {
|
|
||||||
return valueSortLess(s.values[i], s.values[j])
|
|
||||||
}
|
|
||||||
return s.strings[i] < s.strings[j]
|
|
||||||
}
|
|
||||||
|
|
||||||
// sortValues is a sort function that handles both native types and any type that
|
|
||||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
|
||||||
// their Value.String() value to ensure display stability.
|
|
||||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sort.Sort(newValuesSorter(values, cs))
|
|
||||||
}
|
|
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
@ -1,306 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ConfigState houses the configuration options used by spew to format and
|
|
||||||
// display values. There is a global instance, Config, that is used to control
|
|
||||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
|
||||||
// provides methods equivalent to the top-level functions.
|
|
||||||
//
|
|
||||||
// The zero value for ConfigState provides no indentation. You would typically
|
|
||||||
// want to set it to a space or a tab.
|
|
||||||
//
|
|
||||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
|
||||||
// with default settings. See the documentation of NewDefaultConfig for default
|
|
||||||
// values.
|
|
||||||
type ConfigState struct {
|
|
||||||
// Indent specifies the string to use for each indentation level. The
|
|
||||||
// global config instance that all top-level functions use set this to a
|
|
||||||
// single space by default. If you would like more indentation, you might
|
|
||||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
|
||||||
Indent string
|
|
||||||
|
|
||||||
// MaxDepth controls the maximum number of levels to descend into nested
|
|
||||||
// data structures. The default, 0, means there is no limit.
|
|
||||||
//
|
|
||||||
// NOTE: Circular data structures are properly detected, so it is not
|
|
||||||
// necessary to set this value unless you specifically want to limit deeply
|
|
||||||
// nested data structures.
|
|
||||||
MaxDepth int
|
|
||||||
|
|
||||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
|
||||||
// invoked for types that implement them.
|
|
||||||
DisableMethods bool
|
|
||||||
|
|
||||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
|
||||||
// error and Stringer interfaces on types which only accept a pointer
|
|
||||||
// receiver when the current type is not a pointer.
|
|
||||||
//
|
|
||||||
// NOTE: This might be an unsafe action since calling one of these methods
|
|
||||||
// with a pointer receiver could technically mutate the value, however,
|
|
||||||
// in practice, types which choose to satisify an error or Stringer
|
|
||||||
// interface with a pointer receiver should not be mutating their state
|
|
||||||
// inside these interface methods. As a result, this option relies on
|
|
||||||
// access to the unsafe package, so it will not have any effect when
|
|
||||||
// running in environments without access to the unsafe package such as
|
|
||||||
// Google App Engine or with the "safe" build tag specified.
|
|
||||||
DisablePointerMethods bool
|
|
||||||
|
|
||||||
// DisablePointerAddresses specifies whether to disable the printing of
|
|
||||||
// pointer addresses. This is useful when diffing data structures in tests.
|
|
||||||
DisablePointerAddresses bool
|
|
||||||
|
|
||||||
// DisableCapacities specifies whether to disable the printing of capacities
|
|
||||||
// for arrays, slices, maps and channels. This is useful when diffing
|
|
||||||
// data structures in tests.
|
|
||||||
DisableCapacities bool
|
|
||||||
|
|
||||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
|
||||||
// a custom error or Stringer interface is invoked. The default, false,
|
|
||||||
// means it will print the results of invoking the custom error or Stringer
|
|
||||||
// interface and return immediately instead of continuing to recurse into
|
|
||||||
// the internals of the data type.
|
|
||||||
//
|
|
||||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
|
||||||
// via the DisableMethods or DisablePointerMethods options.
|
|
||||||
ContinueOnMethod bool
|
|
||||||
|
|
||||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
|
||||||
// this to have a more deterministic, diffable output. Note that only
|
|
||||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
|
||||||
// that support the error or Stringer interfaces (if methods are
|
|
||||||
// enabled) are supported, with other types sorted according to the
|
|
||||||
// reflect.Value.String() output which guarantees display stability.
|
|
||||||
SortKeys bool
|
|
||||||
|
|
||||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
|
||||||
// be spewed to strings and sorted by those strings. This is only
|
|
||||||
// considered if SortKeys is true.
|
|
||||||
SpewKeys bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config is the active configuration of the top-level functions.
|
|
||||||
// The configuration can be changed by modifying the contents of spew.Config.
|
|
||||||
var Config = ConfigState{Indent: " "}
|
|
||||||
|
|
||||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the formatted string as a value that satisfies error. See NewFormatter
|
|
||||||
// for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
|
||||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Print(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Printf(format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Println(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
|
||||||
return fmt.Sprint(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
|
||||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
|
||||||
// the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
|
||||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
|
||||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
|
||||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
|
||||||
return fmt.Sprintln(c.convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
|
||||||
interface. As a result, it integrates cleanly with standard fmt package
|
|
||||||
printing functions. The formatter is useful for inline printing of smaller data
|
|
||||||
types similar to the standard %v format specifier.
|
|
||||||
|
|
||||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
|
||||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
|
||||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
|
||||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
|
||||||
the width and precision arguments (however they will still work on the format
|
|
||||||
specifiers not handled by the custom formatter).
|
|
||||||
|
|
||||||
Typically this function shouldn't be called directly. It is much easier to make
|
|
||||||
use of the custom formatter by calling one of the convenience functions such as
|
|
||||||
c.Printf, c.Println, or c.Printf.
|
|
||||||
*/
|
|
||||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
|
||||||
return newFormatter(c, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
|
||||||
// exactly the same as Dump.
|
|
||||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
|
||||||
fdump(c, w, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
Dump displays the passed parameters to standard out with newlines, customizable
|
|
||||||
indentation, and additional debug information such as complete types and all
|
|
||||||
pointer addresses used to indirect to the final value. It provides the
|
|
||||||
following features over the built-in printing facilities provided by the fmt
|
|
||||||
package:
|
|
||||||
|
|
||||||
* Pointers are dereferenced and followed
|
|
||||||
* Circular data structures are detected and handled properly
|
|
||||||
* Custom Stringer/error interfaces are optionally invoked, including
|
|
||||||
on unexported types
|
|
||||||
* Custom types which only implement the Stringer/error interfaces via
|
|
||||||
a pointer receiver are optionally invoked when passing non-pointer
|
|
||||||
variables
|
|
||||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
|
||||||
includes offsets, byte values in hex, and ASCII output
|
|
||||||
|
|
||||||
The configuration options are controlled by modifying the public members
|
|
||||||
of c. See ConfigState for options documentation.
|
|
||||||
|
|
||||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
|
||||||
get the formatted result as a string.
|
|
||||||
*/
|
|
||||||
func (c *ConfigState) Dump(a ...interface{}) {
|
|
||||||
fdump(c, os.Stdout, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
|
||||||
// as Dump.
|
|
||||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fdump(c, &buf, a...)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
|
||||||
// length with each argument converted to a spew Formatter interface using
|
|
||||||
// the ConfigState associated with s.
|
|
||||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
|
||||||
formatters = make([]interface{}, len(args))
|
|
||||||
for index, arg := range args {
|
|
||||||
formatters[index] = newFormatter(c, arg)
|
|
||||||
}
|
|
||||||
return formatters
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
|
||||||
//
|
|
||||||
// Indent: " "
|
|
||||||
// MaxDepth: 0
|
|
||||||
// DisableMethods: false
|
|
||||||
// DisablePointerMethods: false
|
|
||||||
// ContinueOnMethod: false
|
|
||||||
// SortKeys: false
|
|
||||||
func NewDefaultConfig() *ConfigState {
|
|
||||||
return &ConfigState{Indent: " "}
|
|
||||||
}
|
|
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
@ -1,211 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
|
||||||
debugging.
|
|
||||||
|
|
||||||
A quick overview of the additional features spew provides over the built-in
|
|
||||||
printing facilities for Go data types are as follows:
|
|
||||||
|
|
||||||
* Pointers are dereferenced and followed
|
|
||||||
* Circular data structures are detected and handled properly
|
|
||||||
* Custom Stringer/error interfaces are optionally invoked, including
|
|
||||||
on unexported types
|
|
||||||
* Custom types which only implement the Stringer/error interfaces via
|
|
||||||
a pointer receiver are optionally invoked when passing non-pointer
|
|
||||||
variables
|
|
||||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
|
||||||
includes offsets, byte values in hex, and ASCII output (only when using
|
|
||||||
Dump style)
|
|
||||||
|
|
||||||
There are two different approaches spew allows for dumping Go data structures:
|
|
||||||
|
|
||||||
* Dump style which prints with newlines, customizable indentation,
|
|
||||||
and additional debug information such as types and all pointer addresses
|
|
||||||
used to indirect to the final value
|
|
||||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
|
||||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
|
||||||
similar to the default %v while providing the additional functionality
|
|
||||||
outlined above and passing unsupported format verbs such as %x and %q
|
|
||||||
along to fmt
|
|
||||||
|
|
||||||
Quick Start
|
|
||||||
|
|
||||||
This section demonstrates how to quickly get started with spew. See the
|
|
||||||
sections below for further details on formatting and configuration options.
|
|
||||||
|
|
||||||
To dump a variable with full newlines, indentation, type, and pointer
|
|
||||||
information use Dump, Fdump, or Sdump:
|
|
||||||
spew.Dump(myVar1, myVar2, ...)
|
|
||||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
|
||||||
str := spew.Sdump(myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
|
||||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
|
||||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
|
||||||
%#+v (adds types and pointer addresses):
|
|
||||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
|
|
||||||
Configuration Options
|
|
||||||
|
|
||||||
Configuration of spew is handled by fields in the ConfigState type. For
|
|
||||||
convenience, all of the top-level functions use a global state available
|
|
||||||
via the spew.Config global.
|
|
||||||
|
|
||||||
It is also possible to create a ConfigState instance that provides methods
|
|
||||||
equivalent to the top-level functions. This allows concurrent configuration
|
|
||||||
options. See the ConfigState documentation for more details.
|
|
||||||
|
|
||||||
The following configuration options are available:
|
|
||||||
* Indent
|
|
||||||
String to use for each indentation level for Dump functions.
|
|
||||||
It is a single space by default. A popular alternative is "\t".
|
|
||||||
|
|
||||||
* MaxDepth
|
|
||||||
Maximum number of levels to descend into nested data structures.
|
|
||||||
There is no limit by default.
|
|
||||||
|
|
||||||
* DisableMethods
|
|
||||||
Disables invocation of error and Stringer interface methods.
|
|
||||||
Method invocation is enabled by default.
|
|
||||||
|
|
||||||
* DisablePointerMethods
|
|
||||||
Disables invocation of error and Stringer interface methods on types
|
|
||||||
which only accept pointer receivers from non-pointer variables.
|
|
||||||
Pointer method invocation is enabled by default.
|
|
||||||
|
|
||||||
* DisablePointerAddresses
|
|
||||||
DisablePointerAddresses specifies whether to disable the printing of
|
|
||||||
pointer addresses. This is useful when diffing data structures in tests.
|
|
||||||
|
|
||||||
* DisableCapacities
|
|
||||||
DisableCapacities specifies whether to disable the printing of
|
|
||||||
capacities for arrays, slices, maps and channels. This is useful when
|
|
||||||
diffing data structures in tests.
|
|
||||||
|
|
||||||
* ContinueOnMethod
|
|
||||||
Enables recursion into types after invoking error and Stringer interface
|
|
||||||
methods. Recursion after method invocation is disabled by default.
|
|
||||||
|
|
||||||
* SortKeys
|
|
||||||
Specifies map keys should be sorted before being printed. Use
|
|
||||||
this to have a more deterministic, diffable output. Note that
|
|
||||||
only native types (bool, int, uint, floats, uintptr and string)
|
|
||||||
and types which implement error or Stringer interfaces are
|
|
||||||
supported with other types sorted according to the
|
|
||||||
reflect.Value.String() output which guarantees display
|
|
||||||
stability. Natural map order is used by default.
|
|
||||||
|
|
||||||
* SpewKeys
|
|
||||||
Specifies that, as a last resort attempt, map keys should be
|
|
||||||
spewed to strings and sorted by those strings. This is only
|
|
||||||
considered if SortKeys is true.
|
|
||||||
|
|
||||||
Dump Usage
|
|
||||||
|
|
||||||
Simply call spew.Dump with a list of variables you want to dump:
|
|
||||||
|
|
||||||
spew.Dump(myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
|
||||||
io.Writer. For example, to dump to standard error:
|
|
||||||
|
|
||||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
|
||||||
|
|
||||||
str := spew.Sdump(myVar1, myVar2, ...)
|
|
||||||
|
|
||||||
Sample Dump Output
|
|
||||||
|
|
||||||
See the Dump example for details on the setup of the types and variables being
|
|
||||||
shown here.
|
|
||||||
|
|
||||||
(main.Foo) {
|
|
||||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
|
||||||
flag: (main.Flag) flagTwo,
|
|
||||||
data: (uintptr) <nil>
|
|
||||||
}),
|
|
||||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
|
||||||
(string) (len=3) "one": (bool) true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
|
||||||
command as shown.
|
|
||||||
([]uint8) (len=32 cap=32) {
|
|
||||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
|
||||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
|
||||||
00000020 31 32 |12|
|
|
||||||
}
|
|
||||||
|
|
||||||
Custom Formatter
|
|
||||||
|
|
||||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
|
||||||
so that it integrates cleanly with standard fmt package printing functions. The
|
|
||||||
formatter is useful for inline printing of smaller data types similar to the
|
|
||||||
standard %v format specifier.
|
|
||||||
|
|
||||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
|
||||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
|
||||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
|
||||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
|
||||||
the width and precision arguments (however they will still work on the format
|
|
||||||
specifiers not handled by the custom formatter).
|
|
||||||
|
|
||||||
Custom Formatter Usage
|
|
||||||
|
|
||||||
The simplest way to make use of the spew custom formatter is to call one of the
|
|
||||||
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
|
||||||
functions have syntax you are most likely already familiar with:
|
|
||||||
|
|
||||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
spew.Println(myVar, myVar2)
|
|
||||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
|
||||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
|
||||||
|
|
||||||
See the Index for the full list convenience functions.
|
|
||||||
|
|
||||||
Sample Formatter Output
|
|
||||||
|
|
||||||
Double pointer to a uint8:
|
|
||||||
%v: <**>5
|
|
||||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
|
||||||
%#v: (**uint8)5
|
|
||||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
|
||||||
|
|
||||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
|
||||||
%v: <*>{1 <*><shown>}
|
|
||||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
|
||||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
|
||||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
|
||||||
|
|
||||||
See the Printf example for details on the setup of variables being shown
|
|
||||||
here.
|
|
||||||
|
|
||||||
Errors
|
|
||||||
|
|
||||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
|
||||||
detects them and handles them internally by printing the panic information
|
|
||||||
inline with the output. Since spew is intended to provide deep pretty printing
|
|
||||||
capabilities on structures, it intentionally does not return any errors.
|
|
||||||
*/
|
|
||||||
package spew
|
|
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
@ -1,509 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
|
||||||
// convert cgo types to uint8 slices for hexdumping.
|
|
||||||
uint8Type = reflect.TypeOf(uint8(0))
|
|
||||||
|
|
||||||
// cCharRE is a regular expression that matches a cgo char.
|
|
||||||
// It is used to detect character arrays to hexdump them.
|
|
||||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
|
||||||
|
|
||||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
|
||||||
// char. It is used to detect unsigned character arrays to hexdump
|
|
||||||
// them.
|
|
||||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
|
||||||
|
|
||||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
|
||||||
// It is used to detect uint8_t arrays to hexdump them.
|
|
||||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// dumpState contains information about the state of a dump operation.
|
|
||||||
type dumpState struct {
|
|
||||||
w io.Writer
|
|
||||||
depth int
|
|
||||||
pointers map[uintptr]int
|
|
||||||
ignoreNextType bool
|
|
||||||
ignoreNextIndent bool
|
|
||||||
cs *ConfigState
|
|
||||||
}
|
|
||||||
|
|
||||||
// indent performs indentation according to the depth level and cs.Indent
|
|
||||||
// option.
|
|
||||||
func (d *dumpState) indent() {
|
|
||||||
if d.ignoreNextIndent {
|
|
||||||
d.ignoreNextIndent = false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
|
||||||
// This is useful for data types like structs, arrays, slices, and maps which
|
|
||||||
// can contain varying types packed inside an interface.
|
|
||||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
|
||||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
|
||||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
|
||||||
// Remove pointers at or below the current depth from map used to detect
|
|
||||||
// circular refs.
|
|
||||||
for k, depth := range d.pointers {
|
|
||||||
if depth >= d.depth {
|
|
||||||
delete(d.pointers, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keep list of all dereferenced pointers to show later.
|
|
||||||
pointerChain := make([]uintptr, 0)
|
|
||||||
|
|
||||||
// Figure out how many levels of indirection there are by dereferencing
|
|
||||||
// pointers and unpacking interfaces down the chain while detecting circular
|
|
||||||
// references.
|
|
||||||
nilFound := false
|
|
||||||
cycleFound := false
|
|
||||||
indirects := 0
|
|
||||||
ve := v
|
|
||||||
for ve.Kind() == reflect.Ptr {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
indirects++
|
|
||||||
addr := ve.Pointer()
|
|
||||||
pointerChain = append(pointerChain, addr)
|
|
||||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
|
||||||
cycleFound = true
|
|
||||||
indirects--
|
|
||||||
break
|
|
||||||
}
|
|
||||||
d.pointers[addr] = d.depth
|
|
||||||
|
|
||||||
ve = ve.Elem()
|
|
||||||
if ve.Kind() == reflect.Interface {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
ve = ve.Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display type information.
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
|
||||||
d.w.Write([]byte(ve.Type().String()))
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
|
|
||||||
// Display pointer information.
|
|
||||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
for i, addr := range pointerChain {
|
|
||||||
if i > 0 {
|
|
||||||
d.w.Write(pointerChainBytes)
|
|
||||||
}
|
|
||||||
printHexPtr(d.w, addr)
|
|
||||||
}
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display dereferenced value.
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
switch {
|
|
||||||
case nilFound:
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
|
|
||||||
case cycleFound:
|
|
||||||
d.w.Write(circularBytes)
|
|
||||||
|
|
||||||
default:
|
|
||||||
d.ignoreNextType = true
|
|
||||||
d.dump(ve)
|
|
||||||
}
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
|
||||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
|
||||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
|
||||||
// Determine whether this type should be hex dumped or not. Also,
|
|
||||||
// for types which should be hexdumped, try to use the underlying data
|
|
||||||
// first, then fall back to trying to convert them to a uint8 slice.
|
|
||||||
var buf []uint8
|
|
||||||
doConvert := false
|
|
||||||
doHexDump := false
|
|
||||||
numEntries := v.Len()
|
|
||||||
if numEntries > 0 {
|
|
||||||
vt := v.Index(0).Type()
|
|
||||||
vts := vt.String()
|
|
||||||
switch {
|
|
||||||
// C types that need to be converted.
|
|
||||||
case cCharRE.MatchString(vts):
|
|
||||||
fallthrough
|
|
||||||
case cUnsignedCharRE.MatchString(vts):
|
|
||||||
fallthrough
|
|
||||||
case cUint8tCharRE.MatchString(vts):
|
|
||||||
doConvert = true
|
|
||||||
|
|
||||||
// Try to use existing uint8 slices and fall back to converting
|
|
||||||
// and copying if that fails.
|
|
||||||
case vt.Kind() == reflect.Uint8:
|
|
||||||
// We need an addressable interface to convert the type
|
|
||||||
// to a byte slice. However, the reflect package won't
|
|
||||||
// give us an interface on certain things like
|
|
||||||
// unexported struct fields in order to enforce
|
|
||||||
// visibility rules. We use unsafe, when available, to
|
|
||||||
// bypass these restrictions since this package does not
|
|
||||||
// mutate the values.
|
|
||||||
vs := v
|
|
||||||
if !vs.CanInterface() || !vs.CanAddr() {
|
|
||||||
vs = unsafeReflectValue(vs)
|
|
||||||
}
|
|
||||||
if !UnsafeDisabled {
|
|
||||||
vs = vs.Slice(0, numEntries)
|
|
||||||
|
|
||||||
// Use the existing uint8 slice if it can be
|
|
||||||
// type asserted.
|
|
||||||
iface := vs.Interface()
|
|
||||||
if slice, ok := iface.([]uint8); ok {
|
|
||||||
buf = slice
|
|
||||||
doHexDump = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// The underlying data needs to be converted if it can't
|
|
||||||
// be type asserted to a uint8 slice.
|
|
||||||
doConvert = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy and convert the underlying type if needed.
|
|
||||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
|
||||||
// Convert and copy each element into a uint8 byte
|
|
||||||
// slice.
|
|
||||||
buf = make([]uint8, numEntries)
|
|
||||||
for i := 0; i < numEntries; i++ {
|
|
||||||
vv := v.Index(i)
|
|
||||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
|
||||||
}
|
|
||||||
doHexDump = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hexdump the entire slice as needed.
|
|
||||||
if doHexDump {
|
|
||||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
|
||||||
str := indent + hex.Dump(buf)
|
|
||||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
|
||||||
str = strings.TrimRight(str, d.cs.Indent)
|
|
||||||
d.w.Write([]byte(str))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recursively call dump for each item.
|
|
||||||
for i := 0; i < numEntries; i++ {
|
|
||||||
d.dump(d.unpackValue(v.Index(i)))
|
|
||||||
if i < (numEntries - 1) {
|
|
||||||
d.w.Write(commaNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
|
||||||
// value to figure out what kind of object we are dealing with and formats it
|
|
||||||
// appropriately. It is a recursive function, however circular data structures
|
|
||||||
// are detected and handled properly.
|
|
||||||
func (d *dumpState) dump(v reflect.Value) {
|
|
||||||
// Handle invalid reflect values immediately.
|
|
||||||
kind := v.Kind()
|
|
||||||
if kind == reflect.Invalid {
|
|
||||||
d.w.Write(invalidAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle pointers specially.
|
|
||||||
if kind == reflect.Ptr {
|
|
||||||
d.indent()
|
|
||||||
d.dumpPtr(v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print type information unless already handled elsewhere.
|
|
||||||
if !d.ignoreNextType {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
d.w.Write([]byte(v.Type().String()))
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
d.w.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
d.ignoreNextType = false
|
|
||||||
|
|
||||||
// Display length and capacity if the built-in len and cap functions
|
|
||||||
// work with the value's kind and the len/cap itself is non-zero.
|
|
||||||
valueLen, valueCap := 0, 0
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
|
||||||
valueLen, valueCap = v.Len(), v.Cap()
|
|
||||||
case reflect.Map, reflect.String:
|
|
||||||
valueLen = v.Len()
|
|
||||||
}
|
|
||||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
|
||||||
d.w.Write(openParenBytes)
|
|
||||||
if valueLen != 0 {
|
|
||||||
d.w.Write(lenEqualsBytes)
|
|
||||||
printInt(d.w, int64(valueLen), 10)
|
|
||||||
}
|
|
||||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
|
||||||
if valueLen != 0 {
|
|
||||||
d.w.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
d.w.Write(capEqualsBytes)
|
|
||||||
printInt(d.w, int64(valueCap), 10)
|
|
||||||
}
|
|
||||||
d.w.Write(closeParenBytes)
|
|
||||||
d.w.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
|
||||||
// is enabled
|
|
||||||
if !d.cs.DisableMethods {
|
|
||||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
|
||||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch kind {
|
|
||||||
case reflect.Invalid:
|
|
||||||
// Do nothing. We should never get here since invalid has already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Bool:
|
|
||||||
printBool(d.w, v.Bool())
|
|
||||||
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
printInt(d.w, v.Int(), 10)
|
|
||||||
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
printUint(d.w, v.Uint(), 10)
|
|
||||||
|
|
||||||
case reflect.Float32:
|
|
||||||
printFloat(d.w, v.Float(), 32)
|
|
||||||
|
|
||||||
case reflect.Float64:
|
|
||||||
printFloat(d.w, v.Float(), 64)
|
|
||||||
|
|
||||||
case reflect.Complex64:
|
|
||||||
printComplex(d.w, v.Complex(), 32)
|
|
||||||
|
|
||||||
case reflect.Complex128:
|
|
||||||
printComplex(d.w, v.Complex(), 64)
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
if v.IsNil() {
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fallthrough
|
|
||||||
|
|
||||||
case reflect.Array:
|
|
||||||
d.w.Write(openBraceNewlineBytes)
|
|
||||||
d.depth++
|
|
||||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(maxNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.dumpSlice(v)
|
|
||||||
}
|
|
||||||
d.depth--
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.String:
|
|
||||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
|
||||||
|
|
||||||
case reflect.Interface:
|
|
||||||
// The only time we should get here is for nil interfaces due to
|
|
||||||
// unpackValue calls.
|
|
||||||
if v.IsNil() {
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
|
||||||
// Do nothing. We should never get here since pointers have already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
// nil maps should be indicated as different than empty maps
|
|
||||||
if v.IsNil() {
|
|
||||||
d.w.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
d.w.Write(openBraceNewlineBytes)
|
|
||||||
d.depth++
|
|
||||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(maxNewlineBytes)
|
|
||||||
} else {
|
|
||||||
numEntries := v.Len()
|
|
||||||
keys := v.MapKeys()
|
|
||||||
if d.cs.SortKeys {
|
|
||||||
sortValues(keys, d.cs)
|
|
||||||
}
|
|
||||||
for i, key := range keys {
|
|
||||||
d.dump(d.unpackValue(key))
|
|
||||||
d.w.Write(colonSpaceBytes)
|
|
||||||
d.ignoreNextIndent = true
|
|
||||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
|
||||||
if i < (numEntries - 1) {
|
|
||||||
d.w.Write(commaNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.depth--
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
d.w.Write(openBraceNewlineBytes)
|
|
||||||
d.depth++
|
|
||||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(maxNewlineBytes)
|
|
||||||
} else {
|
|
||||||
vt := v.Type()
|
|
||||||
numFields := v.NumField()
|
|
||||||
for i := 0; i < numFields; i++ {
|
|
||||||
d.indent()
|
|
||||||
vtf := vt.Field(i)
|
|
||||||
d.w.Write([]byte(vtf.Name))
|
|
||||||
d.w.Write(colonSpaceBytes)
|
|
||||||
d.ignoreNextIndent = true
|
|
||||||
d.dump(d.unpackValue(v.Field(i)))
|
|
||||||
if i < (numFields - 1) {
|
|
||||||
d.w.Write(commaNewlineBytes)
|
|
||||||
} else {
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
d.depth--
|
|
||||||
d.indent()
|
|
||||||
d.w.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.Uintptr:
|
|
||||||
printHexPtr(d.w, uintptr(v.Uint()))
|
|
||||||
|
|
||||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
|
||||||
printHexPtr(d.w, v.Pointer())
|
|
||||||
|
|
||||||
// There were not any other types at the time this code was written, but
|
|
||||||
// fall back to letting the default fmt package handle it in case any new
|
|
||||||
// types are added.
|
|
||||||
default:
|
|
||||||
if v.CanInterface() {
|
|
||||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(d.w, "%v", v.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// fdump is a helper function to consolidate the logic from the various public
|
|
||||||
// methods which take varying writers and config states.
|
|
||||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
|
||||||
for _, arg := range a {
|
|
||||||
if arg == nil {
|
|
||||||
w.Write(interfaceBytes)
|
|
||||||
w.Write(spaceBytes)
|
|
||||||
w.Write(nilAngleBytes)
|
|
||||||
w.Write(newlineBytes)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
d := dumpState{w: w, cs: cs}
|
|
||||||
d.pointers = make(map[uintptr]int)
|
|
||||||
d.dump(reflect.ValueOf(arg))
|
|
||||||
d.w.Write(newlineBytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
|
||||||
// exactly the same as Dump.
|
|
||||||
func Fdump(w io.Writer, a ...interface{}) {
|
|
||||||
fdump(&Config, w, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
|
||||||
// as Dump.
|
|
||||||
func Sdump(a ...interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fdump(&Config, &buf, a...)
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
Dump displays the passed parameters to standard out with newlines, customizable
|
|
||||||
indentation, and additional debug information such as complete types and all
|
|
||||||
pointer addresses used to indirect to the final value. It provides the
|
|
||||||
following features over the built-in printing facilities provided by the fmt
|
|
||||||
package:
|
|
||||||
|
|
||||||
* Pointers are dereferenced and followed
|
|
||||||
* Circular data structures are detected and handled properly
|
|
||||||
* Custom Stringer/error interfaces are optionally invoked, including
|
|
||||||
on unexported types
|
|
||||||
* Custom types which only implement the Stringer/error interfaces via
|
|
||||||
a pointer receiver are optionally invoked when passing non-pointer
|
|
||||||
variables
|
|
||||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
|
||||||
includes offsets, byte values in hex, and ASCII output
|
|
||||||
|
|
||||||
The configuration options are controlled by an exported package global,
|
|
||||||
spew.Config. See ConfigState for options documentation.
|
|
||||||
|
|
||||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
|
||||||
get the formatted result as a string.
|
|
||||||
*/
|
|
||||||
func Dump(a ...interface{}) {
|
|
||||||
fdump(&Config, os.Stdout, a...)
|
|
||||||
}
|
|
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
@ -1,419 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
|
||||||
const supportedFlags = "0-+# "
|
|
||||||
|
|
||||||
// formatState implements the fmt.Formatter interface and contains information
|
|
||||||
// about the state of a formatting operation. The NewFormatter function can
|
|
||||||
// be used to get a new Formatter which can be used directly as arguments
|
|
||||||
// in standard fmt package printing calls.
|
|
||||||
type formatState struct {
|
|
||||||
value interface{}
|
|
||||||
fs fmt.State
|
|
||||||
depth int
|
|
||||||
pointers map[uintptr]int
|
|
||||||
ignoreNextType bool
|
|
||||||
cs *ConfigState
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildDefaultFormat recreates the original format string without precision
|
|
||||||
// and width information to pass in to fmt.Sprintf in the case of an
|
|
||||||
// unrecognized type. Unless new types are added to the language, this
|
|
||||||
// function won't ever be called.
|
|
||||||
func (f *formatState) buildDefaultFormat() (format string) {
|
|
||||||
buf := bytes.NewBuffer(percentBytes)
|
|
||||||
|
|
||||||
for _, flag := range supportedFlags {
|
|
||||||
if f.fs.Flag(int(flag)) {
|
|
||||||
buf.WriteRune(flag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteRune('v')
|
|
||||||
|
|
||||||
format = buf.String()
|
|
||||||
return format
|
|
||||||
}
|
|
||||||
|
|
||||||
// constructOrigFormat recreates the original format string including precision
|
|
||||||
// and width information to pass along to the standard fmt package. This allows
|
|
||||||
// automatic deferral of all format strings this package doesn't support.
|
|
||||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
|
||||||
buf := bytes.NewBuffer(percentBytes)
|
|
||||||
|
|
||||||
for _, flag := range supportedFlags {
|
|
||||||
if f.fs.Flag(int(flag)) {
|
|
||||||
buf.WriteRune(flag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if width, ok := f.fs.Width(); ok {
|
|
||||||
buf.WriteString(strconv.Itoa(width))
|
|
||||||
}
|
|
||||||
|
|
||||||
if precision, ok := f.fs.Precision(); ok {
|
|
||||||
buf.Write(precisionBytes)
|
|
||||||
buf.WriteString(strconv.Itoa(precision))
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteRune(verb)
|
|
||||||
|
|
||||||
format = buf.String()
|
|
||||||
return format
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
|
||||||
// ensures that types for values which have been unpacked from an interface
|
|
||||||
// are displayed when the show types flag is also set.
|
|
||||||
// This is useful for data types like structs, arrays, slices, and maps which
|
|
||||||
// can contain varying types packed inside an interface.
|
|
||||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
|
||||||
if v.Kind() == reflect.Interface {
|
|
||||||
f.ignoreNextType = false
|
|
||||||
if !v.IsNil() {
|
|
||||||
v = v.Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
|
||||||
func (f *formatState) formatPtr(v reflect.Value) {
|
|
||||||
// Display nil if top level pointer is nil.
|
|
||||||
showTypes := f.fs.Flag('#')
|
|
||||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove pointers at or below the current depth from map used to detect
|
|
||||||
// circular refs.
|
|
||||||
for k, depth := range f.pointers {
|
|
||||||
if depth >= f.depth {
|
|
||||||
delete(f.pointers, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keep list of all dereferenced pointers to possibly show later.
|
|
||||||
pointerChain := make([]uintptr, 0)
|
|
||||||
|
|
||||||
// Figure out how many levels of indirection there are by derferencing
|
|
||||||
// pointers and unpacking interfaces down the chain while detecting circular
|
|
||||||
// references.
|
|
||||||
nilFound := false
|
|
||||||
cycleFound := false
|
|
||||||
indirects := 0
|
|
||||||
ve := v
|
|
||||||
for ve.Kind() == reflect.Ptr {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
indirects++
|
|
||||||
addr := ve.Pointer()
|
|
||||||
pointerChain = append(pointerChain, addr)
|
|
||||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
|
||||||
cycleFound = true
|
|
||||||
indirects--
|
|
||||||
break
|
|
||||||
}
|
|
||||||
f.pointers[addr] = f.depth
|
|
||||||
|
|
||||||
ve = ve.Elem()
|
|
||||||
if ve.Kind() == reflect.Interface {
|
|
||||||
if ve.IsNil() {
|
|
||||||
nilFound = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
ve = ve.Elem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display type or indirection level depending on flags.
|
|
||||||
if showTypes && !f.ignoreNextType {
|
|
||||||
f.fs.Write(openParenBytes)
|
|
||||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
|
||||||
f.fs.Write([]byte(ve.Type().String()))
|
|
||||||
f.fs.Write(closeParenBytes)
|
|
||||||
} else {
|
|
||||||
if nilFound || cycleFound {
|
|
||||||
indirects += strings.Count(ve.Type().String(), "*")
|
|
||||||
}
|
|
||||||
f.fs.Write(openAngleBytes)
|
|
||||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
|
||||||
f.fs.Write(closeAngleBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display pointer information depending on flags.
|
|
||||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
|
||||||
f.fs.Write(openParenBytes)
|
|
||||||
for i, addr := range pointerChain {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(pointerChainBytes)
|
|
||||||
}
|
|
||||||
printHexPtr(f.fs, addr)
|
|
||||||
}
|
|
||||||
f.fs.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display dereferenced value.
|
|
||||||
switch {
|
|
||||||
case nilFound:
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
|
|
||||||
case cycleFound:
|
|
||||||
f.fs.Write(circularShortBytes)
|
|
||||||
|
|
||||||
default:
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(ve)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// format is the main workhorse for providing the Formatter interface. It
|
|
||||||
// uses the passed reflect value to figure out what kind of object we are
|
|
||||||
// dealing with and formats it appropriately. It is a recursive function,
|
|
||||||
// however circular data structures are detected and handled properly.
|
|
||||||
func (f *formatState) format(v reflect.Value) {
|
|
||||||
// Handle invalid reflect values immediately.
|
|
||||||
kind := v.Kind()
|
|
||||||
if kind == reflect.Invalid {
|
|
||||||
f.fs.Write(invalidAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle pointers specially.
|
|
||||||
if kind == reflect.Ptr {
|
|
||||||
f.formatPtr(v)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print type information unless already handled elsewhere.
|
|
||||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
|
||||||
f.fs.Write(openParenBytes)
|
|
||||||
f.fs.Write([]byte(v.Type().String()))
|
|
||||||
f.fs.Write(closeParenBytes)
|
|
||||||
}
|
|
||||||
f.ignoreNextType = false
|
|
||||||
|
|
||||||
// Call Stringer/error interfaces if they exist and the handle methods
|
|
||||||
// flag is enabled.
|
|
||||||
if !f.cs.DisableMethods {
|
|
||||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
|
||||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch kind {
|
|
||||||
case reflect.Invalid:
|
|
||||||
// Do nothing. We should never get here since invalid has already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Bool:
|
|
||||||
printBool(f.fs, v.Bool())
|
|
||||||
|
|
||||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
|
||||||
printInt(f.fs, v.Int(), 10)
|
|
||||||
|
|
||||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
|
||||||
printUint(f.fs, v.Uint(), 10)
|
|
||||||
|
|
||||||
case reflect.Float32:
|
|
||||||
printFloat(f.fs, v.Float(), 32)
|
|
||||||
|
|
||||||
case reflect.Float64:
|
|
||||||
printFloat(f.fs, v.Float(), 64)
|
|
||||||
|
|
||||||
case reflect.Complex64:
|
|
||||||
printComplex(f.fs, v.Complex(), 32)
|
|
||||||
|
|
||||||
case reflect.Complex128:
|
|
||||||
printComplex(f.fs, v.Complex(), 64)
|
|
||||||
|
|
||||||
case reflect.Slice:
|
|
||||||
if v.IsNil() {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fallthrough
|
|
||||||
|
|
||||||
case reflect.Array:
|
|
||||||
f.fs.Write(openBracketBytes)
|
|
||||||
f.depth++
|
|
||||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
|
||||||
f.fs.Write(maxShortBytes)
|
|
||||||
} else {
|
|
||||||
numEntries := v.Len()
|
|
||||||
for i := 0; i < numEntries; i++ {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(f.unpackValue(v.Index(i)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f.depth--
|
|
||||||
f.fs.Write(closeBracketBytes)
|
|
||||||
|
|
||||||
case reflect.String:
|
|
||||||
f.fs.Write([]byte(v.String()))
|
|
||||||
|
|
||||||
case reflect.Interface:
|
|
||||||
// The only time we should get here is for nil interfaces due to
|
|
||||||
// unpackValue calls.
|
|
||||||
if v.IsNil() {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
|
||||||
// Do nothing. We should never get here since pointers have already
|
|
||||||
// been handled above.
|
|
||||||
|
|
||||||
case reflect.Map:
|
|
||||||
// nil maps should be indicated as different than empty maps
|
|
||||||
if v.IsNil() {
|
|
||||||
f.fs.Write(nilAngleBytes)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
f.fs.Write(openMapBytes)
|
|
||||||
f.depth++
|
|
||||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
|
||||||
f.fs.Write(maxShortBytes)
|
|
||||||
} else {
|
|
||||||
keys := v.MapKeys()
|
|
||||||
if f.cs.SortKeys {
|
|
||||||
sortValues(keys, f.cs)
|
|
||||||
}
|
|
||||||
for i, key := range keys {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(f.unpackValue(key))
|
|
||||||
f.fs.Write(colonBytes)
|
|
||||||
f.ignoreNextType = true
|
|
||||||
f.format(f.unpackValue(v.MapIndex(key)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f.depth--
|
|
||||||
f.fs.Write(closeMapBytes)
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
numFields := v.NumField()
|
|
||||||
f.fs.Write(openBraceBytes)
|
|
||||||
f.depth++
|
|
||||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
|
||||||
f.fs.Write(maxShortBytes)
|
|
||||||
} else {
|
|
||||||
vt := v.Type()
|
|
||||||
for i := 0; i < numFields; i++ {
|
|
||||||
if i > 0 {
|
|
||||||
f.fs.Write(spaceBytes)
|
|
||||||
}
|
|
||||||
vtf := vt.Field(i)
|
|
||||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
|
||||||
f.fs.Write([]byte(vtf.Name))
|
|
||||||
f.fs.Write(colonBytes)
|
|
||||||
}
|
|
||||||
f.format(f.unpackValue(v.Field(i)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f.depth--
|
|
||||||
f.fs.Write(closeBraceBytes)
|
|
||||||
|
|
||||||
case reflect.Uintptr:
|
|
||||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
|
||||||
|
|
||||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
|
||||||
printHexPtr(f.fs, v.Pointer())
|
|
||||||
|
|
||||||
// There were not any other types at the time this code was written, but
|
|
||||||
// fall back to letting the default fmt package handle it if any get added.
|
|
||||||
default:
|
|
||||||
format := f.buildDefaultFormat()
|
|
||||||
if v.CanInterface() {
|
|
||||||
fmt.Fprintf(f.fs, format, v.Interface())
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(f.fs, format, v.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
|
||||||
// details.
|
|
||||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
|
||||||
f.fs = fs
|
|
||||||
|
|
||||||
// Use standard formatting for verbs that are not v.
|
|
||||||
if verb != 'v' {
|
|
||||||
format := f.constructOrigFormat(verb)
|
|
||||||
fmt.Fprintf(fs, format, f.value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.value == nil {
|
|
||||||
if fs.Flag('#') {
|
|
||||||
fs.Write(interfaceBytes)
|
|
||||||
}
|
|
||||||
fs.Write(nilAngleBytes)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
f.format(reflect.ValueOf(f.value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// newFormatter is a helper function to consolidate the logic from the various
|
|
||||||
// public methods which take varying config states.
|
|
||||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
|
||||||
fs := &formatState{value: v, cs: cs}
|
|
||||||
fs.pointers = make(map[uintptr]int)
|
|
||||||
return fs
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
|
||||||
interface. As a result, it integrates cleanly with standard fmt package
|
|
||||||
printing functions. The formatter is useful for inline printing of smaller data
|
|
||||||
types similar to the standard %v format specifier.
|
|
||||||
|
|
||||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
|
||||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
|
||||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
|
||||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
|
||||||
the width and precision arguments (however they will still work on the format
|
|
||||||
specifiers not handled by the custom formatter).
|
|
||||||
|
|
||||||
Typically this function shouldn't be called directly. It is much easier to make
|
|
||||||
use of the custom formatter by calling one of the convenience functions such as
|
|
||||||
Printf, Println, or Fprintf.
|
|
||||||
*/
|
|
||||||
func NewFormatter(v interface{}) fmt.Formatter {
|
|
||||||
return newFormatter(&Config, v)
|
|
||||||
}
|
|
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
@ -1,148 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
|
||||||
*
|
|
||||||
* Permission to use, copy, modify, and distribute this software for any
|
|
||||||
* purpose with or without fee is hereby granted, provided that the above
|
|
||||||
* copyright notice and this permission notice appear in all copies.
|
|
||||||
*
|
|
||||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
||||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
||||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
||||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
||||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
||||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
||||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package spew
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the formatted string as a value that satisfies error. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Errorf(format string, a ...interface{}) (err error) {
|
|
||||||
return fmt.Errorf(format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprint(w, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Fprintln(w, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Print(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Print(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Printf(format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the number of bytes written and any write error encountered. See
|
|
||||||
// NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Println(a ...interface{}) (n int, err error) {
|
|
||||||
return fmt.Println(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Sprint(a ...interface{}) string {
|
|
||||||
return fmt.Sprint(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
|
||||||
// passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Sprintf(format string, a ...interface{}) string {
|
|
||||||
return fmt.Sprintf(format, convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
|
||||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
|
||||||
// returns the resulting string. See NewFormatter for formatting details.
|
|
||||||
//
|
|
||||||
// This function is shorthand for the following syntax:
|
|
||||||
//
|
|
||||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
|
||||||
func Sprintln(a ...interface{}) string {
|
|
||||||
return fmt.Sprintln(convertArgs(a)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
|
||||||
// length with each argument converted to a default spew Formatter interface.
|
|
||||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
|
||||||
formatters = make([]interface{}, len(args))
|
|
||||||
for index, arg := range args {
|
|
||||||
formatters[index] = NewFormatter(arg)
|
|
||||||
}
|
|
||||||
return formatters
|
|
||||||
}
|
|
2
vendor/github.com/go-jose/go-jose/v3/.gitignore
generated
vendored
2
vendor/github.com/go-jose/go-jose/v3/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
jose-util/jose-util
|
|
||||||
jose-util.t.err
|
|
53
vendor/github.com/go-jose/go-jose/v3/.golangci.yml
generated
vendored
53
vendor/github.com/go-jose/go-jose/v3/.golangci.yml
generated
vendored
@ -1,53 +0,0 @@
|
|||||||
# https://github.com/golangci/golangci-lint
|
|
||||||
|
|
||||||
run:
|
|
||||||
skip-files:
|
|
||||||
- doc_test.go
|
|
||||||
modules-download-mode: readonly
|
|
||||||
|
|
||||||
linters:
|
|
||||||
enable-all: true
|
|
||||||
disable:
|
|
||||||
- gochecknoglobals
|
|
||||||
- goconst
|
|
||||||
- lll
|
|
||||||
- maligned
|
|
||||||
- nakedret
|
|
||||||
- scopelint
|
|
||||||
- unparam
|
|
||||||
- funlen # added in 1.18 (requires go-jose changes before it can be enabled)
|
|
||||||
|
|
||||||
linters-settings:
|
|
||||||
gocyclo:
|
|
||||||
min-complexity: 35
|
|
||||||
|
|
||||||
issues:
|
|
||||||
exclude-rules:
|
|
||||||
- text: "don't use ALL_CAPS in Go names"
|
|
||||||
linters:
|
|
||||||
- golint
|
|
||||||
- text: "hardcoded credentials"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
- text: "weak cryptographic primitive"
|
|
||||||
linters:
|
|
||||||
- gosec
|
|
||||||
- path: json/
|
|
||||||
linters:
|
|
||||||
- dupl
|
|
||||||
- errcheck
|
|
||||||
- gocritic
|
|
||||||
- gocyclo
|
|
||||||
- golint
|
|
||||||
- govet
|
|
||||||
- ineffassign
|
|
||||||
- staticcheck
|
|
||||||
- structcheck
|
|
||||||
- stylecheck
|
|
||||||
- unused
|
|
||||||
- path: _test\.go
|
|
||||||
linters:
|
|
||||||
- scopelint
|
|
||||||
- path: jwk.go
|
|
||||||
linters:
|
|
||||||
- gocyclo
|
|
33
vendor/github.com/go-jose/go-jose/v3/.travis.yml
generated
vendored
33
vendor/github.com/go-jose/go-jose/v3/.travis.yml
generated
vendored
@ -1,33 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
fast_finish: true
|
|
||||||
allow_failures:
|
|
||||||
- go: tip
|
|
||||||
|
|
||||||
go:
|
|
||||||
- "1.13.x"
|
|
||||||
- "1.14.x"
|
|
||||||
- tip
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- export PATH=$HOME/.local/bin:$PATH
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
- go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge
|
|
||||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0
|
|
||||||
- pip install cram --user
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test -v -covermode=count -coverprofile=profile.cov .
|
|
||||||
- go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner
|
|
||||||
- go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher
|
|
||||||
- go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt
|
|
||||||
- go test -v ./json # no coverage for forked encoding/json package
|
|
||||||
- golangci-lint run
|
|
||||||
- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
|
|
||||||
- cd ..
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- gocovmerge *.cov */*.cov > merged.coverprofile
|
|
||||||
- goveralls -coverprofile merged.coverprofile -service=travis-ci
|
|
10
vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md
generated
vendored
10
vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md
generated
vendored
@ -1,10 +0,0 @@
|
|||||||
Serious about security
|
|
||||||
======================
|
|
||||||
|
|
||||||
Square recognizes the important contributions the security research community
|
|
||||||
can make. We therefore encourage reporting security issues with the code
|
|
||||||
contained in this repository.
|
|
||||||
|
|
||||||
If you believe you have discovered a security vulnerability, please follow the
|
|
||||||
guidelines at <https://bugcrowd.com/squareopensource>.
|
|
||||||
|
|
15
vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md
generated
vendored
15
vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md
generated
vendored
@ -1,15 +0,0 @@
|
|||||||
# Contributing
|
|
||||||
|
|
||||||
If you would like to contribute code to go-jose you can do so through GitHub by
|
|
||||||
forking the repository and sending a pull request.
|
|
||||||
|
|
||||||
When submitting code, please make every effort to follow existing conventions
|
|
||||||
and style in order to keep the code as readable as possible. Please also make
|
|
||||||
sure all tests pass by running `go test`, and format your code with `go fmt`.
|
|
||||||
We also recommend using `golint` and `errcheck`.
|
|
||||||
|
|
||||||
Before your code can be accepted into the project you must also sign the
|
|
||||||
Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
|
|
||||||
will be prompted to sign once a pull request is opened.
|
|
||||||
|
|
||||||
[1]: https://cla-assistant.io/
|
|
202
vendor/github.com/go-jose/go-jose/v3/LICENSE
generated
vendored
202
vendor/github.com/go-jose/go-jose/v3/LICENSE
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
122
vendor/github.com/go-jose/go-jose/v3/README.md
generated
vendored
122
vendor/github.com/go-jose/go-jose/v3/README.md
generated
vendored
@ -1,122 +0,0 @@
|
|||||||
# Go JOSE
|
|
||||||
|
|
||||||
[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
|
||||||
[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
|
||||||
[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
|
|
||||||
[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose)
|
|
||||||
[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose)
|
|
||||||
|
|
||||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
|
||||||
and Encryption set of standards. This includes support for JSON Web Encryption,
|
|
||||||
JSON Web Signature, and JSON Web Token standards.
|
|
||||||
|
|
||||||
**Disclaimer**: This library contains encryption software that is subject to
|
|
||||||
the U.S. Export Administration Regulations. You may not export, re-export,
|
|
||||||
transfer or download this code or any part of it in violation of any United
|
|
||||||
States law, directive or regulation. In particular this software may not be
|
|
||||||
exported or re-exported in any form or on any media to Iran, North Sudan,
|
|
||||||
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
|
|
||||||
US maintained blocked list.
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
The implementation follows the
|
|
||||||
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
|
|
||||||
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
|
|
||||||
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
|
|
||||||
Tables of supported algorithms are shown below. The library supports both
|
|
||||||
the compact and JWS/JWE JSON Serialization formats, and has optional support for
|
|
||||||
multiple recipients. It also comes with a small command-line utility
|
|
||||||
([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
|
|
||||||
for dealing with JOSE messages in a shell.
|
|
||||||
|
|
||||||
**Note**: We use a forked version of the `encoding/json` package from the Go
|
|
||||||
standard library which uses case-sensitive matching for member names (instead
|
|
||||||
of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
|
|
||||||
This is to avoid differences in interpretation of messages between go-jose and
|
|
||||||
libraries in other languages.
|
|
||||||
|
|
||||||
### Versions
|
|
||||||
|
|
||||||
[Version 2](https://gopkg.in/go-jose/go-jose.v2)
|
|
||||||
([branch](https://github.com/go-jose/go-jose/tree/v2),
|
|
||||||
[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
|
|
||||||
|
|
||||||
import "gopkg.in/go-jose/go-jose.v2"
|
|
||||||
|
|
||||||
[Version 3](https://github.com/go-jose/go-jose)
|
|
||||||
([branch](https://github.com/go-jose/go-jose/tree/master),
|
|
||||||
[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet):
|
|
||||||
|
|
||||||
import "github.com/go-jose/go-jose/v3"
|
|
||||||
|
|
||||||
All new feature development takes place on the `master` branch, which we are
|
|
||||||
preparing to release as version 3 soon. Version 2 will continue to receive
|
|
||||||
critical bug and security fixes. Note that starting with version 3 we are
|
|
||||||
using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher.
|
|
||||||
|
|
||||||
Version 1 (on the `v1` branch) is frozen and not supported anymore.
|
|
||||||
|
|
||||||
### Supported algorithms
|
|
||||||
|
|
||||||
See below for a table of supported algorithms. Algorithm identifiers match
|
|
||||||
the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
|
|
||||||
standard where possible. The Godoc reference has a list of constants.
|
|
||||||
|
|
||||||
Key encryption | Algorithm identifier(s)
|
|
||||||
:------------------------- | :------------------------------
|
|
||||||
RSA-PKCS#1v1.5 | RSA1_5
|
|
||||||
RSA-OAEP | RSA-OAEP, RSA-OAEP-256
|
|
||||||
AES key wrap | A128KW, A192KW, A256KW
|
|
||||||
AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
|
|
||||||
ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
|
|
||||||
ECDH-ES (direct) | ECDH-ES<sup>1</sup>
|
|
||||||
Direct encryption | dir<sup>1</sup>
|
|
||||||
|
|
||||||
<sup>1. Not supported in multi-recipient mode</sup>
|
|
||||||
|
|
||||||
Signing / MAC | Algorithm identifier(s)
|
|
||||||
:------------------------- | :------------------------------
|
|
||||||
RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
|
|
||||||
RSASSA-PSS | PS256, PS384, PS512
|
|
||||||
HMAC | HS256, HS384, HS512
|
|
||||||
ECDSA | ES256, ES384, ES512
|
|
||||||
Ed25519 | EdDSA<sup>2</sup>
|
|
||||||
|
|
||||||
<sup>2. Only available in version 2 of the package</sup>
|
|
||||||
|
|
||||||
Content encryption | Algorithm identifier(s)
|
|
||||||
:------------------------- | :------------------------------
|
|
||||||
AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
|
|
||||||
AES-GCM | A128GCM, A192GCM, A256GCM
|
|
||||||
|
|
||||||
Compression | Algorithm identifiers(s)
|
|
||||||
:------------------------- | -------------------------------
|
|
||||||
DEFLATE (RFC 1951) | DEF
|
|
||||||
|
|
||||||
### Supported key types
|
|
||||||
|
|
||||||
See below for a table of supported key types. These are understood by the
|
|
||||||
library, and can be passed to corresponding functions such as `NewEncrypter` or
|
|
||||||
`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
|
|
||||||
allows attaching a key id.
|
|
||||||
|
|
||||||
Algorithm(s) | Corresponding types
|
|
||||||
:------------------------- | -------------------------------
|
|
||||||
RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
|
|
||||||
ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
|
|
||||||
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
|
|
||||||
AES, HMAC | []byte
|
|
||||||
|
|
||||||
<sup>1. Only available in version 2 or later of the package</sup>
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
|
||||||
[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
|
||||||
|
|
||||||
Examples can be found in the Godoc
|
|
||||||
reference for this package. The
|
|
||||||
[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
|
|
||||||
subdirectory also contains a small command-line utility which might be useful
|
|
||||||
as an example as well.
|
|
592
vendor/github.com/go-jose/go-jose/v3/asymmetric.go
generated
vendored
592
vendor/github.com/go-jose/go-jose/v3/asymmetric.go
generated
vendored
@ -1,592 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"crypto/aes"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/ed25519"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/sha256"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
|
|
||||||
josecipher "github.com/go-jose/go-jose/v3/cipher"
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A generic RSA-based encrypter/verifier
|
|
||||||
type rsaEncrypterVerifier struct {
|
|
||||||
publicKey *rsa.PublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// A generic RSA-based decrypter/signer
|
|
||||||
type rsaDecrypterSigner struct {
|
|
||||||
privateKey *rsa.PrivateKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// A generic EC-based encrypter/verifier
|
|
||||||
type ecEncrypterVerifier struct {
|
|
||||||
publicKey *ecdsa.PublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
type edEncrypterVerifier struct {
|
|
||||||
publicKey ed25519.PublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// A key generator for ECDH-ES
|
|
||||||
type ecKeyGenerator struct {
|
|
||||||
size int
|
|
||||||
algID string
|
|
||||||
publicKey *ecdsa.PublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// A generic EC-based decrypter/signer
|
|
||||||
type ecDecrypterSigner struct {
|
|
||||||
privateKey *ecdsa.PrivateKey
|
|
||||||
}
|
|
||||||
|
|
||||||
type edDecrypterSigner struct {
|
|
||||||
privateKey ed25519.PrivateKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRSARecipient creates recipientKeyInfo based on the given key.
|
|
||||||
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
|
|
||||||
// Verify that key management algorithm is supported by this encrypter
|
|
||||||
switch keyAlg {
|
|
||||||
case RSA1_5, RSA_OAEP, RSA_OAEP_256:
|
|
||||||
default:
|
|
||||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
if publicKey == nil {
|
|
||||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientKeyInfo{
|
|
||||||
keyAlg: keyAlg,
|
|
||||||
keyEncrypter: &rsaEncrypterVerifier{
|
|
||||||
publicKey: publicKey,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRSASigner creates a recipientSigInfo based on the given key.
|
|
||||||
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
|
|
||||||
// Verify that key management algorithm is supported by this encrypter
|
|
||||||
switch sigAlg {
|
|
||||||
case RS256, RS384, RS512, PS256, PS384, PS512:
|
|
||||||
default:
|
|
||||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
if privateKey == nil {
|
|
||||||
return recipientSigInfo{}, errors.New("invalid private key")
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientSigInfo{
|
|
||||||
sigAlg: sigAlg,
|
|
||||||
publicKey: staticPublicKey(&JSONWebKey{
|
|
||||||
Key: privateKey.Public(),
|
|
||||||
}),
|
|
||||||
signer: &rsaDecrypterSigner{
|
|
||||||
privateKey: privateKey,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
|
|
||||||
if sigAlg != EdDSA {
|
|
||||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
if privateKey == nil {
|
|
||||||
return recipientSigInfo{}, errors.New("invalid private key")
|
|
||||||
}
|
|
||||||
return recipientSigInfo{
|
|
||||||
sigAlg: sigAlg,
|
|
||||||
publicKey: staticPublicKey(&JSONWebKey{
|
|
||||||
Key: privateKey.Public(),
|
|
||||||
}),
|
|
||||||
signer: &edDecrypterSigner{
|
|
||||||
privateKey: privateKey,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newECDHRecipient creates recipientKeyInfo based on the given key.
|
|
||||||
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
|
|
||||||
// Verify that key management algorithm is supported by this encrypter
|
|
||||||
switch keyAlg {
|
|
||||||
case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
|
||||||
default:
|
|
||||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
|
||||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientKeyInfo{
|
|
||||||
keyAlg: keyAlg,
|
|
||||||
keyEncrypter: &ecEncrypterVerifier{
|
|
||||||
publicKey: publicKey,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newECDSASigner creates a recipientSigInfo based on the given key.
|
|
||||||
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
|
|
||||||
// Verify that key management algorithm is supported by this encrypter
|
|
||||||
switch sigAlg {
|
|
||||||
case ES256, ES384, ES512:
|
|
||||||
default:
|
|
||||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
if privateKey == nil {
|
|
||||||
return recipientSigInfo{}, errors.New("invalid private key")
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientSigInfo{
|
|
||||||
sigAlg: sigAlg,
|
|
||||||
publicKey: staticPublicKey(&JSONWebKey{
|
|
||||||
Key: privateKey.Public(),
|
|
||||||
}),
|
|
||||||
signer: &ecDecrypterSigner{
|
|
||||||
privateKey: privateKey,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt the given payload and update the object.
|
|
||||||
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
|
||||||
encryptedKey, err := ctx.encrypt(cek, alg)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientInfo{
|
|
||||||
encryptedKey: encryptedKey,
|
|
||||||
header: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt the given payload. Based on the key encryption algorithm,
|
|
||||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
|
||||||
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
|
|
||||||
switch alg {
|
|
||||||
case RSA1_5:
|
|
||||||
return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
|
|
||||||
case RSA_OAEP:
|
|
||||||
return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
|
|
||||||
case RSA_OAEP_256:
|
|
||||||
return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt the given payload and return the content encryption key.
|
|
||||||
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
|
||||||
return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt the given payload. Based on the key encryption algorithm,
|
|
||||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
|
||||||
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
|
|
||||||
// Note: The random reader on decrypt operations is only used for blinding,
|
|
||||||
// so stubbing is meanlingless (hence the direct use of rand.Reader).
|
|
||||||
switch alg {
|
|
||||||
case RSA1_5:
|
|
||||||
defer func() {
|
|
||||||
// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
|
|
||||||
// because of an index out of bounds error, which we want to ignore.
|
|
||||||
// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
|
|
||||||
// only exists for preventing crashes with unpatched versions.
|
|
||||||
// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
|
|
||||||
// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
|
|
||||||
_ = recover()
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Perform some input validation.
|
|
||||||
keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
|
|
||||||
if keyBytes != len(jek) {
|
|
||||||
// Input size is incorrect, the encrypted payload should always match
|
|
||||||
// the size of the public modulus (e.g. using a 2048 bit key will
|
|
||||||
// produce 256 bytes of output). Reject this since it's invalid input.
|
|
||||||
return nil, ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
cek, _, err := generator.genKey()
|
|
||||||
if err != nil {
|
|
||||||
return nil, ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
|
|
||||||
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
|
|
||||||
// the Million Message Attack on Cryptographic Message Syntax". We are
|
|
||||||
// therefore deliberately ignoring errors here.
|
|
||||||
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
|
|
||||||
|
|
||||||
return cek, nil
|
|
||||||
case RSA_OAEP:
|
|
||||||
// Use rand.Reader for RSA blinding
|
|
||||||
return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
|
||||||
case RSA_OAEP_256:
|
|
||||||
// Use rand.Reader for RSA blinding
|
|
||||||
return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign the given payload
|
|
||||||
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
|
||||||
var hash crypto.Hash
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case RS256, PS256:
|
|
||||||
hash = crypto.SHA256
|
|
||||||
case RS384, PS384:
|
|
||||||
hash = crypto.SHA384
|
|
||||||
case RS512, PS512:
|
|
||||||
hash = crypto.SHA512
|
|
||||||
default:
|
|
||||||
return Signature{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
hasher := hash.New()
|
|
||||||
|
|
||||||
// According to documentation, Write() on hash never fails
|
|
||||||
_, _ = hasher.Write(payload)
|
|
||||||
hashed := hasher.Sum(nil)
|
|
||||||
|
|
||||||
var out []byte
|
|
||||||
var err error
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case RS256, RS384, RS512:
|
|
||||||
out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
|
|
||||||
case PS256, PS384, PS512:
|
|
||||||
out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
|
|
||||||
SaltLength: rsa.PSSSaltLengthEqualsHash,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return Signature{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return Signature{
|
|
||||||
Signature: out,
|
|
||||||
protected: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify the given payload
|
|
||||||
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
|
||||||
var hash crypto.Hash
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case RS256, PS256:
|
|
||||||
hash = crypto.SHA256
|
|
||||||
case RS384, PS384:
|
|
||||||
hash = crypto.SHA384
|
|
||||||
case RS512, PS512:
|
|
||||||
hash = crypto.SHA512
|
|
||||||
default:
|
|
||||||
return ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
hasher := hash.New()
|
|
||||||
|
|
||||||
// According to documentation, Write() on hash never fails
|
|
||||||
_, _ = hasher.Write(payload)
|
|
||||||
hashed := hasher.Sum(nil)
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case RS256, RS384, RS512:
|
|
||||||
return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
|
|
||||||
case PS256, PS384, PS512:
|
|
||||||
return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt the given payload and update the object.
|
|
||||||
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
|
||||||
switch alg {
|
|
||||||
case ECDH_ES:
|
|
||||||
// ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
|
|
||||||
return recipientInfo{
|
|
||||||
header: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
|
||||||
default:
|
|
||||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
generator := ecKeyGenerator{
|
|
||||||
algID: string(alg),
|
|
||||||
publicKey: ctx.publicKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case ECDH_ES_A128KW:
|
|
||||||
generator.size = 16
|
|
||||||
case ECDH_ES_A192KW:
|
|
||||||
generator.size = 24
|
|
||||||
case ECDH_ES_A256KW:
|
|
||||||
generator.size = 32
|
|
||||||
}
|
|
||||||
|
|
||||||
kek, header, err := generator.genKey()
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
block, err := aes.NewCipher(kek)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
jek, err := josecipher.KeyWrap(block, cek)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientInfo{
|
|
||||||
encryptedKey: jek,
|
|
||||||
header: &header,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get key size for EC key generator
|
|
||||||
func (ctx ecKeyGenerator) keySize() int {
|
|
||||||
return ctx.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get a content encryption key for ECDH-ES
|
|
||||||
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
|
||||||
priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
|
|
||||||
if err != nil {
|
|
||||||
return nil, rawHeader{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
|
|
||||||
|
|
||||||
b, err := json.Marshal(&JSONWebKey{
|
|
||||||
Key: &priv.PublicKey,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
headers := rawHeader{
|
|
||||||
headerEPK: makeRawMessage(b),
|
|
||||||
}
|
|
||||||
|
|
||||||
return out, headers, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt the given payload and return the content encryption key.
|
|
||||||
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
|
||||||
epk, err := headers.getEPK()
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid epk header")
|
|
||||||
}
|
|
||||||
if epk == nil {
|
|
||||||
return nil, errors.New("go-jose/go-jose: missing epk header")
|
|
||||||
}
|
|
||||||
|
|
||||||
publicKey, ok := epk.Key.(*ecdsa.PublicKey)
|
|
||||||
if publicKey == nil || !ok {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid epk header")
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
|
|
||||||
}
|
|
||||||
|
|
||||||
apuData, err := headers.getAPU()
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid apu header")
|
|
||||||
}
|
|
||||||
apvData, err := headers.getAPV()
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid apv header")
|
|
||||||
}
|
|
||||||
|
|
||||||
deriveKey := func(algID string, size int) []byte {
|
|
||||||
return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
var keySize int
|
|
||||||
|
|
||||||
algorithm := headers.getAlgorithm()
|
|
||||||
switch algorithm {
|
|
||||||
case ECDH_ES:
|
|
||||||
// ECDH-ES uses direct key agreement, no key unwrapping necessary.
|
|
||||||
return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
|
|
||||||
case ECDH_ES_A128KW:
|
|
||||||
keySize = 16
|
|
||||||
case ECDH_ES_A192KW:
|
|
||||||
keySize = 24
|
|
||||||
case ECDH_ES_A256KW:
|
|
||||||
keySize = 32
|
|
||||||
default:
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
key := deriveKey(string(algorithm), keySize)
|
|
||||||
block, err := aes.NewCipher(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
|
||||||
if alg != EdDSA {
|
|
||||||
return Signature{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
|
|
||||||
if err != nil {
|
|
||||||
return Signature{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return Signature{
|
|
||||||
Signature: sig,
|
|
||||||
protected: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
|
||||||
if alg != EdDSA {
|
|
||||||
return ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
ok := ed25519.Verify(ctx.publicKey, payload, signature)
|
|
||||||
if !ok {
|
|
||||||
return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign the given payload
|
|
||||||
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
|
||||||
var expectedBitSize int
|
|
||||||
var hash crypto.Hash
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case ES256:
|
|
||||||
expectedBitSize = 256
|
|
||||||
hash = crypto.SHA256
|
|
||||||
case ES384:
|
|
||||||
expectedBitSize = 384
|
|
||||||
hash = crypto.SHA384
|
|
||||||
case ES512:
|
|
||||||
expectedBitSize = 521
|
|
||||||
hash = crypto.SHA512
|
|
||||||
}
|
|
||||||
|
|
||||||
curveBits := ctx.privateKey.Curve.Params().BitSize
|
|
||||||
if expectedBitSize != curveBits {
|
|
||||||
return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
|
|
||||||
}
|
|
||||||
|
|
||||||
hasher := hash.New()
|
|
||||||
|
|
||||||
// According to documentation, Write() on hash never fails
|
|
||||||
_, _ = hasher.Write(payload)
|
|
||||||
hashed := hasher.Sum(nil)
|
|
||||||
|
|
||||||
r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
|
|
||||||
if err != nil {
|
|
||||||
return Signature{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
keyBytes := curveBits / 8
|
|
||||||
if curveBits%8 > 0 {
|
|
||||||
keyBytes++
|
|
||||||
}
|
|
||||||
|
|
||||||
// We serialize the outputs (r and s) into big-endian byte arrays and pad
|
|
||||||
// them with zeros on the left to make sure the sizes work out. Both arrays
|
|
||||||
// must be keyBytes long, and the output must be 2*keyBytes long.
|
|
||||||
rBytes := r.Bytes()
|
|
||||||
rBytesPadded := make([]byte, keyBytes)
|
|
||||||
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
|
|
||||||
|
|
||||||
sBytes := s.Bytes()
|
|
||||||
sBytesPadded := make([]byte, keyBytes)
|
|
||||||
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
|
|
||||||
|
|
||||||
out := append(rBytesPadded, sBytesPadded...)
|
|
||||||
|
|
||||||
return Signature{
|
|
||||||
Signature: out,
|
|
||||||
protected: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify the given payload
|
|
||||||
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
|
||||||
var keySize int
|
|
||||||
var hash crypto.Hash
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case ES256:
|
|
||||||
keySize = 32
|
|
||||||
hash = crypto.SHA256
|
|
||||||
case ES384:
|
|
||||||
keySize = 48
|
|
||||||
hash = crypto.SHA384
|
|
||||||
case ES512:
|
|
||||||
keySize = 66
|
|
||||||
hash = crypto.SHA512
|
|
||||||
default:
|
|
||||||
return ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(signature) != 2*keySize {
|
|
||||||
return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
|
|
||||||
}
|
|
||||||
|
|
||||||
hasher := hash.New()
|
|
||||||
|
|
||||||
// According to documentation, Write() on hash never fails
|
|
||||||
_, _ = hasher.Write(payload)
|
|
||||||
hashed := hasher.Sum(nil)
|
|
||||||
|
|
||||||
r := big.NewInt(0).SetBytes(signature[:keySize])
|
|
||||||
s := big.NewInt(0).SetBytes(signature[keySize:])
|
|
||||||
|
|
||||||
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
|
|
||||||
if !match {
|
|
||||||
return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
196
vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go
generated
vendored
196
vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go
generated
vendored
@ -1,196 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package josecipher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"crypto/sha512"
|
|
||||||
"crypto/subtle"
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"hash"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
nonceBytes = 16
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
|
|
||||||
func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
|
|
||||||
keySize := len(key) / 2
|
|
||||||
integrityKey := key[:keySize]
|
|
||||||
encryptionKey := key[keySize:]
|
|
||||||
|
|
||||||
blockCipher, err := newBlockCipher(encryptionKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var hash func() hash.Hash
|
|
||||||
switch keySize {
|
|
||||||
case 16:
|
|
||||||
hash = sha256.New
|
|
||||||
case 24:
|
|
||||||
hash = sha512.New384
|
|
||||||
case 32:
|
|
||||||
hash = sha512.New
|
|
||||||
}
|
|
||||||
|
|
||||||
return &cbcAEAD{
|
|
||||||
hash: hash,
|
|
||||||
blockCipher: blockCipher,
|
|
||||||
authtagBytes: keySize,
|
|
||||||
integrityKey: integrityKey,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// An AEAD based on CBC+HMAC
|
|
||||||
type cbcAEAD struct {
|
|
||||||
hash func() hash.Hash
|
|
||||||
authtagBytes int
|
|
||||||
integrityKey []byte
|
|
||||||
blockCipher cipher.Block
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *cbcAEAD) NonceSize() int {
|
|
||||||
return nonceBytes
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *cbcAEAD) Overhead() int {
|
|
||||||
// Maximum overhead is block size (for padding) plus auth tag length, where
|
|
||||||
// the length of the auth tag is equivalent to the key size.
|
|
||||||
return ctx.blockCipher.BlockSize() + ctx.authtagBytes
|
|
||||||
}
|
|
||||||
|
|
||||||
// Seal encrypts and authenticates the plaintext.
|
|
||||||
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
|
|
||||||
// Output buffer -- must take care not to mangle plaintext input.
|
|
||||||
ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
|
|
||||||
copy(ciphertext, plaintext)
|
|
||||||
ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
|
|
||||||
|
|
||||||
cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
|
|
||||||
|
|
||||||
cbc.CryptBlocks(ciphertext, ciphertext)
|
|
||||||
authtag := ctx.computeAuthTag(data, nonce, ciphertext)
|
|
||||||
|
|
||||||
ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
|
|
||||||
copy(out, ciphertext)
|
|
||||||
copy(out[len(ciphertext):], authtag)
|
|
||||||
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open decrypts and authenticates the ciphertext.
|
|
||||||
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
|
|
||||||
if len(ciphertext) < ctx.authtagBytes {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
|
|
||||||
}
|
|
||||||
|
|
||||||
offset := len(ciphertext) - ctx.authtagBytes
|
|
||||||
expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
|
|
||||||
match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
|
|
||||||
if match != 1 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
|
|
||||||
}
|
|
||||||
|
|
||||||
cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
|
|
||||||
|
|
||||||
// Make copy of ciphertext buffer, don't want to modify in place
|
|
||||||
buffer := append([]byte{}, ciphertext[:offset]...)
|
|
||||||
|
|
||||||
if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
|
|
||||||
}
|
|
||||||
|
|
||||||
cbc.CryptBlocks(buffer, buffer)
|
|
||||||
|
|
||||||
// Remove padding
|
|
||||||
plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
|
|
||||||
copy(out, plaintext)
|
|
||||||
|
|
||||||
return ret, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute an authentication tag
|
|
||||||
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
|
|
||||||
buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
|
|
||||||
n := 0
|
|
||||||
n += copy(buffer, aad)
|
|
||||||
n += copy(buffer[n:], nonce)
|
|
||||||
n += copy(buffer[n:], ciphertext)
|
|
||||||
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
|
|
||||||
|
|
||||||
// According to documentation, Write() on hash.Hash never fails.
|
|
||||||
hmac := hmac.New(ctx.hash, ctx.integrityKey)
|
|
||||||
_, _ = hmac.Write(buffer)
|
|
||||||
|
|
||||||
return hmac.Sum(nil)[:ctx.authtagBytes]
|
|
||||||
}
|
|
||||||
|
|
||||||
// resize ensures that the given slice has a capacity of at least n bytes.
|
|
||||||
// If the capacity of the slice is less than n, a new slice is allocated
|
|
||||||
// and the existing data will be copied.
|
|
||||||
func resize(in []byte, n uint64) (head, tail []byte) {
|
|
||||||
if uint64(cap(in)) >= n {
|
|
||||||
head = in[:n]
|
|
||||||
} else {
|
|
||||||
head = make([]byte, n)
|
|
||||||
copy(head, in)
|
|
||||||
}
|
|
||||||
|
|
||||||
tail = head[len(in):]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply padding
|
|
||||||
func padBuffer(buffer []byte, blockSize int) []byte {
|
|
||||||
missing := blockSize - (len(buffer) % blockSize)
|
|
||||||
ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
|
|
||||||
padding := bytes.Repeat([]byte{byte(missing)}, missing)
|
|
||||||
copy(out, padding)
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove padding
|
|
||||||
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
|
|
||||||
if len(buffer)%blockSize != 0 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
|
||||||
}
|
|
||||||
|
|
||||||
last := buffer[len(buffer)-1]
|
|
||||||
count := int(last)
|
|
||||||
|
|
||||||
if count == 0 || count > blockSize || count > len(buffer) {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
|
||||||
}
|
|
||||||
|
|
||||||
padding := bytes.Repeat([]byte{last}, count)
|
|
||||||
if !bytes.HasSuffix(buffer, padding) {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
|
||||||
}
|
|
||||||
|
|
||||||
return buffer[:len(buffer)-count], nil
|
|
||||||
}
|
|
75
vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go
generated
vendored
75
vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go
generated
vendored
@ -1,75 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package josecipher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"encoding/binary"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
type concatKDF struct {
|
|
||||||
z, info []byte
|
|
||||||
i uint32
|
|
||||||
cache []byte
|
|
||||||
hasher hash.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConcatKDF builds a KDF reader based on the given inputs.
|
|
||||||
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
|
|
||||||
buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
|
|
||||||
n := 0
|
|
||||||
n += copy(buffer, algID)
|
|
||||||
n += copy(buffer[n:], ptyUInfo)
|
|
||||||
n += copy(buffer[n:], ptyVInfo)
|
|
||||||
n += copy(buffer[n:], supPubInfo)
|
|
||||||
copy(buffer[n:], supPrivInfo)
|
|
||||||
|
|
||||||
hasher := hash.New()
|
|
||||||
|
|
||||||
return &concatKDF{
|
|
||||||
z: z,
|
|
||||||
info: buffer,
|
|
||||||
hasher: hasher,
|
|
||||||
cache: []byte{},
|
|
||||||
i: 1,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *concatKDF) Read(out []byte) (int, error) {
|
|
||||||
copied := copy(out, ctx.cache)
|
|
||||||
ctx.cache = ctx.cache[copied:]
|
|
||||||
|
|
||||||
for copied < len(out) {
|
|
||||||
ctx.hasher.Reset()
|
|
||||||
|
|
||||||
// Write on a hash.Hash never fails
|
|
||||||
_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
|
|
||||||
_, _ = ctx.hasher.Write(ctx.z)
|
|
||||||
_, _ = ctx.hasher.Write(ctx.info)
|
|
||||||
|
|
||||||
hash := ctx.hasher.Sum(nil)
|
|
||||||
chunkCopied := copy(out[copied:], hash)
|
|
||||||
copied += chunkCopied
|
|
||||||
ctx.cache = hash[chunkCopied:]
|
|
||||||
|
|
||||||
ctx.i++
|
|
||||||
}
|
|
||||||
|
|
||||||
return copied, nil
|
|
||||||
}
|
|
86
vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go
generated
vendored
86
vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go
generated
vendored
@ -1,86 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package josecipher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/elliptic"
|
|
||||||
"encoding/binary"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
|
|
||||||
// It is an error to call this function with a private/public key that are not on the same
|
|
||||||
// curve. Callers must ensure that the keys are valid before calling this function. Output
|
|
||||||
// size may be at most 1<<16 bytes (64 KiB).
|
|
||||||
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
|
|
||||||
if size > 1<<16 {
|
|
||||||
panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
|
|
||||||
}
|
|
||||||
|
|
||||||
// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
|
|
||||||
algID := lengthPrefixed([]byte(alg))
|
|
||||||
ptyUInfo := lengthPrefixed(apuData)
|
|
||||||
ptyVInfo := lengthPrefixed(apvData)
|
|
||||||
|
|
||||||
// suppPubInfo is the encoded length of the output size in bits
|
|
||||||
supPubInfo := make([]byte, 4)
|
|
||||||
binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
|
|
||||||
|
|
||||||
if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
|
|
||||||
panic("public key not on same curve as private key")
|
|
||||||
}
|
|
||||||
|
|
||||||
z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
|
|
||||||
zBytes := z.Bytes()
|
|
||||||
|
|
||||||
// Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
|
|
||||||
// the returned byte array. This can lead to a problem where zBytes will be
|
|
||||||
// shorter than expected which breaks the key derivation. Therefore we must pad
|
|
||||||
// to the full length of the expected coordinate here before calling the KDF.
|
|
||||||
octSize := dSize(priv.Curve)
|
|
||||||
if len(zBytes) != octSize {
|
|
||||||
zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
|
|
||||||
}
|
|
||||||
|
|
||||||
reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
|
|
||||||
key := make([]byte, size)
|
|
||||||
|
|
||||||
// Read on the KDF will never fail
|
|
||||||
_, _ = reader.Read(key)
|
|
||||||
|
|
||||||
return key
|
|
||||||
}
|
|
||||||
|
|
||||||
// dSize returns the size in octets for a coordinate on a elliptic curve.
|
|
||||||
func dSize(curve elliptic.Curve) int {
|
|
||||||
order := curve.Params().P
|
|
||||||
bitLen := order.BitLen()
|
|
||||||
size := bitLen / 8
|
|
||||||
if bitLen%8 != 0 {
|
|
||||||
size++
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
func lengthPrefixed(data []byte) []byte {
|
|
||||||
out := make([]byte, len(data)+4)
|
|
||||||
binary.BigEndian.PutUint32(out, uint32(len(data)))
|
|
||||||
copy(out[4:], data)
|
|
||||||
return out
|
|
||||||
}
|
|
109
vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
generated
vendored
109
vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go
generated
vendored
@ -1,109 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package josecipher
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/subtle"
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
|
|
||||||
|
|
||||||
// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
|
|
||||||
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
|
|
||||||
if len(cek)%8 != 0 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
|
|
||||||
}
|
|
||||||
|
|
||||||
n := len(cek) / 8
|
|
||||||
r := make([][]byte, n)
|
|
||||||
|
|
||||||
for i := range r {
|
|
||||||
r[i] = make([]byte, 8)
|
|
||||||
copy(r[i], cek[i*8:])
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer := make([]byte, 16)
|
|
||||||
tBytes := make([]byte, 8)
|
|
||||||
copy(buffer, defaultIV)
|
|
||||||
|
|
||||||
for t := 0; t < 6*n; t++ {
|
|
||||||
copy(buffer[8:], r[t%n])
|
|
||||||
|
|
||||||
block.Encrypt(buffer, buffer)
|
|
||||||
|
|
||||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
|
||||||
|
|
||||||
for i := 0; i < 8; i++ {
|
|
||||||
buffer[i] ^= tBytes[i]
|
|
||||||
}
|
|
||||||
copy(r[t%n], buffer[8:])
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make([]byte, (n+1)*8)
|
|
||||||
copy(out, buffer[:8])
|
|
||||||
for i := range r {
|
|
||||||
copy(out[(i+1)*8:], r[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
|
|
||||||
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
|
|
||||||
if len(ciphertext)%8 != 0 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
|
|
||||||
}
|
|
||||||
|
|
||||||
n := (len(ciphertext) / 8) - 1
|
|
||||||
r := make([][]byte, n)
|
|
||||||
|
|
||||||
for i := range r {
|
|
||||||
r[i] = make([]byte, 8)
|
|
||||||
copy(r[i], ciphertext[(i+1)*8:])
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer := make([]byte, 16)
|
|
||||||
tBytes := make([]byte, 8)
|
|
||||||
copy(buffer[:8], ciphertext[:8])
|
|
||||||
|
|
||||||
for t := 6*n - 1; t >= 0; t-- {
|
|
||||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
|
||||||
|
|
||||||
for i := 0; i < 8; i++ {
|
|
||||||
buffer[i] ^= tBytes[i]
|
|
||||||
}
|
|
||||||
copy(buffer[8:], r[t%n])
|
|
||||||
|
|
||||||
block.Decrypt(buffer, buffer)
|
|
||||||
|
|
||||||
copy(r[t%n], buffer[8:])
|
|
||||||
}
|
|
||||||
|
|
||||||
if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: failed to unwrap key")
|
|
||||||
}
|
|
||||||
|
|
||||||
out := make([]byte, n*8)
|
|
||||||
for i := range r {
|
|
||||||
copy(out[i*8:], r[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
return out, nil
|
|
||||||
}
|
|
544
vendor/github.com/go-jose/go-jose/v3/crypter.go
generated
vendored
544
vendor/github.com/go-jose/go-jose/v3/crypter.go
generated
vendored
@ -1,544 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/rsa"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Encrypter represents an encrypter which produces an encrypted JWE object.
|
|
||||||
type Encrypter interface {
|
|
||||||
Encrypt(plaintext []byte) (*JSONWebEncryption, error)
|
|
||||||
EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
|
|
||||||
Options() EncrypterOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// A generic content cipher
|
|
||||||
type contentCipher interface {
|
|
||||||
keySize() int
|
|
||||||
encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
|
|
||||||
decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A key generator (for generating/getting a CEK)
|
|
||||||
type keyGenerator interface {
|
|
||||||
keySize() int
|
|
||||||
genKey() ([]byte, rawHeader, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A generic key encrypter
|
|
||||||
type keyEncrypter interface {
|
|
||||||
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
|
|
||||||
}
|
|
||||||
|
|
||||||
// A generic key decrypter
|
|
||||||
type keyDecrypter interface {
|
|
||||||
decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
|
|
||||||
}
|
|
||||||
|
|
||||||
// A generic encrypter based on the given key encrypter and content cipher.
|
|
||||||
type genericEncrypter struct {
|
|
||||||
contentAlg ContentEncryption
|
|
||||||
compressionAlg CompressionAlgorithm
|
|
||||||
cipher contentCipher
|
|
||||||
recipients []recipientKeyInfo
|
|
||||||
keyGenerator keyGenerator
|
|
||||||
extraHeaders map[HeaderKey]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type recipientKeyInfo struct {
|
|
||||||
keyID string
|
|
||||||
keyAlg KeyAlgorithm
|
|
||||||
keyEncrypter keyEncrypter
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncrypterOptions represents options that can be set on new encrypters.
|
|
||||||
type EncrypterOptions struct {
|
|
||||||
Compression CompressionAlgorithm
|
|
||||||
|
|
||||||
// Optional map of additional keys to be inserted into the protected header
|
|
||||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
|
||||||
// additional values here. All values must be JSON-serializable.
|
|
||||||
ExtraHeaders map[HeaderKey]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
|
||||||
// if necessary. It returns itself and so can be used in a fluent style.
|
|
||||||
func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
|
|
||||||
if eo.ExtraHeaders == nil {
|
|
||||||
eo.ExtraHeaders = map[HeaderKey]interface{}{}
|
|
||||||
}
|
|
||||||
eo.ExtraHeaders[k] = v
|
|
||||||
return eo
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithContentType adds a content type ("cty") header and returns the updated
|
|
||||||
// EncrypterOptions.
|
|
||||||
func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
|
|
||||||
return eo.WithHeader(HeaderContentType, contentType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
|
|
||||||
func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
|
|
||||||
return eo.WithHeader(HeaderType, typ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recipient represents an algorithm/key to encrypt messages to.
|
|
||||||
//
|
|
||||||
// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
|
|
||||||
// on the password-based encryption algorithms PBES2-HS256+A128KW,
|
|
||||||
// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
|
|
||||||
// default of 100000 will be used for the count and a 128-bit random salt will
|
|
||||||
// be generated.
|
|
||||||
type Recipient struct {
|
|
||||||
Algorithm KeyAlgorithm
|
|
||||||
Key interface{}
|
|
||||||
KeyID string
|
|
||||||
PBES2Count int
|
|
||||||
PBES2Salt []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEncrypter creates an appropriate encrypter based on the key type
|
|
||||||
func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
|
|
||||||
encrypter := &genericEncrypter{
|
|
||||||
contentAlg: enc,
|
|
||||||
recipients: []recipientKeyInfo{},
|
|
||||||
cipher: getContentCipher(enc),
|
|
||||||
}
|
|
||||||
if opts != nil {
|
|
||||||
encrypter.compressionAlg = opts.Compression
|
|
||||||
encrypter.extraHeaders = opts.ExtraHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
if encrypter.cipher == nil {
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
var keyID string
|
|
||||||
var rawKey interface{}
|
|
||||||
switch encryptionKey := rcpt.Key.(type) {
|
|
||||||
case JSONWebKey:
|
|
||||||
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
|
|
||||||
case *JSONWebKey:
|
|
||||||
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
|
|
||||||
case OpaqueKeyEncrypter:
|
|
||||||
keyID, rawKey = encryptionKey.KeyID(), encryptionKey
|
|
||||||
default:
|
|
||||||
rawKey = encryptionKey
|
|
||||||
}
|
|
||||||
|
|
||||||
switch rcpt.Algorithm {
|
|
||||||
case DIRECT:
|
|
||||||
// Direct encryption mode must be treated differently
|
|
||||||
if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
|
|
||||||
return nil, ErrUnsupportedKeyType
|
|
||||||
}
|
|
||||||
if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
|
|
||||||
return nil, ErrInvalidKeySize
|
|
||||||
}
|
|
||||||
encrypter.keyGenerator = staticKeyGenerator{
|
|
||||||
key: rawKey.([]byte),
|
|
||||||
}
|
|
||||||
recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
|
|
||||||
recipientInfo.keyID = keyID
|
|
||||||
if rcpt.KeyID != "" {
|
|
||||||
recipientInfo.keyID = rcpt.KeyID
|
|
||||||
}
|
|
||||||
encrypter.recipients = []recipientKeyInfo{recipientInfo}
|
|
||||||
return encrypter, nil
|
|
||||||
case ECDH_ES:
|
|
||||||
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
|
|
||||||
typeOf := reflect.TypeOf(rawKey)
|
|
||||||
if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
|
|
||||||
return nil, ErrUnsupportedKeyType
|
|
||||||
}
|
|
||||||
encrypter.keyGenerator = ecKeyGenerator{
|
|
||||||
size: encrypter.cipher.keySize(),
|
|
||||||
algID: string(enc),
|
|
||||||
publicKey: rawKey.(*ecdsa.PublicKey),
|
|
||||||
}
|
|
||||||
recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
|
|
||||||
recipientInfo.keyID = keyID
|
|
||||||
if rcpt.KeyID != "" {
|
|
||||||
recipientInfo.keyID = rcpt.KeyID
|
|
||||||
}
|
|
||||||
encrypter.recipients = []recipientKeyInfo{recipientInfo}
|
|
||||||
return encrypter, nil
|
|
||||||
default:
|
|
||||||
// Can just add a standard recipient
|
|
||||||
encrypter.keyGenerator = randomKeyGenerator{
|
|
||||||
size: encrypter.cipher.keySize(),
|
|
||||||
}
|
|
||||||
err := encrypter.addRecipient(rcpt)
|
|
||||||
return encrypter, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMultiEncrypter creates a multi-encrypter based on the given parameters
|
|
||||||
func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
|
|
||||||
cipher := getContentCipher(enc)
|
|
||||||
|
|
||||||
if cipher == nil {
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
if len(rcpts) == 0 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
encrypter := &genericEncrypter{
|
|
||||||
contentAlg: enc,
|
|
||||||
recipients: []recipientKeyInfo{},
|
|
||||||
cipher: cipher,
|
|
||||||
keyGenerator: randomKeyGenerator{
|
|
||||||
size: cipher.keySize(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts != nil {
|
|
||||||
encrypter.compressionAlg = opts.Compression
|
|
||||||
encrypter.extraHeaders = opts.ExtraHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, recipient := range rcpts {
|
|
||||||
err := encrypter.addRecipient(recipient)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return encrypter, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
|
|
||||||
var recipientInfo recipientKeyInfo
|
|
||||||
|
|
||||||
switch recipient.Algorithm {
|
|
||||||
case DIRECT, ECDH_ES:
|
|
||||||
return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
|
|
||||||
}
|
|
||||||
|
|
||||||
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
|
|
||||||
if recipient.KeyID != "" {
|
|
||||||
recipientInfo.keyID = recipient.KeyID
|
|
||||||
}
|
|
||||||
|
|
||||||
switch recipient.Algorithm {
|
|
||||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
|
||||||
if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
|
|
||||||
sr.p2c = recipient.PBES2Count
|
|
||||||
sr.p2s = recipient.PBES2Salt
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
ctx.recipients = append(ctx.recipients, recipientInfo)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
|
|
||||||
switch encryptionKey := encryptionKey.(type) {
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
return newRSARecipient(alg, encryptionKey)
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
return newECDHRecipient(alg, encryptionKey)
|
|
||||||
case []byte:
|
|
||||||
return newSymmetricRecipient(alg, encryptionKey)
|
|
||||||
case string:
|
|
||||||
return newSymmetricRecipient(alg, []byte(encryptionKey))
|
|
||||||
case *JSONWebKey:
|
|
||||||
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
|
|
||||||
recipient.keyID = encryptionKey.KeyID
|
|
||||||
return recipient, err
|
|
||||||
}
|
|
||||||
if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok {
|
|
||||||
return newOpaqueKeyEncrypter(alg, encrypter)
|
|
||||||
}
|
|
||||||
return recipientKeyInfo{}, ErrUnsupportedKeyType
|
|
||||||
}
|
|
||||||
|
|
||||||
// newDecrypter creates an appropriate decrypter based on the key type
|
|
||||||
func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
|
|
||||||
switch decryptionKey := decryptionKey.(type) {
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
return &rsaDecrypterSigner{
|
|
||||||
privateKey: decryptionKey,
|
|
||||||
}, nil
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
return &ecDecrypterSigner{
|
|
||||||
privateKey: decryptionKey,
|
|
||||||
}, nil
|
|
||||||
case []byte:
|
|
||||||
return &symmetricKeyCipher{
|
|
||||||
key: decryptionKey,
|
|
||||||
}, nil
|
|
||||||
case string:
|
|
||||||
return &symmetricKeyCipher{
|
|
||||||
key: []byte(decryptionKey),
|
|
||||||
}, nil
|
|
||||||
case JSONWebKey:
|
|
||||||
return newDecrypter(decryptionKey.Key)
|
|
||||||
case *JSONWebKey:
|
|
||||||
return newDecrypter(decryptionKey.Key)
|
|
||||||
}
|
|
||||||
if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok {
|
|
||||||
return &opaqueKeyDecrypter{decrypter: okd}, nil
|
|
||||||
}
|
|
||||||
return nil, ErrUnsupportedKeyType
|
|
||||||
}
|
|
||||||
|
|
||||||
// Implementation of encrypt method producing a JWE object.
|
|
||||||
func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
|
|
||||||
return ctx.EncryptWithAuthData(plaintext, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Implementation of encrypt method producing a JWE object.
|
|
||||||
func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
|
|
||||||
obj := &JSONWebEncryption{}
|
|
||||||
obj.aad = aad
|
|
||||||
|
|
||||||
obj.protected = &rawHeader{}
|
|
||||||
err := obj.protected.set(headerEncryption, ctx.contentAlg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
obj.recipients = make([]recipientInfo, len(ctx.recipients))
|
|
||||||
|
|
||||||
if len(ctx.recipients) == 0 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
|
|
||||||
}
|
|
||||||
|
|
||||||
cek, headers, err := ctx.keyGenerator.genKey()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
obj.protected.merge(&headers)
|
|
||||||
|
|
||||||
for i, info := range ctx.recipients {
|
|
||||||
recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = recipient.header.set(headerAlgorithm, info.keyAlg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if info.keyID != "" {
|
|
||||||
err = recipient.header.set(headerKeyID, info.keyID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
obj.recipients[i] = recipient
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(ctx.recipients) == 1 {
|
|
||||||
// Move per-recipient headers into main protected header if there's
|
|
||||||
// only a single recipient.
|
|
||||||
obj.protected.merge(obj.recipients[0].header)
|
|
||||||
obj.recipients[0].header = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.compressionAlg != NONE {
|
|
||||||
plaintext, err = compress(ctx.compressionAlg, plaintext)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = obj.protected.set(headerCompression, ctx.compressionAlg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range ctx.extraHeaders {
|
|
||||||
b, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
(*obj.protected)[k] = makeRawMessage(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
authData := obj.computeAuthData()
|
|
||||||
parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
obj.iv = parts.iv
|
|
||||||
obj.ciphertext = parts.ciphertext
|
|
||||||
obj.tag = parts.tag
|
|
||||||
|
|
||||||
return obj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *genericEncrypter) Options() EncrypterOptions {
|
|
||||||
return EncrypterOptions{
|
|
||||||
Compression: ctx.compressionAlg,
|
|
||||||
ExtraHeaders: ctx.extraHeaders,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt and validate the object and return the plaintext. Note that this
|
|
||||||
// function does not support multi-recipient, if you desire multi-recipient
|
|
||||||
// decryption use DecryptMulti instead.
|
|
||||||
func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
|
|
||||||
headers := obj.mergedHeaders(nil)
|
|
||||||
|
|
||||||
if len(obj.recipients) > 1 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
|
|
||||||
}
|
|
||||||
|
|
||||||
critical, err := headers.getCritical()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(critical) > 0 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
|
|
||||||
}
|
|
||||||
|
|
||||||
key := tryJWKS(decryptionKey, obj.Header)
|
|
||||||
decrypter, err := newDecrypter(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cipher := getContentCipher(headers.getEncryption())
|
|
||||||
if cipher == nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
|
|
||||||
}
|
|
||||||
|
|
||||||
generator := randomKeyGenerator{
|
|
||||||
size: cipher.keySize(),
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := &aeadParts{
|
|
||||||
iv: obj.iv,
|
|
||||||
ciphertext: obj.ciphertext,
|
|
||||||
tag: obj.tag,
|
|
||||||
}
|
|
||||||
|
|
||||||
authData := obj.computeAuthData()
|
|
||||||
|
|
||||||
var plaintext []byte
|
|
||||||
recipient := obj.recipients[0]
|
|
||||||
recipientHeaders := obj.mergedHeaders(&recipient)
|
|
||||||
|
|
||||||
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
|
|
||||||
if err == nil {
|
|
||||||
// Found a valid CEK -- let's try to decrypt.
|
|
||||||
plaintext, err = cipher.decrypt(cek, authData, parts)
|
|
||||||
}
|
|
||||||
|
|
||||||
if plaintext == nil {
|
|
||||||
return nil, ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
// The "zip" header parameter may only be present in the protected header.
|
|
||||||
if comp := obj.protected.getCompression(); comp != "" {
|
|
||||||
plaintext, err = decompress(comp, plaintext)
|
|
||||||
}
|
|
||||||
|
|
||||||
return plaintext, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecryptMulti decrypts and validates the object and returns the plaintexts,
|
|
||||||
// with support for multiple recipients. It returns the index of the recipient
|
|
||||||
// for which the decryption was successful, the merged headers for that recipient,
|
|
||||||
// and the plaintext.
|
|
||||||
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
|
|
||||||
globalHeaders := obj.mergedHeaders(nil)
|
|
||||||
|
|
||||||
critical, err := globalHeaders.getCritical()
|
|
||||||
if err != nil {
|
|
||||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(critical) > 0 {
|
|
||||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
|
|
||||||
}
|
|
||||||
|
|
||||||
key := tryJWKS(decryptionKey, obj.Header)
|
|
||||||
decrypter, err := newDecrypter(key)
|
|
||||||
if err != nil {
|
|
||||||
return -1, Header{}, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
encryption := globalHeaders.getEncryption()
|
|
||||||
cipher := getContentCipher(encryption)
|
|
||||||
if cipher == nil {
|
|
||||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
|
|
||||||
}
|
|
||||||
|
|
||||||
generator := randomKeyGenerator{
|
|
||||||
size: cipher.keySize(),
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := &aeadParts{
|
|
||||||
iv: obj.iv,
|
|
||||||
ciphertext: obj.ciphertext,
|
|
||||||
tag: obj.tag,
|
|
||||||
}
|
|
||||||
|
|
||||||
authData := obj.computeAuthData()
|
|
||||||
|
|
||||||
index := -1
|
|
||||||
var plaintext []byte
|
|
||||||
var headers rawHeader
|
|
||||||
|
|
||||||
for i, recipient := range obj.recipients {
|
|
||||||
recipientHeaders := obj.mergedHeaders(&recipient)
|
|
||||||
|
|
||||||
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
|
|
||||||
if err == nil {
|
|
||||||
// Found a valid CEK -- let's try to decrypt.
|
|
||||||
plaintext, err = cipher.decrypt(cek, authData, parts)
|
|
||||||
if err == nil {
|
|
||||||
index = i
|
|
||||||
headers = recipientHeaders
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if plaintext == nil {
|
|
||||||
return -1, Header{}, nil, ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
// The "zip" header parameter may only be present in the protected header.
|
|
||||||
if comp := obj.protected.getCompression(); comp != "" {
|
|
||||||
plaintext, _ = decompress(comp, plaintext)
|
|
||||||
}
|
|
||||||
|
|
||||||
sanitized, err := headers.sanitized()
|
|
||||||
if err != nil {
|
|
||||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return index, sanitized, plaintext, err
|
|
||||||
}
|
|
27
vendor/github.com/go-jose/go-jose/v3/doc.go
generated
vendored
27
vendor/github.com/go-jose/go-jose/v3/doc.go
generated
vendored
@ -1,27 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
|
|
||||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
|
||||||
and Encryption set of standards. It implements encryption and signing based on
|
|
||||||
the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
|
|
||||||
Token support available in a sub-package. The library supports both the compact
|
|
||||||
and JWS/JWE JSON Serialization formats, and has optional support for multiple
|
|
||||||
recipients.
|
|
||||||
|
|
||||||
*/
|
|
||||||
package jose
|
|
191
vendor/github.com/go-jose/go-jose/v3/encoding.go
generated
vendored
191
vendor/github.com/go-jose/go-jose/v3/encoding.go
generated
vendored
@ -1,191 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/flate"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/binary"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Helper function to serialize known-good objects.
|
|
||||||
// Precondition: value is not a nil pointer.
|
|
||||||
func mustSerializeJSON(value interface{}) []byte {
|
|
||||||
out, err := json.Marshal(value)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
// We never want to serialize the top-level value "null," since it's not a
|
|
||||||
// valid JOSE message. But if a caller passes in a nil pointer to this method,
|
|
||||||
// MarshalJSON will happily serialize it as the top-level value "null". If
|
|
||||||
// that value is then embedded in another operation, for instance by being
|
|
||||||
// base64-encoded and fed as input to a signing algorithm
|
|
||||||
// (https://github.com/go-jose/go-jose/issues/22), the result will be
|
|
||||||
// incorrect. Because this method is intended for known-good objects, and a nil
|
|
||||||
// pointer is not a known-good object, we are free to panic in this case.
|
|
||||||
// Note: It's not possible to directly check whether the data pointed at by an
|
|
||||||
// interface is a nil pointer, so we do this hacky workaround.
|
|
||||||
// https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
|
|
||||||
if string(out) == "null" {
|
|
||||||
panic("Tried to serialize a nil pointer.")
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// Strip all newlines and whitespace
|
|
||||||
func stripWhitespace(data string) string {
|
|
||||||
buf := strings.Builder{}
|
|
||||||
buf.Grow(len(data))
|
|
||||||
for _, r := range data {
|
|
||||||
if !unicode.IsSpace(r) {
|
|
||||||
buf.WriteRune(r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Perform compression based on algorithm
|
|
||||||
func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
|
||||||
switch algorithm {
|
|
||||||
case DEFLATE:
|
|
||||||
return deflate(input)
|
|
||||||
default:
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Perform decompression based on algorithm
|
|
||||||
func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
|
||||||
switch algorithm {
|
|
||||||
case DEFLATE:
|
|
||||||
return inflate(input)
|
|
||||||
default:
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compress with DEFLATE
|
|
||||||
func deflate(input []byte) ([]byte, error) {
|
|
||||||
output := new(bytes.Buffer)
|
|
||||||
|
|
||||||
// Writing to byte buffer, err is always nil
|
|
||||||
writer, _ := flate.NewWriter(output, 1)
|
|
||||||
_, _ = io.Copy(writer, bytes.NewBuffer(input))
|
|
||||||
|
|
||||||
err := writer.Close()
|
|
||||||
return output.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decompress with DEFLATE
|
|
||||||
func inflate(input []byte) ([]byte, error) {
|
|
||||||
output := new(bytes.Buffer)
|
|
||||||
reader := flate.NewReader(bytes.NewBuffer(input))
|
|
||||||
|
|
||||||
_, err := io.Copy(output, reader)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = reader.Close()
|
|
||||||
return output.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
|
|
||||||
type byteBuffer struct {
|
|
||||||
data []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBuffer(data []byte) *byteBuffer {
|
|
||||||
if data == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &byteBuffer{
|
|
||||||
data: data,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
|
|
||||||
if len(data) > length {
|
|
||||||
panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
|
|
||||||
}
|
|
||||||
pad := make([]byte, length-len(data))
|
|
||||||
return newBuffer(append(pad, data...))
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBufferFromInt(num uint64) *byteBuffer {
|
|
||||||
data := make([]byte, 8)
|
|
||||||
binary.BigEndian.PutUint64(data, num)
|
|
||||||
return newBuffer(bytes.TrimLeft(data, "\x00"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *byteBuffer) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(b.base64())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *byteBuffer) UnmarshalJSON(data []byte) error {
|
|
||||||
var encoded string
|
|
||||||
err := json.Unmarshal(data, &encoded)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if encoded == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
decoded, err := base64URLDecode(encoded)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
*b = *newBuffer(decoded)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *byteBuffer) base64() string {
|
|
||||||
return base64.RawURLEncoding.EncodeToString(b.data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *byteBuffer) bytes() []byte {
|
|
||||||
// Handling nil here allows us to transparently handle nil slices when serializing.
|
|
||||||
if b == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return b.data
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b byteBuffer) bigInt() *big.Int {
|
|
||||||
return new(big.Int).SetBytes(b.data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b byteBuffer) toInt() int {
|
|
||||||
return int(b.bigInt().Int64())
|
|
||||||
}
|
|
||||||
|
|
||||||
// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C
|
|
||||||
func base64URLDecode(value string) ([]byte, error) {
|
|
||||||
value = strings.TrimRight(value, "=")
|
|
||||||
return base64.RawURLEncoding.DecodeString(value)
|
|
||||||
}
|
|
27
vendor/github.com/go-jose/go-jose/v3/json/LICENSE
generated
vendored
27
vendor/github.com/go-jose/go-jose/v3/json/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
|||||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
13
vendor/github.com/go-jose/go-jose/v3/json/README.md
generated
vendored
13
vendor/github.com/go-jose/go-jose/v3/json/README.md
generated
vendored
@ -1,13 +0,0 @@
|
|||||||
# Safe JSON
|
|
||||||
|
|
||||||
This repository contains a fork of the `encoding/json` package from Go 1.6.
|
|
||||||
|
|
||||||
The following changes were made:
|
|
||||||
|
|
||||||
* Object deserialization uses case-sensitive member name matching instead of
|
|
||||||
[case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
|
|
||||||
This is to avoid differences in the interpretation of JOSE messages between
|
|
||||||
go-jose and libraries written in other languages.
|
|
||||||
* When deserializing a JSON object, we check for duplicate keys and reject the
|
|
||||||
input whenever we detect a duplicate. Rather than trying to work with malformed
|
|
||||||
data, we prefer to reject it right away.
|
|
1217
vendor/github.com/go-jose/go-jose/v3/json/decode.go
generated
vendored
1217
vendor/github.com/go-jose/go-jose/v3/json/decode.go
generated
vendored
File diff suppressed because it is too large
Load Diff
1197
vendor/github.com/go-jose/go-jose/v3/json/encode.go
generated
vendored
1197
vendor/github.com/go-jose/go-jose/v3/json/encode.go
generated
vendored
File diff suppressed because it is too large
Load Diff
141
vendor/github.com/go-jose/go-jose/v3/json/indent.go
generated
vendored
141
vendor/github.com/go-jose/go-jose/v3/json/indent.go
generated
vendored
@ -1,141 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package json
|
|
||||||
|
|
||||||
import "bytes"
|
|
||||||
|
|
||||||
// Compact appends to dst the JSON-encoded src with
|
|
||||||
// insignificant space characters elided.
|
|
||||||
func Compact(dst *bytes.Buffer, src []byte) error {
|
|
||||||
return compact(dst, src, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
|
|
||||||
origLen := dst.Len()
|
|
||||||
var scan scanner
|
|
||||||
scan.reset()
|
|
||||||
start := 0
|
|
||||||
for i, c := range src {
|
|
||||||
if escape && (c == '<' || c == '>' || c == '&') {
|
|
||||||
if start < i {
|
|
||||||
dst.Write(src[start:i])
|
|
||||||
}
|
|
||||||
dst.WriteString(`\u00`)
|
|
||||||
dst.WriteByte(hex[c>>4])
|
|
||||||
dst.WriteByte(hex[c&0xF])
|
|
||||||
start = i + 1
|
|
||||||
}
|
|
||||||
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
|
||||||
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
|
||||||
if start < i {
|
|
||||||
dst.Write(src[start:i])
|
|
||||||
}
|
|
||||||
dst.WriteString(`\u202`)
|
|
||||||
dst.WriteByte(hex[src[i+2]&0xF])
|
|
||||||
start = i + 3
|
|
||||||
}
|
|
||||||
v := scan.step(&scan, c)
|
|
||||||
if v >= scanSkipSpace {
|
|
||||||
if v == scanError {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if start < i {
|
|
||||||
dst.Write(src[start:i])
|
|
||||||
}
|
|
||||||
start = i + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if scan.eof() == scanError {
|
|
||||||
dst.Truncate(origLen)
|
|
||||||
return scan.err
|
|
||||||
}
|
|
||||||
if start < len(src) {
|
|
||||||
dst.Write(src[start:])
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
|
|
||||||
dst.WriteByte('\n')
|
|
||||||
dst.WriteString(prefix)
|
|
||||||
for i := 0; i < depth; i++ {
|
|
||||||
dst.WriteString(indent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Indent appends to dst an indented form of the JSON-encoded src.
|
|
||||||
// Each element in a JSON object or array begins on a new,
|
|
||||||
// indented line beginning with prefix followed by one or more
|
|
||||||
// copies of indent according to the indentation nesting.
|
|
||||||
// The data appended to dst does not begin with the prefix nor
|
|
||||||
// any indentation, to make it easier to embed inside other formatted JSON data.
|
|
||||||
// Although leading space characters (space, tab, carriage return, newline)
|
|
||||||
// at the beginning of src are dropped, trailing space characters
|
|
||||||
// at the end of src are preserved and copied to dst.
|
|
||||||
// For example, if src has no trailing spaces, neither will dst;
|
|
||||||
// if src ends in a trailing newline, so will dst.
|
|
||||||
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
|
|
||||||
origLen := dst.Len()
|
|
||||||
var scan scanner
|
|
||||||
scan.reset()
|
|
||||||
needIndent := false
|
|
||||||
depth := 0
|
|
||||||
for _, c := range src {
|
|
||||||
scan.bytes++
|
|
||||||
v := scan.step(&scan, c)
|
|
||||||
if v == scanSkipSpace {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if v == scanError {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if needIndent && v != scanEndObject && v != scanEndArray {
|
|
||||||
needIndent = false
|
|
||||||
depth++
|
|
||||||
newline(dst, prefix, indent, depth)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emit semantically uninteresting bytes
|
|
||||||
// (in particular, punctuation in strings) unmodified.
|
|
||||||
if v == scanContinue {
|
|
||||||
dst.WriteByte(c)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add spacing around real punctuation.
|
|
||||||
switch c {
|
|
||||||
case '{', '[':
|
|
||||||
// delay indent so that empty object and array are formatted as {} and [].
|
|
||||||
needIndent = true
|
|
||||||
dst.WriteByte(c)
|
|
||||||
|
|
||||||
case ',':
|
|
||||||
dst.WriteByte(c)
|
|
||||||
newline(dst, prefix, indent, depth)
|
|
||||||
|
|
||||||
case ':':
|
|
||||||
dst.WriteByte(c)
|
|
||||||
dst.WriteByte(' ')
|
|
||||||
|
|
||||||
case '}', ']':
|
|
||||||
if needIndent {
|
|
||||||
// suppress indent in empty object/array
|
|
||||||
needIndent = false
|
|
||||||
} else {
|
|
||||||
depth--
|
|
||||||
newline(dst, prefix, indent, depth)
|
|
||||||
}
|
|
||||||
dst.WriteByte(c)
|
|
||||||
|
|
||||||
default:
|
|
||||||
dst.WriteByte(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if scan.eof() == scanError {
|
|
||||||
dst.Truncate(origLen)
|
|
||||||
return scan.err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
623
vendor/github.com/go-jose/go-jose/v3/json/scanner.go
generated
vendored
623
vendor/github.com/go-jose/go-jose/v3/json/scanner.go
generated
vendored
@ -1,623 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package json
|
|
||||||
|
|
||||||
// JSON value parser state machine.
|
|
||||||
// Just about at the limit of what is reasonable to write by hand.
|
|
||||||
// Some parts are a bit tedious, but overall it nicely factors out the
|
|
||||||
// otherwise common code from the multiple scanning functions
|
|
||||||
// in this package (Compact, Indent, checkValid, nextValue, etc).
|
|
||||||
//
|
|
||||||
// This file starts with two simple examples using the scanner
|
|
||||||
// before diving into the scanner itself.
|
|
||||||
|
|
||||||
import "strconv"
|
|
||||||
|
|
||||||
// checkValid verifies that data is valid JSON-encoded data.
|
|
||||||
// scan is passed in for use by checkValid to avoid an allocation.
|
|
||||||
func checkValid(data []byte, scan *scanner) error {
|
|
||||||
scan.reset()
|
|
||||||
for _, c := range data {
|
|
||||||
scan.bytes++
|
|
||||||
if scan.step(scan, c) == scanError {
|
|
||||||
return scan.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if scan.eof() == scanError {
|
|
||||||
return scan.err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nextValue splits data after the next whole JSON value,
|
|
||||||
// returning that value and the bytes that follow it as separate slices.
|
|
||||||
// scan is passed in for use by nextValue to avoid an allocation.
|
|
||||||
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
|
|
||||||
scan.reset()
|
|
||||||
for i, c := range data {
|
|
||||||
v := scan.step(scan, c)
|
|
||||||
if v >= scanEndObject {
|
|
||||||
switch v {
|
|
||||||
// probe the scanner with a space to determine whether we will
|
|
||||||
// get scanEnd on the next character. Otherwise, if the next character
|
|
||||||
// is not a space, scanEndTop allocates a needless error.
|
|
||||||
case scanEndObject, scanEndArray:
|
|
||||||
if scan.step(scan, ' ') == scanEnd {
|
|
||||||
return data[:i+1], data[i+1:], nil
|
|
||||||
}
|
|
||||||
case scanError:
|
|
||||||
return nil, nil, scan.err
|
|
||||||
case scanEnd:
|
|
||||||
return data[:i], data[i:], nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if scan.eof() == scanError {
|
|
||||||
return nil, nil, scan.err
|
|
||||||
}
|
|
||||||
return data, nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// A SyntaxError describes a JSON syntax error.
type SyntaxError struct {
	msg    string // human-readable description of the problem
	Offset int64  // error occurred after reading Offset bytes
}

// Error implements the error interface.
func (e *SyntaxError) Error() string { return e.msg }
|
|
||||||
|
|
||||||
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then feed bytes one at a time with
// scan.step(&scan, c). The returned opcode reports significant parsing
// events (beginning/ending literals, objects, and arrays) so the caller
// can follow along if it wishes.
// scanEnd means a single top-level value completed *before* the byte
// just passed in; the indication must be delayed to recognize the end
// of numbers (is 123 a whole value, or the start of 12345e+6?).
type scanner struct {
	// step executes the next state transition. A func value benchmarked
	// ~10% faster than an integer state plus a switch, and reads better.
	step func(*scanner, byte) int

	// endTop is set once the end of the top-level value is reached.
	endTop bool

	// parseState is the stack of composites in progress — array values,
	// object keys, object values — outermost at entry 0.
	parseState []int

	// err records the error that happened, if any.
	err error

	// One-byte redo support (see the undo method).
	redo      bool
	redoCode  int
	redoState func(*scanner, byte) int

	// bytes counts total input consumed; updated by decoder.Decode.
	bytes int64
}
|
|
||||||
|
|
||||||
// Opcodes returned by the state transition functions assigned to
// scanner.step and by the scanner.eof method. They describe the current
// state of the scan for callers that want to know. It is okay to ignore
// any individual return value: once one call returns scanError, every
// subsequent call returns scanError too.
const (
	// "Continue" opcodes.
	scanContinue     = iota // uninteresting byte
	scanBeginLiteral        // literal starts; end implied by next result != scanContinue
	scanBeginObject         // begin object
	scanObjectKey           // just finished an object key (string)
	scanObjectValue         // just finished a non-last object value
	scanEndObject           // end object (implies scanObjectValue if possible)
	scanBeginArray          // begin array
	scanArrayValue          // just finished an array value
	scanEndArray            // end array (implies scanArrayValue if possible)
	scanSkipSpace           // space byte; can skip; known to be last "continue" result

	// "Stop" opcodes.
	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
	scanError // hit an error; details in scanner.err
)
|
|
||||||
|
|
||||||
// Entries stored in the parseState stack. They give the current state of
// the composite value being scanned; for nested values the stack holds
// the outermost state at entry 0.
const (
	parseObjectKey   = iota // parsing an object key (before the colon)
	parseObjectValue        // parsing an object value (after the colon)
	parseArrayValue         // parsing an array element
)
|
|
||||||
|
|
||||||
// reset prepares the scanner for use.
|
|
||||||
// It must be called before calling s.step.
|
|
||||||
func (s *scanner) reset() {
|
|
||||||
s.step = stateBeginValue
|
|
||||||
s.parseState = s.parseState[0:0]
|
|
||||||
s.err = nil
|
|
||||||
s.redo = false
|
|
||||||
s.endTop = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// eof tells the scanner that the end of input has been reached.
|
|
||||||
// It returns a scan status just as s.step does.
|
|
||||||
func (s *scanner) eof() int {
|
|
||||||
if s.err != nil {
|
|
||||||
return scanError
|
|
||||||
}
|
|
||||||
if s.endTop {
|
|
||||||
return scanEnd
|
|
||||||
}
|
|
||||||
s.step(s, ' ')
|
|
||||||
if s.endTop {
|
|
||||||
return scanEnd
|
|
||||||
}
|
|
||||||
if s.err == nil {
|
|
||||||
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
|
|
||||||
}
|
|
||||||
return scanError
|
|
||||||
}
|
|
||||||
|
|
||||||
// pushParseState pushes a new parse state p onto the parse stack.
|
|
||||||
func (s *scanner) pushParseState(p int) {
|
|
||||||
s.parseState = append(s.parseState, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// popParseState pops a parse state (already obtained) off the stack
|
|
||||||
// and updates s.step accordingly.
|
|
||||||
func (s *scanner) popParseState() {
|
|
||||||
n := len(s.parseState) - 1
|
|
||||||
s.parseState = s.parseState[0:n]
|
|
||||||
s.redo = false
|
|
||||||
if n == 0 {
|
|
||||||
s.step = stateEndTop
|
|
||||||
s.endTop = true
|
|
||||||
} else {
|
|
||||||
s.step = stateEndValue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// isSpace reports whether c is a JSON whitespace byte.
func isSpace(c byte) bool {
	switch c {
	case ' ', '\t', '\r', '\n':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// stateBeginValueOrEmpty is the state after reading `[`.
|
|
||||||
func stateBeginValueOrEmpty(s *scanner, c byte) int {
|
|
||||||
if c <= ' ' && isSpace(c) {
|
|
||||||
return scanSkipSpace
|
|
||||||
}
|
|
||||||
if c == ']' {
|
|
||||||
return stateEndValue(s, c)
|
|
||||||
}
|
|
||||||
return stateBeginValue(s, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateBeginValue is the state at the beginning of the input.
|
|
||||||
func stateBeginValue(s *scanner, c byte) int {
|
|
||||||
if c <= ' ' && isSpace(c) {
|
|
||||||
return scanSkipSpace
|
|
||||||
}
|
|
||||||
switch c {
|
|
||||||
case '{':
|
|
||||||
s.step = stateBeginStringOrEmpty
|
|
||||||
s.pushParseState(parseObjectKey)
|
|
||||||
return scanBeginObject
|
|
||||||
case '[':
|
|
||||||
s.step = stateBeginValueOrEmpty
|
|
||||||
s.pushParseState(parseArrayValue)
|
|
||||||
return scanBeginArray
|
|
||||||
case '"':
|
|
||||||
s.step = stateInString
|
|
||||||
return scanBeginLiteral
|
|
||||||
case '-':
|
|
||||||
s.step = stateNeg
|
|
||||||
return scanBeginLiteral
|
|
||||||
case '0': // beginning of 0.123
|
|
||||||
s.step = state0
|
|
||||||
return scanBeginLiteral
|
|
||||||
case 't': // beginning of true
|
|
||||||
s.step = stateT
|
|
||||||
return scanBeginLiteral
|
|
||||||
case 'f': // beginning of false
|
|
||||||
s.step = stateF
|
|
||||||
return scanBeginLiteral
|
|
||||||
case 'n': // beginning of null
|
|
||||||
s.step = stateN
|
|
||||||
return scanBeginLiteral
|
|
||||||
}
|
|
||||||
if '1' <= c && c <= '9' { // beginning of 1234.5
|
|
||||||
s.step = state1
|
|
||||||
return scanBeginLiteral
|
|
||||||
}
|
|
||||||
return s.error(c, "looking for beginning of value")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateBeginStringOrEmpty is the state after reading `{`.
|
|
||||||
func stateBeginStringOrEmpty(s *scanner, c byte) int {
|
|
||||||
if c <= ' ' && isSpace(c) {
|
|
||||||
return scanSkipSpace
|
|
||||||
}
|
|
||||||
if c == '}' {
|
|
||||||
n := len(s.parseState)
|
|
||||||
s.parseState[n-1] = parseObjectValue
|
|
||||||
return stateEndValue(s, c)
|
|
||||||
}
|
|
||||||
return stateBeginString(s, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateBeginString is the state after reading `{"key": value,`.
|
|
||||||
func stateBeginString(s *scanner, c byte) int {
|
|
||||||
if c <= ' ' && isSpace(c) {
|
|
||||||
return scanSkipSpace
|
|
||||||
}
|
|
||||||
if c == '"' {
|
|
||||||
s.step = stateInString
|
|
||||||
return scanBeginLiteral
|
|
||||||
}
|
|
||||||
return s.error(c, "looking for beginning of object key string")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateEndValue is the state after completing a value,
|
|
||||||
// such as after reading `{}` or `true` or `["x"`.
|
|
||||||
func stateEndValue(s *scanner, c byte) int {
|
|
||||||
n := len(s.parseState)
|
|
||||||
if n == 0 {
|
|
||||||
// Completed top-level before the current byte.
|
|
||||||
s.step = stateEndTop
|
|
||||||
s.endTop = true
|
|
||||||
return stateEndTop(s, c)
|
|
||||||
}
|
|
||||||
if c <= ' ' && isSpace(c) {
|
|
||||||
s.step = stateEndValue
|
|
||||||
return scanSkipSpace
|
|
||||||
}
|
|
||||||
ps := s.parseState[n-1]
|
|
||||||
switch ps {
|
|
||||||
case parseObjectKey:
|
|
||||||
if c == ':' {
|
|
||||||
s.parseState[n-1] = parseObjectValue
|
|
||||||
s.step = stateBeginValue
|
|
||||||
return scanObjectKey
|
|
||||||
}
|
|
||||||
return s.error(c, "after object key")
|
|
||||||
case parseObjectValue:
|
|
||||||
if c == ',' {
|
|
||||||
s.parseState[n-1] = parseObjectKey
|
|
||||||
s.step = stateBeginString
|
|
||||||
return scanObjectValue
|
|
||||||
}
|
|
||||||
if c == '}' {
|
|
||||||
s.popParseState()
|
|
||||||
return scanEndObject
|
|
||||||
}
|
|
||||||
return s.error(c, "after object key:value pair")
|
|
||||||
case parseArrayValue:
|
|
||||||
if c == ',' {
|
|
||||||
s.step = stateBeginValue
|
|
||||||
return scanArrayValue
|
|
||||||
}
|
|
||||||
if c == ']' {
|
|
||||||
s.popParseState()
|
|
||||||
return scanEndArray
|
|
||||||
}
|
|
||||||
return s.error(c, "after array element")
|
|
||||||
}
|
|
||||||
return s.error(c, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateEndTop is the state after finishing the top-level value,
|
|
||||||
// such as after reading `{}` or `[1,2,3]`.
|
|
||||||
// Only space characters should be seen now.
|
|
||||||
func stateEndTop(s *scanner, c byte) int {
|
|
||||||
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
|
|
||||||
// Complain about non-space byte on next call.
|
|
||||||
s.error(c, "after top-level value")
|
|
||||||
}
|
|
||||||
return scanEnd
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateInString is the state after reading `"`.
|
|
||||||
func stateInString(s *scanner, c byte) int {
|
|
||||||
if c == '"' {
|
|
||||||
s.step = stateEndValue
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
if c == '\\' {
|
|
||||||
s.step = stateInStringEsc
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
if c < 0x20 {
|
|
||||||
return s.error(c, "in string literal")
|
|
||||||
}
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
|
||||||
func stateInStringEsc(s *scanner, c byte) int {
|
|
||||||
switch c {
|
|
||||||
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
|
||||||
s.step = stateInString
|
|
||||||
return scanContinue
|
|
||||||
case 'u':
|
|
||||||
s.step = stateInStringEscU
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in string escape code")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
|
||||||
func stateInStringEscU(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
|
||||||
s.step = stateInStringEscU1
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
// numbers
|
|
||||||
return s.error(c, "in \\u hexadecimal character escape")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
|
|
||||||
func stateInStringEscU1(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
|
||||||
s.step = stateInStringEscU12
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
// numbers
|
|
||||||
return s.error(c, "in \\u hexadecimal character escape")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
|
|
||||||
func stateInStringEscU12(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
|
||||||
s.step = stateInStringEscU123
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
// numbers
|
|
||||||
return s.error(c, "in \\u hexadecimal character escape")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
|
|
||||||
func stateInStringEscU123(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
|
||||||
s.step = stateInString
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
// numbers
|
|
||||||
return s.error(c, "in \\u hexadecimal character escape")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateNeg is the state after reading `-` during a number.
|
|
||||||
func stateNeg(s *scanner, c byte) int {
|
|
||||||
if c == '0' {
|
|
||||||
s.step = state0
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
if '1' <= c && c <= '9' {
|
|
||||||
s.step = state1
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in numeric literal")
|
|
||||||
}
|
|
||||||
|
|
||||||
// state1 is the state after reading a non-zero integer during a number,
|
|
||||||
// such as after reading `1` or `100` but not `0`.
|
|
||||||
func state1(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' {
|
|
||||||
s.step = state1
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return state0(s, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// state0 is the state after reading `0` during a number.
|
|
||||||
func state0(s *scanner, c byte) int {
|
|
||||||
if c == '.' {
|
|
||||||
s.step = stateDot
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
if c == 'e' || c == 'E' {
|
|
||||||
s.step = stateE
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return stateEndValue(s, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateDot is the state after reading the integer and decimal point in a number,
|
|
||||||
// such as after reading `1.`.
|
|
||||||
func stateDot(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' {
|
|
||||||
s.step = stateDot0
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "after decimal point in numeric literal")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
|
||||||
// digits of a number, such as after reading `3.14`.
|
|
||||||
func stateDot0(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' {
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
if c == 'e' || c == 'E' {
|
|
||||||
s.step = stateE
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return stateEndValue(s, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateE is the state after reading the mantissa and e in a number,
|
|
||||||
// such as after reading `314e` or `0.314e`.
|
|
||||||
func stateE(s *scanner, c byte) int {
|
|
||||||
if c == '+' || c == '-' {
|
|
||||||
s.step = stateESign
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return stateESign(s, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
|
||||||
// such as after reading `314e-` or `0.314e+`.
|
|
||||||
func stateESign(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' {
|
|
||||||
s.step = stateE0
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in exponent of numeric literal")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateE0 is the state after reading the mantissa, e, optional sign,
|
|
||||||
// and at least one digit of the exponent in a number,
|
|
||||||
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
|
||||||
func stateE0(s *scanner, c byte) int {
|
|
||||||
if '0' <= c && c <= '9' {
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return stateEndValue(s, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateT is the state after reading `t`.
|
|
||||||
func stateT(s *scanner, c byte) int {
|
|
||||||
if c == 'r' {
|
|
||||||
s.step = stateTr
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal true (expecting 'r')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateTr is the state after reading `tr`.
|
|
||||||
func stateTr(s *scanner, c byte) int {
|
|
||||||
if c == 'u' {
|
|
||||||
s.step = stateTru
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal true (expecting 'u')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateTru is the state after reading `tru`.
|
|
||||||
func stateTru(s *scanner, c byte) int {
|
|
||||||
if c == 'e' {
|
|
||||||
s.step = stateEndValue
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal true (expecting 'e')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateF is the state after reading `f`.
|
|
||||||
func stateF(s *scanner, c byte) int {
|
|
||||||
if c == 'a' {
|
|
||||||
s.step = stateFa
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal false (expecting 'a')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateFa is the state after reading `fa`.
|
|
||||||
func stateFa(s *scanner, c byte) int {
|
|
||||||
if c == 'l' {
|
|
||||||
s.step = stateFal
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal false (expecting 'l')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateFal is the state after reading `fal`.
|
|
||||||
func stateFal(s *scanner, c byte) int {
|
|
||||||
if c == 's' {
|
|
||||||
s.step = stateFals
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal false (expecting 's')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateFals is the state after reading `fals`.
|
|
||||||
func stateFals(s *scanner, c byte) int {
|
|
||||||
if c == 'e' {
|
|
||||||
s.step = stateEndValue
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal false (expecting 'e')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateN is the state after reading `n`.
|
|
||||||
func stateN(s *scanner, c byte) int {
|
|
||||||
if c == 'u' {
|
|
||||||
s.step = stateNu
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal null (expecting 'u')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateNu is the state after reading `nu`.
|
|
||||||
func stateNu(s *scanner, c byte) int {
|
|
||||||
if c == 'l' {
|
|
||||||
s.step = stateNul
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal null (expecting 'l')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateNul is the state after reading `nul`.
|
|
||||||
func stateNul(s *scanner, c byte) int {
|
|
||||||
if c == 'l' {
|
|
||||||
s.step = stateEndValue
|
|
||||||
return scanContinue
|
|
||||||
}
|
|
||||||
return s.error(c, "in literal null (expecting 'l')")
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateError is the state after reaching a syntax error,
|
|
||||||
// such as after reading `[1}` or `5.1.2`.
|
|
||||||
func stateError(s *scanner, c byte) int {
|
|
||||||
return scanError
|
|
||||||
}
|
|
||||||
|
|
||||||
// error records an error and switches to the error state.
|
|
||||||
func (s *scanner) error(c byte, context string) int {
|
|
||||||
s.step = stateError
|
|
||||||
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
|
|
||||||
return scanError
|
|
||||||
}
|
|
||||||
|
|
||||||
// quoteChar renders c as a single-quoted character literal for error
// messages, e.g. 'a', '\n', '"'.
func quoteChar(c byte) string {
	// Special cases that differ from quoted-string escaping.
	switch c {
	case '\'':
		return `'\''`
	case '"':
		return `'"'`
	}
	// Let strconv do the escaping, then swap the double quotes for single.
	q := strconv.Quote(string(c))
	return "'" + q[1:len(q)-1] + "'"
}
|
|
||||||
|
|
||||||
// undo causes the scanner to return scanCode from the next state transition.
|
|
||||||
// This gives callers a simple 1-byte undo mechanism.
|
|
||||||
func (s *scanner) undo(scanCode int) {
|
|
||||||
if s.redo {
|
|
||||||
panic("json: invalid use of scanner")
|
|
||||||
}
|
|
||||||
s.redoCode = scanCode
|
|
||||||
s.redoState = s.step
|
|
||||||
s.step = stateRedo
|
|
||||||
s.redo = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// stateRedo helps implement the scanner's 1-byte undo.
|
|
||||||
func stateRedo(s *scanner, c byte) int {
|
|
||||||
s.redo = false
|
|
||||||
s.step = s.redoState
|
|
||||||
return s.redoCode
|
|
||||||
}
|
|
485
vendor/github.com/go-jose/go-jose/v3/json/stream.go
generated
vendored
485
vendor/github.com/go-jose/go-jose/v3/json/stream.go
generated
vendored
@ -1,485 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package json
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Decoder reads and decodes JSON objects from an input stream.
|
|
||||||
type Decoder struct {
|
|
||||||
r io.Reader
|
|
||||||
buf []byte
|
|
||||||
d decodeState
|
|
||||||
scanp int // start of unread data in buf
|
|
||||||
scan scanner
|
|
||||||
err error
|
|
||||||
|
|
||||||
tokenState int
|
|
||||||
tokenStack []int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDecoder returns a new decoder that reads from r.
|
|
||||||
//
|
|
||||||
// The decoder introduces its own buffering and may
|
|
||||||
// read data from r beyond the JSON values requested.
|
|
||||||
func NewDecoder(r io.Reader) *Decoder {
|
|
||||||
return &Decoder{r: r}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use `SetNumberType` instead
|
|
||||||
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
|
|
||||||
// Number instead of as a float64.
|
|
||||||
func (dec *Decoder) UseNumber() { dec.d.numberType = UnmarshalJSONNumber }
|
|
||||||
|
|
||||||
// SetNumberType causes the Decoder to unmarshal a number into an interface{} as a
|
|
||||||
// Number, float64 or int64 depending on `t` enum value.
|
|
||||||
func (dec *Decoder) SetNumberType(t NumberUnmarshalType) { dec.d.numberType = t }
|
|
||||||
|
|
||||||
// Decode reads the next JSON-encoded value from its
|
|
||||||
// input and stores it in the value pointed to by v.
|
|
||||||
//
|
|
||||||
// See the documentation for Unmarshal for details about
|
|
||||||
// the conversion of JSON into a Go value.
|
|
||||||
func (dec *Decoder) Decode(v interface{}) error {
|
|
||||||
if dec.err != nil {
|
|
||||||
return dec.err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := dec.tokenPrepareForDecode(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !dec.tokenValueAllowed() {
|
|
||||||
return &SyntaxError{msg: "not at beginning of value"}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read whole value into buffer.
|
|
||||||
n, err := dec.readValue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
|
|
||||||
dec.scanp += n
|
|
||||||
|
|
||||||
// Don't save err from unmarshal into dec.err:
|
|
||||||
// the connection is still usable since we read a complete JSON
|
|
||||||
// object from it before the error happened.
|
|
||||||
err = dec.d.unmarshal(v)
|
|
||||||
|
|
||||||
// fixup token streaming state
|
|
||||||
dec.tokenValueEnd()
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Buffered returns a reader of the data remaining in the Decoder's
|
|
||||||
// buffer. The reader is valid until the next call to Decode.
|
|
||||||
func (dec *Decoder) Buffered() io.Reader {
|
|
||||||
return bytes.NewReader(dec.buf[dec.scanp:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// readValue reads a JSON value into dec.buf.
|
|
||||||
// It returns the length of the encoding.
|
|
||||||
func (dec *Decoder) readValue() (int, error) {
|
|
||||||
dec.scan.reset()
|
|
||||||
|
|
||||||
scanp := dec.scanp
|
|
||||||
var err error
|
|
||||||
Input:
|
|
||||||
for {
|
|
||||||
// Look in the buffer for a new value.
|
|
||||||
for i, c := range dec.buf[scanp:] {
|
|
||||||
dec.scan.bytes++
|
|
||||||
v := dec.scan.step(&dec.scan, c)
|
|
||||||
if v == scanEnd {
|
|
||||||
scanp += i
|
|
||||||
break Input
|
|
||||||
}
|
|
||||||
// scanEnd is delayed one byte.
|
|
||||||
// We might block trying to get that byte from src,
|
|
||||||
// so instead invent a space byte.
|
|
||||||
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
|
|
||||||
scanp += i + 1
|
|
||||||
break Input
|
|
||||||
}
|
|
||||||
if v == scanError {
|
|
||||||
dec.err = dec.scan.err
|
|
||||||
return 0, dec.scan.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
scanp = len(dec.buf)
|
|
||||||
|
|
||||||
// Did the last read have an error?
|
|
||||||
// Delayed until now to allow buffer scan.
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
if dec.scan.step(&dec.scan, ' ') == scanEnd {
|
|
||||||
break Input
|
|
||||||
}
|
|
||||||
if nonSpace(dec.buf) {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dec.err = err
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
n := scanp - dec.scanp
|
|
||||||
err = dec.refill()
|
|
||||||
scanp = dec.scanp + n
|
|
||||||
}
|
|
||||||
return scanp - dec.scanp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dec *Decoder) refill() error {
|
|
||||||
// Make room to read more into the buffer.
|
|
||||||
// First slide down data already consumed.
|
|
||||||
if dec.scanp > 0 {
|
|
||||||
n := copy(dec.buf, dec.buf[dec.scanp:])
|
|
||||||
dec.buf = dec.buf[:n]
|
|
||||||
dec.scanp = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Grow buffer if not large enough.
|
|
||||||
const minRead = 512
|
|
||||||
if cap(dec.buf)-len(dec.buf) < minRead {
|
|
||||||
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
|
|
||||||
copy(newBuf, dec.buf)
|
|
||||||
dec.buf = newBuf
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read. Delay error for next iteration (after scan).
|
|
||||||
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
|
||||||
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func nonSpace(b []byte) bool {
|
|
||||||
for _, c := range b {
|
|
||||||
if !isSpace(c) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
	w   io.Writer
	err error // first write error; once set, Encode keeps returning it
}
|
|
||||||
|
|
||||||
// NewEncoder returns a new encoder that writes to w.
|
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
|
||||||
return &Encoder{w: w}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode writes the JSON encoding of v to the stream,
|
|
||||||
// followed by a newline character.
|
|
||||||
//
|
|
||||||
// See the documentation for Marshal for details about the
|
|
||||||
// conversion of Go values to JSON.
|
|
||||||
func (enc *Encoder) Encode(v interface{}) error {
|
|
||||||
if enc.err != nil {
|
|
||||||
return enc.err
|
|
||||||
}
|
|
||||||
e := newEncodeState()
|
|
||||||
err := e.marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Terminate each value with a newline.
|
|
||||||
// This makes the output look a little nicer
|
|
||||||
// when debugging, and some kind of space
|
|
||||||
// is required if the encoded value was a number,
|
|
||||||
// so that the reader knows there aren't more
|
|
||||||
// digits coming.
|
|
||||||
e.WriteByte('\n')
|
|
||||||
|
|
||||||
if _, err = enc.w.Write(e.Bytes()); err != nil {
|
|
||||||
enc.err = err
|
|
||||||
}
|
|
||||||
encodeStatePool.Put(e)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RawMessage is a raw encoded JSON object. It implements Marshaler and
// Unmarshaler and can be used to delay JSON decoding or precompute a
// JSON encoding.
type RawMessage []byte

// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
	return *m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
	}
	*m = append((*m)[:0], data...)
	return nil
}
|
|
||||||
|
|
||||||
// Compile-time checks that RawMessage implements both interfaces.
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
|
|
||||||
|
|
||||||
// A Token holds a value of one of these types:
//
//	Delim, for the four JSON delimiters [ ] { }
//	bool, for JSON booleans
//	float64, for JSON numbers
//	Number, for JSON numbers
//	string, for JSON string literals
//	nil, for JSON null
//
type Token interface{}

// Token-scanning states for Decoder.tokenState; they record where the
// decoder currently is within the JSON grammar.
const (
	tokenTopValue   = iota // at the top level, expecting any value
	tokenArrayStart        // just consumed '[', expecting first element or ']'
	tokenArrayValue        // expecting an array element
	tokenArrayComma        // after an element, expecting ',' or ']'
	tokenObjectStart       // just consumed '{', expecting first key or '}'
	tokenObjectKey         // expecting an object key string
	tokenObjectColon       // after a key, expecting ':'
	tokenObjectValue       // expecting an object member value
	tokenObjectComma       // after a member, expecting ',' or '}'
)
|
|
||||||
|
|
||||||
// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
	// Note: Not calling peek before switch, to avoid
	// putting peek into the standard Decode path.
	// peek is only called when using the Token API.
	switch dec.tokenState {
	case tokenArrayComma:
		// Between array elements: the next non-space byte must be the
		// separating comma before another value may be decoded.
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ',' {
			return &SyntaxError{"expected comma after array element", 0}
		}
		dec.scanp++ // consume the ','
		dec.tokenState = tokenArrayValue
	case tokenObjectColon:
		// After an object key: the next non-space byte must be the
		// colon separating the key from its value.
		c, err := dec.peek()
		if err != nil {
			return err
		}
		if c != ':' {
			return &SyntaxError{"expected colon after object key", 0}
		}
		dec.scanp++ // consume the ':'
		dec.tokenState = tokenObjectValue
	}
	return nil
}
|
|
||||||
|
|
||||||
func (dec *Decoder) tokenValueAllowed() bool {
|
|
||||||
switch dec.tokenState {
|
|
||||||
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dec *Decoder) tokenValueEnd() {
|
|
||||||
switch dec.tokenState {
|
|
||||||
case tokenArrayStart, tokenArrayValue:
|
|
||||||
dec.tokenState = tokenArrayComma
|
|
||||||
case tokenObjectValue:
|
|
||||||
dec.tokenState = tokenObjectComma
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune

// String returns the delimiter as a one-character string.
func (delim Delim) String() string {
	return string(delim)
}
|
|
||||||
|
|
||||||
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
	for {
		c, err := dec.peek()
		if err != nil {
			return nil, err
		}
		switch c {
		case '[':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			dec.scanp++
			// Push the current state so it can be restored at the matching ']'.
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenArrayStart
			return Delim('['), nil

		case ']':
			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
				return dec.tokenError(c)
			}
			dec.scanp++
			// Pop the state that was saved at the matching '['.
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim(']'), nil

		case '{':
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			dec.scanp++
			// Push the current state so it can be restored at the matching '}'.
			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
			dec.tokenState = tokenObjectStart
			return Delim('{'), nil

		case '}':
			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
				return dec.tokenError(c)
			}
			dec.scanp++
			// Pop the state that was saved at the matching '{'.
			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
			dec.tokenValueEnd()
			return Delim('}'), nil

		case ':':
			if dec.tokenState != tokenObjectColon {
				return dec.tokenError(c)
			}
			dec.scanp++
			dec.tokenState = tokenObjectValue
			// Colons are elided from the token stream; keep scanning.
			continue

		case ',':
			// Commas are likewise elided; validate them against the state.
			if dec.tokenState == tokenArrayComma {
				dec.scanp++
				dec.tokenState = tokenArrayValue
				continue
			}
			if dec.tokenState == tokenObjectComma {
				dec.scanp++
				dec.tokenState = tokenObjectKey
				continue
			}
			return dec.tokenError(c)

		case '"':
			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
				// Decode an object key as a plain string. Temporarily switch
				// to tokenTopValue so Decode accepts a bare value here.
				var x string
				old := dec.tokenState
				dec.tokenState = tokenTopValue
				err := dec.Decode(&x)
				dec.tokenState = old
				if err != nil {
					clearOffset(err)
					return nil, err
				}
				dec.tokenState = tokenObjectColon
				return x, nil
			}
			// A string in value position uses the generic value path below.
			fallthrough

		default:
			if !dec.tokenValueAllowed() {
				return dec.tokenError(c)
			}
			var x interface{}
			if err := dec.Decode(&x); err != nil {
				clearOffset(err)
				return nil, err
			}
			return x, nil
		}
	}
}
|
|
||||||
|
|
||||||
func clearOffset(err error) {
|
|
||||||
if s, ok := err.(*SyntaxError); ok {
|
|
||||||
s.Offset = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dec *Decoder) tokenError(c byte) (Token, error) {
|
|
||||||
var context string
|
|
||||||
switch dec.tokenState {
|
|
||||||
case tokenTopValue:
|
|
||||||
context = " looking for beginning of value"
|
|
||||||
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
|
||||||
context = " looking for beginning of value"
|
|
||||||
case tokenArrayComma:
|
|
||||||
context = " after array element"
|
|
||||||
case tokenObjectKey:
|
|
||||||
context = " looking for beginning of object key string"
|
|
||||||
case tokenObjectColon:
|
|
||||||
context = " after object key"
|
|
||||||
case tokenObjectComma:
|
|
||||||
context = " after object key:value pair"
|
|
||||||
}
|
|
||||||
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
// More reports whether there is another element in the
|
|
||||||
// current array or object being parsed.
|
|
||||||
func (dec *Decoder) More() bool {
|
|
||||||
c, err := dec.peek()
|
|
||||||
return err == nil && c != ']' && c != '}'
|
|
||||||
}
|
|
||||||
|
|
||||||
// peek returns the next non-whitespace byte in the input without
// consuming it, refilling the buffer from the underlying reader as needed.
func (dec *Decoder) peek() (byte, error) {
	var err error
	for {
		// Scan the unread portion of the buffer for a non-space byte.
		for i := dec.scanp; i < len(dec.buf); i++ {
			c := dec.buf[i]
			if isSpace(c) {
				continue
			}
			dec.scanp = i // point at the byte without consuming it
			return c, nil
		}
		// buffer has been scanned, now report any error
		if err != nil {
			return 0, err
		}
		// The refill error is checked on the next iteration, so any bytes
		// read alongside the error are examined before it is reported.
		err = dec.refill()
	}
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
TODO
|
|
||||||
|
|
||||||
// EncodeToken writes the given JSON token to the stream.
|
|
||||||
// It returns an error if the delimiters [ ] { } are not properly used.
|
|
||||||
//
|
|
||||||
// EncodeToken does not call Flush, because usually it is part of
|
|
||||||
// a larger operation such as Encode, and those will call Flush when finished.
|
|
||||||
// Callers that create an Encoder and then invoke EncodeToken directly,
|
|
||||||
// without using Encode, need to call Flush when finished to ensure that
|
|
||||||
// the JSON is written to the underlying writer.
|
|
||||||
func (e *Encoder) EncodeToken(t Token) error {
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
*/
|
|
44
vendor/github.com/go-jose/go-jose/v3/json/tags.go
generated
vendored
44
vendor/github.com/go-jose/go-jose/v3/json/tags.go
generated
vendored
@ -1,44 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package json
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	name, opts, _ := strings.Cut(tag, ",")
	return name, tagOptions(opts)
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	remaining := string(o)
	for remaining != "" {
		var option string
		option, remaining, _ = strings.Cut(remaining, ",")
		if option == optionName {
			return true
		}
	}
	return false
}
|
|
295
vendor/github.com/go-jose/go-jose/v3/jwe.go
generated
vendored
295
vendor/github.com/go-jose/go-jose/v3/jwe.go
generated
vendored
@ -1,295 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
type rawJSONWebEncryption struct {
	Protected    *byteBuffer        `json:"protected,omitempty"`
	Unprotected  *rawHeader         `json:"unprotected,omitempty"`
	Header       *rawHeader         `json:"header,omitempty"`
	Recipients   []rawRecipientInfo `json:"recipients,omitempty"`
	Aad          *byteBuffer        `json:"aad,omitempty"`
	EncryptedKey *byteBuffer        `json:"encrypted_key,omitempty"`
	Iv           *byteBuffer        `json:"iv,omitempty"`
	Ciphertext   *byteBuffer        `json:"ciphertext,omitempty"`
	Tag          *byteBuffer        `json:"tag,omitempty"`
}

// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
type rawRecipientInfo struct {
	Header       *rawHeader `json:"header,omitempty"`
	EncryptedKey string     `json:"encrypted_key,omitempty"`
}

// JSONWebEncryption represents an encrypted JWE object after parsing.
type JSONWebEncryption struct {
	// Header holds the sanitized merge of the protected and unprotected headers.
	Header                   Header
	protected, unprotected   *rawHeader
	recipients               []recipientInfo
	aad, iv, ciphertext, tag []byte
	// original retains the raw parsed form when the object came from parsing,
	// so the exact protected-header bytes can be reused (see computeAuthData).
	original *rawJSONWebEncryption
}

// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
type recipientInfo struct {
	header       *rawHeader
	encryptedKey []byte
}
|
|
||||||
|
|
||||||
// GetAuthData retrieves the (optional) authenticated data attached to the object.
|
|
||||||
func (obj JSONWebEncryption) GetAuthData() []byte {
|
|
||||||
if obj.aad != nil {
|
|
||||||
out := make([]byte, len(obj.aad))
|
|
||||||
copy(out, obj.aad)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the merged header values
func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
	// Merge order: protected, then unprotected, then (if given) the
	// per-recipient header. NOTE(review): presumably later merges override
	// earlier values on conflict — confirm against rawHeader.merge.
	out := rawHeader{}
	out.merge(obj.protected)
	out.merge(obj.unprotected)

	if recipient != nil {
		out.merge(recipient.header)
	}

	return out
}
|
|
||||||
|
|
||||||
// Get the additional authenticated data from a JWE object.
func (obj JSONWebEncryption) computeAuthData() []byte {
	var protected string

	switch {
	case obj.original != nil && obj.original.Protected != nil:
		// Reuse the exact serialization from the parsed input: the AAD must
		// match the bytes that were originally authenticated.
		protected = obj.original.Protected.base64()
	case obj.protected != nil:
		protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected)))
	default:
		protected = ""
	}

	// AAD is the encoded protected header, optionally followed by '.' and
	// the base64url-encoded additional authenticated data.
	output := []byte(protected)
	if obj.aad != nil {
		output = append(output, '.')
		output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...)
	}

	return output
}
|
|
||||||
|
|
||||||
// ParseEncrypted parses an encrypted message in compact or JWE JSON Serialization format.
|
|
||||||
func ParseEncrypted(input string) (*JSONWebEncryption, error) {
|
|
||||||
input = stripWhitespace(input)
|
|
||||||
if strings.HasPrefix(input, "{") {
|
|
||||||
return parseEncryptedFull(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
return parseEncryptedCompact(input)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseEncryptedFull parses a message in the full JWE JSON Serialization format.
func parseEncryptedFull(input string) (*JSONWebEncryption, error) {
	var parsed rawJSONWebEncryption
	err := json.Unmarshal([]byte(input), &parsed)
	if err != nil {
		return nil, err
	}

	return parsed.sanitized()
}
|
|
||||||
|
|
||||||
// sanitized produces a cleaned-up JWE object from the raw JSON.
func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
	obj := &JSONWebEncryption{
		original:    parsed,
		unprotected: parsed.Unprotected,
	}

	// Check that there is not a nonce in the unprotected headers
	if parsed.Unprotected != nil {
		if nonce := parsed.Unprotected.getNonce(); nonce != "" {
			return nil, ErrUnprotectedNonce
		}
	}
	if parsed.Header != nil {
		if nonce := parsed.Header.getNonce(); nonce != "" {
			return nil, ErrUnprotectedNonce
		}
	}

	// Decode the protected header, which arrives as a base64 byteBuffer.
	if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
		err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
		if err != nil {
			return nil, fmt.Errorf("go-jose/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
		}
	}

	// Note: this must be called _after_ we parse the protected header,
	// otherwise fields from the protected header will not get picked up.
	var err error
	mergedHeaders := obj.mergedHeaders(nil)
	obj.Header, err = mergedHeaders.sanitized()
	if err != nil {
		return nil, fmt.Errorf("go-jose/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
	}

	if len(parsed.Recipients) == 0 {
		// Flattened serialization: the single recipient's header and key
		// appear at the top level of the JWE JSON object.
		obj.recipients = []recipientInfo{
			{
				header:       parsed.Header,
				encryptedKey: parsed.EncryptedKey.bytes(),
			},
		}
	} else {
		obj.recipients = make([]recipientInfo, len(parsed.Recipients))
		for r := range parsed.Recipients {
			encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey)
			if err != nil {
				return nil, err
			}

			// Check that there is not a nonce in the unprotected header
			if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" {
				return nil, ErrUnprotectedNonce
			}

			obj.recipients[r].header = parsed.Recipients[r].Header
			obj.recipients[r].encryptedKey = encryptedKey
		}
	}

	// Every recipient must resolve to a complete alg/enc pair once the
	// shared and per-recipient headers are merged.
	for _, recipient := range obj.recipients {
		headers := obj.mergedHeaders(&recipient)
		if headers.getAlgorithm() == "" || headers.getEncryption() == "" {
			return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers")
		}
	}

	obj.iv = parsed.Iv.bytes()
	obj.ciphertext = parsed.Ciphertext.bytes()
	obj.tag = parsed.Tag.bytes()
	obj.aad = parsed.Aad.bytes()

	return obj, nil
}
|
|
||||||
|
|
||||||
// parseEncryptedCompact parses a message in compact format.
func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
	// Compact JWE layout: protected.encrypted_key.iv.ciphertext.tag
	parts := strings.Split(input, ".")
	if len(parts) != 5 {
		return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
	}

	rawProtected, err := base64URLDecode(parts[0])
	if err != nil {
		return nil, err
	}

	encryptedKey, err := base64URLDecode(parts[1])
	if err != nil {
		return nil, err
	}

	iv, err := base64URLDecode(parts[2])
	if err != nil {
		return nil, err
	}

	ciphertext, err := base64URLDecode(parts[3])
	if err != nil {
		return nil, err
	}

	tag, err := base64URLDecode(parts[4])
	if err != nil {
		return nil, err
	}

	// Build the equivalent raw JSON form and run it through the same
	// sanitization path as the full serialization.
	raw := &rawJSONWebEncryption{
		Protected:    newBuffer(rawProtected),
		EncryptedKey: newBuffer(encryptedKey),
		Iv:           newBuffer(iv),
		Ciphertext:   newBuffer(ciphertext),
		Tag:          newBuffer(tag),
	}

	return raw.sanitized()
}
|
|
||||||
|
|
||||||
// CompactSerialize serializes an object using the compact serialization format.
func (obj JSONWebEncryption) CompactSerialize() (string, error) {
	// The compact form can only represent a single recipient with no
	// unprotected or per-recipient headers, and requires a protected header.
	if len(obj.recipients) != 1 || obj.unprotected != nil ||
		obj.protected == nil || obj.recipients[0].header != nil {
		return "", ErrNotSupported
	}

	serializedProtected := mustSerializeJSON(obj.protected)

	// protected.encrypted_key.iv.ciphertext.tag, each base64url-encoded.
	return fmt.Sprintf(
		"%s.%s.%s.%s.%s",
		base64.RawURLEncoding.EncodeToString(serializedProtected),
		base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
		base64.RawURLEncoding.EncodeToString(obj.iv),
		base64.RawURLEncoding.EncodeToString(obj.ciphertext),
		base64.RawURLEncoding.EncodeToString(obj.tag)), nil
}
|
|
||||||
|
|
||||||
// FullSerialize serializes an object using the full JSON serialization format.
func (obj JSONWebEncryption) FullSerialize() string {
	// NOTE(review): assumes at least one recipient is present; an empty
	// recipients slice would panic on the index below — confirm that
	// construction/parsing always guarantees this.
	raw := rawJSONWebEncryption{
		Unprotected:  obj.unprotected,
		Iv:           newBuffer(obj.iv),
		Ciphertext:   newBuffer(obj.ciphertext),
		EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
		Tag:          newBuffer(obj.tag),
		Aad:          newBuffer(obj.aad),
		Recipients:   []rawRecipientInfo{},
	}

	if len(obj.recipients) > 1 {
		// Multiple recipients: emit the general form with a recipients array.
		for _, recipient := range obj.recipients {
			info := rawRecipientInfo{
				Header:       recipient.header,
				EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey),
			}
			raw.Recipients = append(raw.Recipients, info)
		}
	} else {
		// Use flattened serialization
		raw.Header = obj.recipients[0].header
		raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
	}

	if obj.protected != nil {
		raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
	}

	return string(mustSerializeJSON(raw))
}
|
|
798
vendor/github.com/go-jose/go-jose/v3/jwk.go
generated
vendored
798
vendor/github.com/go-jose/go-jose/v3/jwk.go
generated
vendored
@ -1,798 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/ed25519"
|
|
||||||
"crypto/elliptic"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/sha256"
|
|
||||||
"crypto/x509"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"net/url"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing.
type rawJSONWebKey struct {
	Use string      `json:"use,omitempty"`
	Kty string      `json:"kty,omitempty"`
	Kid string      `json:"kid,omitempty"`
	Crv string      `json:"crv,omitempty"`
	Alg string      `json:"alg,omitempty"`
	K   *byteBuffer `json:"k,omitempty"`
	X   *byteBuffer `json:"x,omitempty"`
	Y   *byteBuffer `json:"y,omitempty"`
	N   *byteBuffer `json:"n,omitempty"`
	E   *byteBuffer `json:"e,omitempty"`
	// -- Following fields are only used for private keys --
	// RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
	// completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
	// we have a private key whereas D == nil means we have only a public key.
	D  *byteBuffer `json:"d,omitempty"`
	P  *byteBuffer `json:"p,omitempty"`
	Q  *byteBuffer `json:"q,omitempty"`
	Dp *byteBuffer `json:"dp,omitempty"`
	Dq *byteBuffer `json:"dq,omitempty"`
	Qi *byteBuffer `json:"qi,omitempty"`
	// Certificates
	X5c       []string `json:"x5c,omitempty"`
	X5u       string   `json:"x5u,omitempty"`
	X5tSHA1   string   `json:"x5t,omitempty"`
	X5tSHA256 string   `json:"x5t#S256,omitempty"`
}

// JSONWebKey represents a public or private key in JWK format.
type JSONWebKey struct {
	// Cryptographic key, can be a symmetric or asymmetric key.
	Key interface{}
	// Key identifier, parsed from `kid` header.
	KeyID string
	// Key algorithm, parsed from `alg` header.
	Algorithm string
	// Key use, parsed from `use` header.
	Use string

	// X.509 certificate chain, parsed from `x5c` header.
	Certificates []*x509.Certificate
	// X.509 certificate URL, parsed from `x5u` header.
	CertificatesURL *url.URL
	// X.509 certificate thumbprint (SHA-1), parsed from `x5t` header.
	CertificateThumbprintSHA1 []byte
	// X.509 certificate thumbprint (SHA-256), parsed from `x5t#S256` header.
	CertificateThumbprintSHA256 []byte
}
|
|
||||||
|
|
||||||
// MarshalJSON serializes the given key to its JSON representation.
|
|
||||||
func (k JSONWebKey) MarshalJSON() ([]byte, error) {
|
|
||||||
var raw *rawJSONWebKey
|
|
||||||
var err error
|
|
||||||
|
|
||||||
switch key := k.Key.(type) {
|
|
||||||
case ed25519.PublicKey:
|
|
||||||
raw = fromEdPublicKey(key)
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
raw, err = fromEcPublicKey(key)
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
raw = fromRsaPublicKey(key)
|
|
||||||
case ed25519.PrivateKey:
|
|
||||||
raw, err = fromEdPrivateKey(key)
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
raw, err = fromEcPrivateKey(key)
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
raw, err = fromRsaPrivateKey(key)
|
|
||||||
case []byte:
|
|
||||||
raw, err = fromSymmetricKey(key)
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
raw.Kid = k.KeyID
|
|
||||||
raw.Alg = k.Algorithm
|
|
||||||
raw.Use = k.Use
|
|
||||||
|
|
||||||
for _, cert := range k.Certificates {
|
|
||||||
raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
|
|
||||||
}
|
|
||||||
|
|
||||||
x5tSHA1Len := len(k.CertificateThumbprintSHA1)
|
|
||||||
x5tSHA256Len := len(k.CertificateThumbprintSHA256)
|
|
||||||
if x5tSHA1Len > 0 {
|
|
||||||
if x5tSHA1Len != sha1.Size {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-1 thumbprint (must be %d bytes, not %d)", sha1.Size, x5tSHA1Len)
|
|
||||||
}
|
|
||||||
raw.X5tSHA1 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA1)
|
|
||||||
}
|
|
||||||
if x5tSHA256Len > 0 {
|
|
||||||
if x5tSHA256Len != sha256.Size {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-256 thumbprint (must be %d bytes, not %d)", sha256.Size, x5tSHA256Len)
|
|
||||||
}
|
|
||||||
raw.X5tSHA256 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA256)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If cert chain is attached (as opposed to being behind a URL), check the
|
|
||||||
// keys thumbprints to make sure they match what is expected. This is to
|
|
||||||
// ensure we don't accidentally produce a JWK with semantically inconsistent
|
|
||||||
// data in the headers.
|
|
||||||
if len(k.Certificates) > 0 {
|
|
||||||
expectedSHA1 := sha1.Sum(k.Certificates[0].Raw)
|
|
||||||
expectedSHA256 := sha256.Sum256(k.Certificates[0].Raw)
|
|
||||||
|
|
||||||
if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(k.CertificateThumbprintSHA1, expectedSHA1[:]) {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid SHA-1 thumbprint, does not match cert chain")
|
|
||||||
}
|
|
||||||
if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(k.CertificateThumbprintSHA256, expectedSHA256[:]) {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid or SHA-256 thumbprint, does not match cert chain")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if k.CertificatesURL != nil {
|
|
||||||
raw.X5u = k.CertificatesURL.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Marshal(raw)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON reads a key from its JSON representation.
|
|
||||||
func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
|
|
||||||
var raw rawJSONWebKey
|
|
||||||
err = json.Unmarshal(data, &raw)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
certs, err := parseCertificateChain(raw.X5c)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("go-jose/go-jose: failed to unmarshal x5c field: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var key interface{}
|
|
||||||
var certPub interface{}
|
|
||||||
var keyPub interface{}
|
|
||||||
|
|
||||||
if len(certs) > 0 {
|
|
||||||
// We need to check that leaf public key matches the key embedded in this
|
|
||||||
// JWK, as required by the standard (see RFC 7517, Section 4.7). Otherwise
|
|
||||||
// the JWK parsed could be semantically invalid. Technically, should also
|
|
||||||
// check key usage fields and other extensions on the cert here, but the
|
|
||||||
// standard doesn't exactly explain how they're supposed to map from the
|
|
||||||
// JWK representation to the X.509 extensions.
|
|
||||||
certPub = certs[0].PublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
switch raw.Kty {
|
|
||||||
case "EC":
|
|
||||||
if raw.D != nil {
|
|
||||||
key, err = raw.ecPrivateKey()
|
|
||||||
if err == nil {
|
|
||||||
keyPub = key.(*ecdsa.PrivateKey).Public()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
key, err = raw.ecPublicKey()
|
|
||||||
keyPub = key
|
|
||||||
}
|
|
||||||
case "RSA":
|
|
||||||
if raw.D != nil {
|
|
||||||
key, err = raw.rsaPrivateKey()
|
|
||||||
if err == nil {
|
|
||||||
keyPub = key.(*rsa.PrivateKey).Public()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
key, err = raw.rsaPublicKey()
|
|
||||||
keyPub = key
|
|
||||||
}
|
|
||||||
case "oct":
|
|
||||||
if certPub != nil {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, found 'oct' (symmetric) key with cert chain")
|
|
||||||
}
|
|
||||||
key, err = raw.symmetricKey()
|
|
||||||
case "OKP":
|
|
||||||
if raw.Crv == "Ed25519" && raw.X != nil {
|
|
||||||
if raw.D != nil {
|
|
||||||
key, err = raw.edPrivateKey()
|
|
||||||
if err == nil {
|
|
||||||
keyPub = key.(ed25519.PrivateKey).Public()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
key, err = raw.edPublicKey()
|
|
||||||
keyPub = key
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if certPub != nil && keyPub != nil {
|
|
||||||
if !reflect.DeepEqual(certPub, keyPub) {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
*k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use, Certificates: certs}
|
|
||||||
|
|
||||||
if raw.X5u != "" {
|
|
||||||
k.CertificatesURL, err = url.Parse(raw.X5u)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("go-jose/go-jose: invalid JWK, x5u header is invalid URL: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// x5t parameters are base64url-encoded SHA thumbprints
|
|
||||||
// See RFC 7517, Section 4.8, https://tools.ietf.org/html/rfc7517#section-4.8
|
|
||||||
x5tSHA1bytes, err := base64URLDecode(raw.X5tSHA1)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, x5t header has invalid encoding")
|
|
||||||
}
|
|
||||||
|
|
||||||
// RFC 7517, Section 4.8 is ambiguous as to whether the digest output should be byte or hex,
|
|
||||||
// for this reason, after base64 decoding, if the size is sha1.Size it's likely that the value is a byte encoded
|
|
||||||
// checksum so we skip this. Otherwise if the checksum was hex encoded we expect a 40 byte sized array so we'll
|
|
||||||
// try to hex decode it. When Marshalling this value we'll always use a base64 encoded version of byte format checksum.
|
|
||||||
if len(x5tSHA1bytes) == 2*sha1.Size {
|
|
||||||
hx, err := hex.DecodeString(string(x5tSHA1bytes))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t: %v", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
x5tSHA1bytes = hx
|
|
||||||
}
|
|
||||||
|
|
||||||
k.CertificateThumbprintSHA1 = x5tSHA1bytes
|
|
||||||
|
|
||||||
x5tSHA256bytes, err := base64URLDecode(raw.X5tSHA256)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header has invalid encoding")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(x5tSHA256bytes) == 2*sha256.Size {
|
|
||||||
hx256, err := hex.DecodeString(string(x5tSHA256bytes))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t#S256: %v", err)
|
|
||||||
}
|
|
||||||
x5tSHA256bytes = hx256
|
|
||||||
}
|
|
||||||
|
|
||||||
k.CertificateThumbprintSHA256 = x5tSHA256bytes
|
|
||||||
|
|
||||||
x5tSHA1Len := len(k.CertificateThumbprintSHA1)
|
|
||||||
x5tSHA256Len := len(k.CertificateThumbprintSHA256)
|
|
||||||
if x5tSHA1Len > 0 && x5tSHA1Len != sha1.Size {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, x5t header is of incorrect size")
|
|
||||||
}
|
|
||||||
if x5tSHA256Len > 0 && x5tSHA256Len != sha256.Size {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header is of incorrect size")
|
|
||||||
}
|
|
||||||
|
|
||||||
// If certificate chain *and* thumbprints are set, verify correctness.
|
|
||||||
if len(k.Certificates) > 0 {
|
|
||||||
leaf := k.Certificates[0]
|
|
||||||
sha1sum := sha1.Sum(leaf.Raw)
|
|
||||||
sha256sum := sha256.Sum256(leaf.Raw)
|
|
||||||
|
|
||||||
if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(sha1sum[:], k.CertificateThumbprintSHA1) {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t value")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(sha256sum[:], k.CertificateThumbprintSHA256) {
|
|
||||||
return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t#S256 value")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// JSONWebKeySet represents a JWK Set object.
// See RFC 7517, Section 5, https://tools.ietf.org/html/rfc7517#section-5
type JSONWebKeySet struct {
	// Keys holds the member keys of the set.
	Keys []JSONWebKey `json:"keys"`
}
|
|
||||||
|
|
||||||
// Key convenience method returns keys by key ID. Specification states
|
|
||||||
// that a JWK Set "SHOULD" use distinct key IDs, but allows for some
|
|
||||||
// cases where they are not distinct. Hence method returns a slice
|
|
||||||
// of JSONWebKeys.
|
|
||||||
func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {
|
|
||||||
var keys []JSONWebKey
|
|
||||||
for _, key := range s.Keys {
|
|
||||||
if key.KeyID == kid {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Thumbprint input templates; the required members appear in the
// lexicographic order mandated by RFC 7638 (JWK Thumbprint).
const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`
|
|
||||||
|
|
||||||
func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
|
|
||||||
coordLength := curveSize(curve)
|
|
||||||
crv, err := curveName(curve)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(x.Bytes()) > coordLength || len(y.Bytes()) > coordLength {
|
|
||||||
return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(ecThumbprintTemplate, crv,
|
|
||||||
newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
|
|
||||||
newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func rsaThumbprintInput(n *big.Int, e int) (string, error) {
|
|
||||||
return fmt.Sprintf(rsaThumbprintTemplate,
|
|
||||||
newBufferFromInt(uint64(e)).base64(),
|
|
||||||
newBuffer(n.Bytes()).base64()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func edThumbprintInput(ed ed25519.PublicKey) (string, error) {
|
|
||||||
crv := "Ed25519"
|
|
||||||
if len(ed) > 32 {
|
|
||||||
return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
|
|
||||||
}
|
|
||||||
return fmt.Sprintf(edThumbprintTemplate, crv,
|
|
||||||
newFixedSizeBuffer(ed, 32).base64()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Thumbprint computes the JWK Thumbprint of a key using the
|
|
||||||
// indicated hash algorithm.
|
|
||||||
func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
|
|
||||||
var input string
|
|
||||||
var err error
|
|
||||||
switch key := k.Key.(type) {
|
|
||||||
case ed25519.PublicKey:
|
|
||||||
input, err = edThumbprintInput(key)
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
input, err = rsaThumbprintInput(key.N, key.E)
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
input, err = rsaThumbprintInput(key.N, key.E)
|
|
||||||
case ed25519.PrivateKey:
|
|
||||||
input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
h := hash.New()
|
|
||||||
_, _ = h.Write([]byte(input))
|
|
||||||
return h.Sum(nil), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
|
|
||||||
func (k *JSONWebKey) IsPublic() bool {
|
|
||||||
switch k.Key.(type) {
|
|
||||||
case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Public creates JSONWebKey with corresponding public key if JWK represents asymmetric private key.
|
|
||||||
func (k *JSONWebKey) Public() JSONWebKey {
|
|
||||||
if k.IsPublic() {
|
|
||||||
return *k
|
|
||||||
}
|
|
||||||
ret := *k
|
|
||||||
switch key := k.Key.(type) {
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
ret.Key = key.Public()
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
ret.Key = key.Public()
|
|
||||||
case ed25519.PrivateKey:
|
|
||||||
ret.Key = key.Public()
|
|
||||||
default:
|
|
||||||
return JSONWebKey{} // returning invalid key
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// Valid checks that the key contains the expected parameters.
|
|
||||||
func (k *JSONWebKey) Valid() bool {
|
|
||||||
if k.Key == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
switch key := k.Key.(type) {
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
if key.Curve == nil || key.X == nil || key.Y == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
if key.N == nil || key.E == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
case ed25519.PublicKey:
|
|
||||||
if len(key) != 32 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
case ed25519.PrivateKey:
|
|
||||||
if len(key) != 64 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
|
|
||||||
if key.N == nil || key.E == nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA key, missing n/e values")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &rsa.PublicKey{
|
|
||||||
N: key.N.bigInt(),
|
|
||||||
E: key.E.toInt(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromEdPublicKey(pub ed25519.PublicKey) *rawJSONWebKey {
|
|
||||||
return &rawJSONWebKey{
|
|
||||||
Kty: "OKP",
|
|
||||||
Crv: "Ed25519",
|
|
||||||
X: newBuffer(pub),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromRsaPublicKey(pub *rsa.PublicKey) *rawJSONWebKey {
|
|
||||||
return &rawJSONWebKey{
|
|
||||||
Kty: "RSA",
|
|
||||||
N: newBuffer(pub.N.Bytes()),
|
|
||||||
E: newBufferFromInt(uint64(pub.E)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
|
|
||||||
var curve elliptic.Curve
|
|
||||||
switch key.Crv {
|
|
||||||
case "P-256":
|
|
||||||
curve = elliptic.P256()
|
|
||||||
case "P-384":
|
|
||||||
curve = elliptic.P384()
|
|
||||||
case "P-521":
|
|
||||||
curve = elliptic.P521()
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
|
|
||||||
}
|
|
||||||
|
|
||||||
if key.X == nil || key.Y == nil {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid EC key, missing x/y values")
|
|
||||||
}
|
|
||||||
|
|
||||||
// The length of this octet string MUST be the full size of a coordinate for
|
|
||||||
// the curve specified in the "crv" parameter.
|
|
||||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2
|
|
||||||
if curveSize(curve) != len(key.X.data) {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for x")
|
|
||||||
}
|
|
||||||
|
|
||||||
if curveSize(curve) != len(key.Y.data) {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for y")
|
|
||||||
}
|
|
||||||
|
|
||||||
x := key.X.bigInt()
|
|
||||||
y := key.Y.bigInt()
|
|
||||||
|
|
||||||
if !curve.IsOnCurve(x, y) {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ecdsa.PublicKey{
|
|
||||||
Curve: curve,
|
|
||||||
X: x,
|
|
||||||
Y: y,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
|
|
||||||
if pub == nil || pub.X == nil || pub.Y == nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (nil, or X/Y missing)")
|
|
||||||
}
|
|
||||||
|
|
||||||
name, err := curveName(pub.Curve)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
size := curveSize(pub.Curve)
|
|
||||||
|
|
||||||
xBytes := pub.X.Bytes()
|
|
||||||
yBytes := pub.Y.Bytes()
|
|
||||||
|
|
||||||
if len(xBytes) > size || len(yBytes) > size {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (X/Y too large)")
|
|
||||||
}
|
|
||||||
|
|
||||||
key := &rawJSONWebKey{
|
|
||||||
Kty: "EC",
|
|
||||||
Crv: name,
|
|
||||||
X: newFixedSizeBuffer(xBytes, size),
|
|
||||||
Y: newFixedSizeBuffer(yBytes, size),
|
|
||||||
}
|
|
||||||
|
|
||||||
return key, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
|
|
||||||
var missing []string
|
|
||||||
switch {
|
|
||||||
case key.D == nil:
|
|
||||||
missing = append(missing, "D")
|
|
||||||
case key.X == nil:
|
|
||||||
missing = append(missing, "X")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(missing) > 0 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
privateKey := make([]byte, ed25519.PrivateKeySize)
|
|
||||||
copy(privateKey[0:32], key.D.bytes())
|
|
||||||
copy(privateKey[32:], key.X.bytes())
|
|
||||||
rv := ed25519.PrivateKey(privateKey)
|
|
||||||
return rv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
|
|
||||||
if key.X == nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid Ed key, missing x value")
|
|
||||||
}
|
|
||||||
publicKey := make([]byte, ed25519.PublicKeySize)
|
|
||||||
copy(publicKey[0:32], key.X.bytes())
|
|
||||||
rv := ed25519.PublicKey(publicKey)
|
|
||||||
return rv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
|
|
||||||
var missing []string
|
|
||||||
switch {
|
|
||||||
case key.N == nil:
|
|
||||||
missing = append(missing, "N")
|
|
||||||
case key.E == nil:
|
|
||||||
missing = append(missing, "E")
|
|
||||||
case key.D == nil:
|
|
||||||
missing = append(missing, "D")
|
|
||||||
case key.P == nil:
|
|
||||||
missing = append(missing, "P")
|
|
||||||
case key.Q == nil:
|
|
||||||
missing = append(missing, "Q")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(missing) > 0 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
|
|
||||||
}
|
|
||||||
|
|
||||||
rv := &rsa.PrivateKey{
|
|
||||||
PublicKey: rsa.PublicKey{
|
|
||||||
N: key.N.bigInt(),
|
|
||||||
E: key.E.toInt(),
|
|
||||||
},
|
|
||||||
D: key.D.bigInt(),
|
|
||||||
Primes: []*big.Int{
|
|
||||||
key.P.bigInt(),
|
|
||||||
key.Q.bigInt(),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
if key.Dp != nil {
|
|
||||||
rv.Precomputed.Dp = key.Dp.bigInt()
|
|
||||||
}
|
|
||||||
if key.Dq != nil {
|
|
||||||
rv.Precomputed.Dq = key.Dq.bigInt()
|
|
||||||
}
|
|
||||||
if key.Qi != nil {
|
|
||||||
rv.Precomputed.Qinv = key.Qi.bigInt()
|
|
||||||
}
|
|
||||||
|
|
||||||
err := rv.Validate()
|
|
||||||
return rv, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromEdPrivateKey(ed ed25519.PrivateKey) (*rawJSONWebKey, error) {
|
|
||||||
raw := fromEdPublicKey(ed25519.PublicKey(ed[32:]))
|
|
||||||
|
|
||||||
raw.D = newBuffer(ed[0:32])
|
|
||||||
return raw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJSONWebKey, error) {
|
|
||||||
if len(rsa.Primes) != 2 {
|
|
||||||
return nil, ErrUnsupportedKeyType
|
|
||||||
}
|
|
||||||
|
|
||||||
raw := fromRsaPublicKey(&rsa.PublicKey)
|
|
||||||
|
|
||||||
raw.D = newBuffer(rsa.D.Bytes())
|
|
||||||
raw.P = newBuffer(rsa.Primes[0].Bytes())
|
|
||||||
raw.Q = newBuffer(rsa.Primes[1].Bytes())
|
|
||||||
|
|
||||||
if rsa.Precomputed.Dp != nil {
|
|
||||||
raw.Dp = newBuffer(rsa.Precomputed.Dp.Bytes())
|
|
||||||
}
|
|
||||||
if rsa.Precomputed.Dq != nil {
|
|
||||||
raw.Dq = newBuffer(rsa.Precomputed.Dq.Bytes())
|
|
||||||
}
|
|
||||||
if rsa.Precomputed.Qinv != nil {
|
|
||||||
raw.Qi = newBuffer(rsa.Precomputed.Qinv.Bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
return raw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
|
|
||||||
var curve elliptic.Curve
|
|
||||||
switch key.Crv {
|
|
||||||
case "P-256":
|
|
||||||
curve = elliptic.P256()
|
|
||||||
case "P-384":
|
|
||||||
curve = elliptic.P384()
|
|
||||||
case "P-521":
|
|
||||||
curve = elliptic.P521()
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
|
|
||||||
}
|
|
||||||
|
|
||||||
if key.X == nil || key.Y == nil || key.D == nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values")
|
|
||||||
}
|
|
||||||
|
|
||||||
// The length of this octet string MUST be the full size of a coordinate for
|
|
||||||
// the curve specified in the "crv" parameter.
|
|
||||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2
|
|
||||||
if curveSize(curve) != len(key.X.data) {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for x")
|
|
||||||
}
|
|
||||||
|
|
||||||
if curveSize(curve) != len(key.Y.data) {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for y")
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
|
|
||||||
if dSize(curve) != len(key.D.data) {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for d")
|
|
||||||
}
|
|
||||||
|
|
||||||
x := key.X.bigInt()
|
|
||||||
y := key.Y.bigInt()
|
|
||||||
|
|
||||||
if !curve.IsOnCurve(x, y) {
|
|
||||||
return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ecdsa.PrivateKey{
|
|
||||||
PublicKey: ecdsa.PublicKey{
|
|
||||||
Curve: curve,
|
|
||||||
X: x,
|
|
||||||
Y: y,
|
|
||||||
},
|
|
||||||
D: key.D.bigInt(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) {
|
|
||||||
raw, err := fromEcPublicKey(&ec.PublicKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if ec.D == nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key")
|
|
||||||
}
|
|
||||||
|
|
||||||
raw.D = newFixedSizeBuffer(ec.D.Bytes(), dSize(ec.PublicKey.Curve))
|
|
||||||
|
|
||||||
return raw, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// dSize returns the size in octets for the "d" member of an elliptic curve
|
|
||||||
// private key.
|
|
||||||
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
|
|
||||||
// octets (where n is the order of the curve).
|
|
||||||
// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
|
|
||||||
func dSize(curve elliptic.Curve) int {
|
|
||||||
order := curve.Params().P
|
|
||||||
bitLen := order.BitLen()
|
|
||||||
size := bitLen / 8
|
|
||||||
if bitLen%8 != 0 {
|
|
||||||
size++
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) {
|
|
||||||
return &rawJSONWebKey{
|
|
||||||
Kty: "oct",
|
|
||||||
K: newBuffer(key),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
|
|
||||||
if key.K == nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid OCT (symmetric) key, missing k value")
|
|
||||||
}
|
|
||||||
return key.K.bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func tryJWKS(key interface{}, headers ...Header) interface{} {
|
|
||||||
var jwks JSONWebKeySet
|
|
||||||
|
|
||||||
switch jwksType := key.(type) {
|
|
||||||
case *JSONWebKeySet:
|
|
||||||
jwks = *jwksType
|
|
||||||
case JSONWebKeySet:
|
|
||||||
jwks = jwksType
|
|
||||||
default:
|
|
||||||
return key
|
|
||||||
}
|
|
||||||
|
|
||||||
var kid string
|
|
||||||
for _, header := range headers {
|
|
||||||
if header.KeyID != "" {
|
|
||||||
kid = header.KeyID
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if kid == "" {
|
|
||||||
return key
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := jwks.Key(kid)
|
|
||||||
if len(keys) == 0 {
|
|
||||||
return key
|
|
||||||
}
|
|
||||||
|
|
||||||
return keys[0].Key
|
|
||||||
}
|
|
366
vendor/github.com/go-jose/go-jose/v3/jws.go
generated
vendored
366
vendor/github.com/go-jose/go-jose/v3/jws.go
generated
vendored
@ -1,366 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
type rawJSONWebSignature struct {
	Payload    *byteBuffer        `json:"payload,omitempty"`
	Signatures []rawSignatureInfo `json:"signatures,omitempty"`
	// Protected/Header/Signature carry the flattened serialization form,
	// used when there is exactly one signature and no "signatures" array.
	Protected *byteBuffer `json:"protected,omitempty"`
	Header    *rawHeader  `json:"header,omitempty"`
	Signature *byteBuffer `json:"signature,omitempty"`
}
|
|
||||||
|
|
||||||
// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
// Protected keeps the original serialized header bytes so that round-tripping
// arbitrary (unregistered) header fields is lossless.
type rawSignatureInfo struct {
	Protected *byteBuffer `json:"protected,omitempty"`
	Header    *rawHeader  `json:"header,omitempty"`
	Signature *byteBuffer `json:"signature,omitempty"`
}
|
|
||||||
|
|
||||||
// JSONWebSignature represents a signed JWS object after parsing.
type JSONWebSignature struct {
	// payload holds the signed content; it has not been verified yet.
	payload []byte
	// Signatures attached to this object (may be more than one for multi-sig).
	// Be careful about accessing these directly, prefer to use Verify() or
	// VerifyMulti() to ensure that the data you're getting is verified.
	Signatures []Signature
}
|
|
||||||
|
|
||||||
// Signature represents a single signature over the JWS payload and protected header.
type Signature struct {
	// Merged header fields. Contains both protected and unprotected header
	// values. Prefer using Protected and Unprotected fields instead of this.
	// Values in this header may or may not have been signed and in general
	// should not be trusted.
	Header Header

	// Protected header. Values in this header were signed and
	// will be verified as part of the signature verification process.
	Protected Header

	// Unprotected header. Values in this header were not signed
	// and in general should not be trusted.
	Unprotected Header

	// The actual signature value
	Signature []byte

	// protected and header are the raw parsed forms of the protected and
	// unprotected headers; original preserves the unprocessed
	// rawSignatureInfo so serialization can be lossless.
	protected *rawHeader
	header    *rawHeader
	original  *rawSignatureInfo
}
|
|
||||||
|
|
||||||
// ParseSigned parses a signed message in compact or JWS JSON Serialization format.
|
|
||||||
func ParseSigned(signature string) (*JSONWebSignature, error) {
|
|
||||||
signature = stripWhitespace(signature)
|
|
||||||
if strings.HasPrefix(signature, "{") {
|
|
||||||
return parseSignedFull(signature)
|
|
||||||
}
|
|
||||||
|
|
||||||
return parseSignedCompact(signature, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseDetached parses a signed message in compact serialization format with detached payload.
|
|
||||||
func ParseDetached(signature string, payload []byte) (*JSONWebSignature, error) {
|
|
||||||
if payload == nil {
|
|
||||||
return nil, errors.New("go-jose/go-jose: nil payload")
|
|
||||||
}
|
|
||||||
return parseSignedCompact(stripWhitespace(signature), payload)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get a header value
|
|
||||||
func (sig Signature) mergedHeaders() rawHeader {
|
|
||||||
out := rawHeader{}
|
|
||||||
out.merge(sig.protected)
|
|
||||||
out.merge(sig.header)
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// computeAuthData computes the data to be signed/verified for one signature:
// base64url(protected-header) || '.' || payload, where the payload is
// base64url-encoded unless the protected header's "b64" member says
// otherwise (see getB64). The original protected-header bytes are preferred
// over re-serializing, so unregistered header fields round-trip losslessly.
func (obj JSONWebSignature) computeAuthData(payload []byte, signature *Signature) ([]byte, error) {
	var authData bytes.Buffer

	protectedHeader := new(rawHeader)

	if signature.original != nil && signature.original.Protected != nil {
		// Use the exact bytes that were parsed, not a re-serialization.
		if err := json.Unmarshal(signature.original.Protected.bytes(), protectedHeader); err != nil {
			return nil, err
		}
		authData.WriteString(signature.original.Protected.base64())
	} else if signature.protected != nil {
		protectedHeader = signature.protected
		authData.WriteString(base64.RawURLEncoding.EncodeToString(mustSerializeJSON(protectedHeader)))
	}

	// Default to base64url-encoding the payload; an error from getB64 falls
	// back to the default rather than failing.
	needsBase64 := true

	if protectedHeader != nil {
		var err error
		if needsBase64, err = protectedHeader.getB64(); err != nil {
			needsBase64 = true
		}
	}

	authData.WriteByte('.')

	if needsBase64 {
		authData.WriteString(base64.RawURLEncoding.EncodeToString(payload))
	} else {
		authData.Write(payload)
	}

	return authData.Bytes(), nil
}
|
|
||||||
|
|
||||||
// parseSignedFull parses a message in full format.
|
|
||||||
func parseSignedFull(input string) (*JSONWebSignature, error) {
|
|
||||||
var parsed rawJSONWebSignature
|
|
||||||
err := json.Unmarshal([]byte(input), &parsed)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return parsed.sanitized()
|
|
||||||
}
|
|
||||||
|
|
||||||
// sanitized produces a cleaned-up JWS object from the raw JSON. It handles
// both the general form (a "signatures" array) and the flattened form (a
// single signature at the top level), validates that no nonce appears in an
// unprotected header, and rejects embedded JWKs that are not valid public
// keys (RFC 7515 §4.1.3).
func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
	if parsed.Payload == nil {
		return nil, fmt.Errorf("go-jose/go-jose: missing payload in JWS message")
	}

	obj := &JSONWebSignature{
		payload:    parsed.Payload.bytes(),
		Signatures: make([]Signature, len(parsed.Signatures)),
	}

	if len(parsed.Signatures) == 0 {
		// No signatures array, must be flattened serialization
		// (Signatures was made with length 0 above; the single signature is
		// appended below.)
		signature := Signature{}
		if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
			signature.protected = &rawHeader{}
			err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
			if err != nil {
				return nil, err
			}
		}

		// Check that there is not a nonce in the unprotected header
		if parsed.Header != nil && parsed.Header.getNonce() != "" {
			return nil, ErrUnprotectedNonce
		}

		signature.header = parsed.Header
		signature.Signature = parsed.Signature.bytes()
		// Make a fake "original" rawSignatureInfo to store the unprocessed
		// Protected header. This is necessary because the Protected header can
		// contain arbitrary fields not registered as part of the spec. See
		// https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
		// If we unmarshal Protected into a rawHeader with its explicit list of fields,
		// we cannot marshal losslessly. So we have to keep around the original bytes.
		// This is used in computeAuthData, which will first attempt to use
		// the original bytes of a protected header, and fall back on marshaling the
		// header struct only if those bytes are not available.
		signature.original = &rawSignatureInfo{
			Protected: parsed.Protected,
			Header:    parsed.Header,
			Signature: parsed.Signature,
		}

		var err error
		signature.Header, err = signature.mergedHeaders().sanitized()
		if err != nil {
			return nil, err
		}

		if signature.header != nil {
			signature.Unprotected, err = signature.header.sanitized()
			if err != nil {
				return nil, err
			}
		}

		if signature.protected != nil {
			signature.Protected, err = signature.protected.sanitized()
			if err != nil {
				return nil, err
			}
		}

		// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
		jwk := signature.Header.JSONWebKey
		if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
			return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
		}

		obj.Signatures = append(obj.Signatures, signature)
	}

	// General form: sanitize each entry of the "signatures" array in place.
	for i, sig := range parsed.Signatures {
		if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
			obj.Signatures[i].protected = &rawHeader{}
			err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
			if err != nil {
				return nil, err
			}
		}

		// Check that there is not a nonce in the unprotected header
		if sig.Header != nil && sig.Header.getNonce() != "" {
			return nil, ErrUnprotectedNonce
		}

		var err error
		obj.Signatures[i].Header, err = obj.Signatures[i].mergedHeaders().sanitized()
		if err != nil {
			return nil, err
		}

		if obj.Signatures[i].header != nil {
			obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized()
			if err != nil {
				return nil, err
			}
		}

		if obj.Signatures[i].protected != nil {
			obj.Signatures[i].Protected, err = obj.Signatures[i].protected.sanitized()
			if err != nil {
				return nil, err
			}
		}

		obj.Signatures[i].Signature = sig.Signature.bytes()

		// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
		jwk := obj.Signatures[i].Header.JSONWebKey
		if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
			return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
		}

		// Copy value of sig
		original := sig

		obj.Signatures[i].header = sig.Header
		obj.Signatures[i].original = &original
	}

	return obj, nil
}
|
|
||||||
|
|
||||||
// parseSignedCompact parses a message in compact format.
|
|
||||||
func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) {
|
|
||||||
parts := strings.Split(input, ".")
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
|
|
||||||
}
|
|
||||||
|
|
||||||
if parts[1] != "" && payload != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
|
|
||||||
}
|
|
||||||
|
|
||||||
rawProtected, err := base64URLDecode(parts[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if payload == nil {
|
|
||||||
payload, err = base64URLDecode(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
signature, err := base64URLDecode(parts[2])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
raw := &rawJSONWebSignature{
|
|
||||||
Payload: newBuffer(payload),
|
|
||||||
Protected: newBuffer(rawProtected),
|
|
||||||
Signature: newBuffer(signature),
|
|
||||||
}
|
|
||||||
return raw.sanitized()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
|
|
||||||
if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
|
|
||||||
return "", ErrNotSupported
|
|
||||||
}
|
|
||||||
|
|
||||||
serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected))
|
|
||||||
payload := ""
|
|
||||||
signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)
|
|
||||||
|
|
||||||
if !detached {
|
|
||||||
payload = base64.RawURLEncoding.EncodeToString(obj.payload)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompactSerialize serializes an object using the compact serialization format.
|
|
||||||
func (obj JSONWebSignature) CompactSerialize() (string, error) {
|
|
||||||
return obj.compactSerialize(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetachedCompactSerialize serializes an object using the compact serialization format with detached payload.
|
|
||||||
func (obj JSONWebSignature) DetachedCompactSerialize() (string, error) {
|
|
||||||
return obj.compactSerialize(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FullSerialize serializes an object using the full JSON serialization format.
|
|
||||||
func (obj JSONWebSignature) FullSerialize() string {
|
|
||||||
raw := rawJSONWebSignature{
|
|
||||||
Payload: newBuffer(obj.payload),
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(obj.Signatures) == 1 {
|
|
||||||
if obj.Signatures[0].protected != nil {
|
|
||||||
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
|
|
||||||
raw.Protected = newBuffer(serializedProtected)
|
|
||||||
}
|
|
||||||
raw.Header = obj.Signatures[0].header
|
|
||||||
raw.Signature = newBuffer(obj.Signatures[0].Signature)
|
|
||||||
} else {
|
|
||||||
raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
|
|
||||||
for i, signature := range obj.Signatures {
|
|
||||||
raw.Signatures[i] = rawSignatureInfo{
|
|
||||||
Header: signature.header,
|
|
||||||
Signature: newBuffer(signature.Signature),
|
|
||||||
}
|
|
||||||
|
|
||||||
if signature.protected != nil {
|
|
||||||
raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(mustSerializeJSON(raw))
|
|
||||||
}
|
|
144
vendor/github.com/go-jose/go-jose/v3/opaque.go
generated
vendored
144
vendor/github.com/go-jose/go-jose/v3/opaque.go
generated
vendored
@ -1,144 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2018 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
// OpaqueSigner is an interface that supports signing payloads with opaque
|
|
||||||
// private key(s). Private key operations performed by implementers may, for
|
|
||||||
// example, occur in a hardware module. An OpaqueSigner may rotate signing keys
|
|
||||||
// transparently to the user of this interface.
|
|
||||||
type OpaqueSigner interface {
|
|
||||||
// Public returns the public key of the current signing key.
|
|
||||||
Public() *JSONWebKey
|
|
||||||
// Algs returns a list of supported signing algorithms.
|
|
||||||
Algs() []SignatureAlgorithm
|
|
||||||
// SignPayload signs a payload with the current signing key using the given
|
|
||||||
// algorithm.
|
|
||||||
SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type opaqueSigner struct {
|
|
||||||
signer OpaqueSigner
|
|
||||||
}
|
|
||||||
|
|
||||||
func newOpaqueSigner(alg SignatureAlgorithm, signer OpaqueSigner) (recipientSigInfo, error) {
|
|
||||||
var algSupported bool
|
|
||||||
for _, salg := range signer.Algs() {
|
|
||||||
if alg == salg {
|
|
||||||
algSupported = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !algSupported {
|
|
||||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientSigInfo{
|
|
||||||
sigAlg: alg,
|
|
||||||
publicKey: signer.Public,
|
|
||||||
signer: &opaqueSigner{
|
|
||||||
signer: signer,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *opaqueSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
|
||||||
out, err := o.signer.SignPayload(payload, alg)
|
|
||||||
if err != nil {
|
|
||||||
return Signature{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return Signature{
|
|
||||||
Signature: out,
|
|
||||||
protected: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpaqueVerifier is an interface that supports verifying payloads with opaque
|
|
||||||
// public key(s). An OpaqueSigner may rotate signing keys transparently to the
|
|
||||||
// user of this interface.
|
|
||||||
type OpaqueVerifier interface {
|
|
||||||
VerifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type opaqueVerifier struct {
|
|
||||||
verifier OpaqueVerifier
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
|
||||||
return o.verifier.VerifyPayload(payload, signature, alg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
|
|
||||||
type OpaqueKeyEncrypter interface {
|
|
||||||
// KeyID returns the kid
|
|
||||||
KeyID() string
|
|
||||||
// Algs returns a list of supported key encryption algorithms.
|
|
||||||
Algs() []KeyAlgorithm
|
|
||||||
// encryptKey encrypts the CEK using the given algorithm.
|
|
||||||
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type opaqueKeyEncrypter struct {
|
|
||||||
encrypter OpaqueKeyEncrypter
|
|
||||||
}
|
|
||||||
|
|
||||||
func newOpaqueKeyEncrypter(alg KeyAlgorithm, encrypter OpaqueKeyEncrypter) (recipientKeyInfo, error) {
|
|
||||||
var algSupported bool
|
|
||||||
for _, salg := range encrypter.Algs() {
|
|
||||||
if alg == salg {
|
|
||||||
algSupported = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !algSupported {
|
|
||||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientKeyInfo{
|
|
||||||
keyID: encrypter.KeyID(),
|
|
||||||
keyAlg: alg,
|
|
||||||
keyEncrypter: &opaqueKeyEncrypter{
|
|
||||||
encrypter: encrypter,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
|
||||||
return oke.encrypter.encryptKey(cek, alg)
|
|
||||||
}
|
|
||||||
|
|
||||||
//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
|
|
||||||
type OpaqueKeyDecrypter interface {
|
|
||||||
DecryptKey(encryptedKey []byte, header Header) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type opaqueKeyDecrypter struct {
|
|
||||||
decrypter OpaqueKeyDecrypter
|
|
||||||
}
|
|
||||||
|
|
||||||
func (okd *opaqueKeyDecrypter) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
|
||||||
mergedHeaders := rawHeader{}
|
|
||||||
mergedHeaders.merge(&headers)
|
|
||||||
mergedHeaders.merge(recipient.header)
|
|
||||||
|
|
||||||
header, err := mergedHeaders.sanitized()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return okd.decrypter.DecryptKey(recipient.encryptedKey, header)
|
|
||||||
}
|
|
520
vendor/github.com/go-jose/go-jose/v3/shared.go
generated
vendored
520
vendor/github.com/go-jose/go-jose/v3/shared.go
generated
vendored
@ -1,520 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/elliptic"
|
|
||||||
"crypto/x509"
|
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// KeyAlgorithm represents a key management algorithm.
|
|
||||||
type KeyAlgorithm string
|
|
||||||
|
|
||||||
// SignatureAlgorithm represents a signature (or MAC) algorithm.
|
|
||||||
type SignatureAlgorithm string
|
|
||||||
|
|
||||||
// ContentEncryption represents a content encryption algorithm.
|
|
||||||
type ContentEncryption string
|
|
||||||
|
|
||||||
// CompressionAlgorithm represents an algorithm used for plaintext compression.
|
|
||||||
type CompressionAlgorithm string
|
|
||||||
|
|
||||||
// ContentType represents type of the contained data.
|
|
||||||
type ContentType string
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrCryptoFailure represents an error in cryptographic primitive. This
|
|
||||||
// occurs when, for example, a message had an invalid authentication tag or
|
|
||||||
// could not be decrypted.
|
|
||||||
ErrCryptoFailure = errors.New("go-jose/go-jose: error in cryptographic primitive")
|
|
||||||
|
|
||||||
// ErrUnsupportedAlgorithm indicates that a selected algorithm is not
|
|
||||||
// supported. This occurs when trying to instantiate an encrypter for an
|
|
||||||
// algorithm that is not yet implemented.
|
|
||||||
ErrUnsupportedAlgorithm = errors.New("go-jose/go-jose: unknown/unsupported algorithm")
|
|
||||||
|
|
||||||
// ErrUnsupportedKeyType indicates that the given key type/format is not
|
|
||||||
// supported. This occurs when trying to instantiate an encrypter and passing
|
|
||||||
// it a key of an unrecognized type or with unsupported parameters, such as
|
|
||||||
// an RSA private key with more than two primes.
|
|
||||||
ErrUnsupportedKeyType = errors.New("go-jose/go-jose: unsupported key type/format")
|
|
||||||
|
|
||||||
// ErrInvalidKeySize indicates that the given key is not the correct size
|
|
||||||
// for the selected algorithm. This can occur, for example, when trying to
|
|
||||||
// encrypt with AES-256 but passing only a 128-bit key as input.
|
|
||||||
ErrInvalidKeySize = errors.New("go-jose/go-jose: invalid key size for algorithm")
|
|
||||||
|
|
||||||
// ErrNotSupported serialization of object is not supported. This occurs when
|
|
||||||
// trying to compact-serialize an object which can't be represented in
|
|
||||||
// compact form.
|
|
||||||
ErrNotSupported = errors.New("go-jose/go-jose: compact serialization not supported for object")
|
|
||||||
|
|
||||||
// ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
|
|
||||||
// nonce header parameter was included in an unprotected header object.
|
|
||||||
ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Key management algorithms
|
|
||||||
const (
|
|
||||||
ED25519 = KeyAlgorithm("ED25519")
|
|
||||||
RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
|
|
||||||
RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
|
|
||||||
RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
|
|
||||||
A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
|
|
||||||
A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
|
|
||||||
A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
|
|
||||||
DIRECT = KeyAlgorithm("dir") // Direct encryption
|
|
||||||
ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
|
|
||||||
ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
|
|
||||||
ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
|
|
||||||
ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
|
|
||||||
A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
|
|
||||||
A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
|
|
||||||
A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
|
|
||||||
PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
|
|
||||||
PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
|
|
||||||
PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Signature algorithms
|
|
||||||
const (
|
|
||||||
EdDSA = SignatureAlgorithm("EdDSA")
|
|
||||||
HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
|
|
||||||
HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
|
|
||||||
HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
|
|
||||||
RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
|
|
||||||
RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
|
|
||||||
RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
|
|
||||||
ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
|
|
||||||
ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
|
|
||||||
ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
|
|
||||||
PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
|
|
||||||
PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
|
|
||||||
PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
|
|
||||||
)
|
|
||||||
|
|
||||||
// Content encryption algorithms
|
|
||||||
const (
|
|
||||||
A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
|
|
||||||
A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
|
|
||||||
A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
|
|
||||||
A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
|
|
||||||
A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
|
|
||||||
A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Compression algorithms
|
|
||||||
const (
|
|
||||||
NONE = CompressionAlgorithm("") // No compression
|
|
||||||
DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
|
|
||||||
)
|
|
||||||
|
|
||||||
// A key in the protected header of a JWS object. Use of the Header...
|
|
||||||
// constants is preferred to enhance type safety.
|
|
||||||
type HeaderKey string
|
|
||||||
|
|
||||||
const (
|
|
||||||
HeaderType = "typ" // string
|
|
||||||
HeaderContentType = "cty" // string
|
|
||||||
|
|
||||||
// These are set by go-jose and shouldn't need to be set by consumers of the
|
|
||||||
// library.
|
|
||||||
headerAlgorithm = "alg" // string
|
|
||||||
headerEncryption = "enc" // ContentEncryption
|
|
||||||
headerCompression = "zip" // CompressionAlgorithm
|
|
||||||
headerCritical = "crit" // []string
|
|
||||||
|
|
||||||
headerAPU = "apu" // *byteBuffer
|
|
||||||
headerAPV = "apv" // *byteBuffer
|
|
||||||
headerEPK = "epk" // *JSONWebKey
|
|
||||||
headerIV = "iv" // *byteBuffer
|
|
||||||
headerTag = "tag" // *byteBuffer
|
|
||||||
headerX5c = "x5c" // []*x509.Certificate
|
|
||||||
|
|
||||||
headerJWK = "jwk" // *JSONWebKey
|
|
||||||
headerKeyID = "kid" // string
|
|
||||||
headerNonce = "nonce" // string
|
|
||||||
headerB64 = "b64" // bool
|
|
||||||
|
|
||||||
headerP2C = "p2c" // *byteBuffer (int)
|
|
||||||
headerP2S = "p2s" // *byteBuffer ([]byte)
|
|
||||||
|
|
||||||
)
|
|
||||||
|
|
||||||
// supportedCritical is the set of supported extensions that are understood and processed.
|
|
||||||
var supportedCritical = map[string]bool{
|
|
||||||
headerB64: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
|
|
||||||
//
|
|
||||||
// The decoding of the constituent items is deferred because we want to marshal
|
|
||||||
// some members into particular structs rather than generic maps, but at the
|
|
||||||
// same time we need to receive any extra fields unhandled by this library to
|
|
||||||
// pass through to consuming code in case it wants to examine them.
|
|
||||||
type rawHeader map[HeaderKey]*json.RawMessage
|
|
||||||
|
|
||||||
// Header represents the read-only JOSE header for JWE/JWS objects.
|
|
||||||
type Header struct {
|
|
||||||
KeyID string
|
|
||||||
JSONWebKey *JSONWebKey
|
|
||||||
Algorithm string
|
|
||||||
Nonce string
|
|
||||||
|
|
||||||
// Unverified certificate chain parsed from x5c header.
|
|
||||||
certificates []*x509.Certificate
|
|
||||||
|
|
||||||
// Any headers not recognised above get unmarshalled
|
|
||||||
// from JSON in a generic manner and placed in this map.
|
|
||||||
ExtraHeaders map[HeaderKey]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Certificates verifies & returns the certificate chain present
|
|
||||||
// in the x5c header field of a message, if one was present. Returns
|
|
||||||
// an error if there was no x5c header present or the chain could
|
|
||||||
// not be validated with the given verify options.
|
|
||||||
func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) {
|
|
||||||
if len(h.certificates) == 0 {
|
|
||||||
return nil, errors.New("go-jose/go-jose: no x5c header present in message")
|
|
||||||
}
|
|
||||||
|
|
||||||
leaf := h.certificates[0]
|
|
||||||
if opts.Intermediates == nil {
|
|
||||||
opts.Intermediates = x509.NewCertPool()
|
|
||||||
for _, intermediate := range h.certificates[1:] {
|
|
||||||
opts.Intermediates.AddCert(intermediate)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return leaf.Verify(opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (parsed rawHeader) set(k HeaderKey, v interface{}) error {
|
|
||||||
b, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
parsed[k] = makeRawMessage(b)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getString gets a string from the raw JSON, defaulting to "".
|
|
||||||
func (parsed rawHeader) getString(k HeaderKey) string {
|
|
||||||
v, ok := parsed[k]
|
|
||||||
if !ok || v == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
var s string
|
|
||||||
err := json.Unmarshal(*v, &s)
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// getByteBuffer gets a byte buffer from the raw JSON. Returns (nil, nil) if
|
|
||||||
// not specified.
|
|
||||||
func (parsed rawHeader) getByteBuffer(k HeaderKey) (*byteBuffer, error) {
|
|
||||||
v := parsed[k]
|
|
||||||
if v == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
var bb *byteBuffer
|
|
||||||
err := json.Unmarshal(*v, &bb)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return bb, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getAlgorithm extracts parsed "alg" from the raw JSON as a KeyAlgorithm.
|
|
||||||
func (parsed rawHeader) getAlgorithm() KeyAlgorithm {
|
|
||||||
return KeyAlgorithm(parsed.getString(headerAlgorithm))
|
|
||||||
}
|
|
||||||
|
|
||||||
// getSignatureAlgorithm extracts parsed "alg" from the raw JSON as a SignatureAlgorithm.
|
|
||||||
func (parsed rawHeader) getSignatureAlgorithm() SignatureAlgorithm {
|
|
||||||
return SignatureAlgorithm(parsed.getString(headerAlgorithm))
|
|
||||||
}
|
|
||||||
|
|
||||||
// getEncryption extracts parsed "enc" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getEncryption() ContentEncryption {
|
|
||||||
return ContentEncryption(parsed.getString(headerEncryption))
|
|
||||||
}
|
|
||||||
|
|
||||||
// getCompression extracts parsed "zip" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getCompression() CompressionAlgorithm {
|
|
||||||
return CompressionAlgorithm(parsed.getString(headerCompression))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (parsed rawHeader) getNonce() string {
|
|
||||||
return parsed.getString(headerNonce)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getEPK extracts parsed "epk" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getEPK() (*JSONWebKey, error) {
|
|
||||||
v := parsed[headerEPK]
|
|
||||||
if v == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
var epk *JSONWebKey
|
|
||||||
err := json.Unmarshal(*v, &epk)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return epk, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getAPU extracts parsed "apu" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getAPU() (*byteBuffer, error) {
|
|
||||||
return parsed.getByteBuffer(headerAPU)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getAPV extracts parsed "apv" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getAPV() (*byteBuffer, error) {
|
|
||||||
return parsed.getByteBuffer(headerAPV)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getIV extracts parsed "iv" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getIV() (*byteBuffer, error) {
|
|
||||||
return parsed.getByteBuffer(headerIV)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getTag extracts parsed "tag" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getTag() (*byteBuffer, error) {
|
|
||||||
return parsed.getByteBuffer(headerTag)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getJWK extracts parsed "jwk" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getJWK() (*JSONWebKey, error) {
|
|
||||||
v := parsed[headerJWK]
|
|
||||||
if v == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
var jwk *JSONWebKey
|
|
||||||
err := json.Unmarshal(*v, &jwk)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return jwk, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getCritical extracts parsed "crit" from the raw JSON. If omitted, it
|
|
||||||
// returns an empty slice.
|
|
||||||
func (parsed rawHeader) getCritical() ([]string, error) {
|
|
||||||
v := parsed[headerCritical]
|
|
||||||
if v == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var q []string
|
|
||||||
err := json.Unmarshal(*v, &q)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return q, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getS2C extracts parsed "p2c" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getP2C() (int, error) {
|
|
||||||
v := parsed[headerP2C]
|
|
||||||
if v == nil {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var p2c int
|
|
||||||
err := json.Unmarshal(*v, &p2c)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return p2c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getS2S extracts parsed "p2s" from the raw JSON.
|
|
||||||
func (parsed rawHeader) getP2S() (*byteBuffer, error) {
|
|
||||||
return parsed.getByteBuffer(headerP2S)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getB64 extracts parsed "b64" from the raw JSON, defaulting to true.
|
|
||||||
func (parsed rawHeader) getB64() (bool, error) {
|
|
||||||
v := parsed[headerB64]
|
|
||||||
if v == nil {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var b64 bool
|
|
||||||
err := json.Unmarshal(*v, &b64)
|
|
||||||
if err != nil {
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
return b64, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// sanitized produces a cleaned-up header object from the raw JSON.
|
|
||||||
func (parsed rawHeader) sanitized() (h Header, err error) {
|
|
||||||
for k, v := range parsed {
|
|
||||||
if v == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch k {
|
|
||||||
case headerJWK:
|
|
||||||
var jwk *JSONWebKey
|
|
||||||
err = json.Unmarshal(*v, &jwk)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to unmarshal JWK: %v: %#v", err, string(*v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.JSONWebKey = jwk
|
|
||||||
case headerKeyID:
|
|
||||||
var s string
|
|
||||||
err = json.Unmarshal(*v, &s)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to unmarshal key ID: %v: %#v", err, string(*v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.KeyID = s
|
|
||||||
case headerAlgorithm:
|
|
||||||
var s string
|
|
||||||
err = json.Unmarshal(*v, &s)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to unmarshal algorithm: %v: %#v", err, string(*v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.Algorithm = s
|
|
||||||
case headerNonce:
|
|
||||||
var s string
|
|
||||||
err = json.Unmarshal(*v, &s)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to unmarshal nonce: %v: %#v", err, string(*v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.Nonce = s
|
|
||||||
case headerX5c:
|
|
||||||
c := []string{}
|
|
||||||
err = json.Unmarshal(*v, &c)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.certificates, err = parseCertificateChain(c)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
if h.ExtraHeaders == nil {
|
|
||||||
h.ExtraHeaders = map[HeaderKey]interface{}{}
|
|
||||||
}
|
|
||||||
var v2 interface{}
|
|
||||||
err = json.Unmarshal(*v, &v2)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to unmarshal value: %v: %#v", err, string(*v))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.ExtraHeaders[k] = v2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseCertificateChain(chain []string) ([]*x509.Certificate, error) {
|
|
||||||
out := make([]*x509.Certificate, len(chain))
|
|
||||||
for i, cert := range chain {
|
|
||||||
raw, err := base64.StdEncoding.DecodeString(cert)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
out[i], err = x509.ParseCertificate(raw)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (parsed rawHeader) isSet(k HeaderKey) bool {
|
|
||||||
dvr := parsed[k]
|
|
||||||
if dvr == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
var dv interface{}
|
|
||||||
err := json.Unmarshal(*dvr, &dv)
|
|
||||||
if err != nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if dvStr, ok := dv.(string); ok {
|
|
||||||
return dvStr != ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge headers from src into dst, giving precedence to headers from l.
|
|
||||||
func (parsed rawHeader) merge(src *rawHeader) {
|
|
||||||
if src == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range *src {
|
|
||||||
if parsed.isSet(k) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
parsed[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get JOSE name of curve
|
|
||||||
func curveName(crv elliptic.Curve) (string, error) {
|
|
||||||
switch crv {
|
|
||||||
case elliptic.P256():
|
|
||||||
return "P-256", nil
|
|
||||||
case elliptic.P384():
|
|
||||||
return "P-384", nil
|
|
||||||
case elliptic.P521():
|
|
||||||
return "P-521", nil
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get size of curve in bytes
|
|
||||||
func curveSize(crv elliptic.Curve) int {
|
|
||||||
bits := crv.Params().BitSize
|
|
||||||
|
|
||||||
div := bits / 8
|
|
||||||
mod := bits % 8
|
|
||||||
|
|
||||||
if mod == 0 {
|
|
||||||
return div
|
|
||||||
}
|
|
||||||
|
|
||||||
return div + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeRawMessage(b []byte) *json.RawMessage {
|
|
||||||
rm := json.RawMessage(b)
|
|
||||||
return &rm
|
|
||||||
}
|
|
450
vendor/github.com/go-jose/go-jose/v3/signing.go
generated
vendored
450
vendor/github.com/go-jose/go-jose/v3/signing.go
generated
vendored
@ -1,450 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/ed25519"
|
|
||||||
"crypto/rsa"
|
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/go-jose/go-jose/v3/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NonceSource represents a source of random nonces to go into JWS objects
|
|
||||||
type NonceSource interface {
|
|
||||||
Nonce() (string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signer represents a signer which takes a payload and produces a signed JWS object.
|
|
||||||
type Signer interface {
|
|
||||||
Sign(payload []byte) (*JSONWebSignature, error)
|
|
||||||
Options() SignerOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// SigningKey represents an algorithm/key used to sign a message.
|
|
||||||
type SigningKey struct {
|
|
||||||
Algorithm SignatureAlgorithm
|
|
||||||
Key interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignerOptions represents options that can be set when creating signers.
|
|
||||||
type SignerOptions struct {
|
|
||||||
NonceSource NonceSource
|
|
||||||
EmbedJWK bool
|
|
||||||
|
|
||||||
// Optional map of additional keys to be inserted into the protected header
|
|
||||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
|
||||||
// additional values here. All values must be JSON-serializable.
|
|
||||||
ExtraHeaders map[HeaderKey]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
|
||||||
// if necessary. It returns itself and so can be used in a fluent style.
|
|
||||||
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
|
|
||||||
if so.ExtraHeaders == nil {
|
|
||||||
so.ExtraHeaders = map[HeaderKey]interface{}{}
|
|
||||||
}
|
|
||||||
so.ExtraHeaders[k] = v
|
|
||||||
return so
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithContentType adds a content type ("cty") header and returns the updated
|
|
||||||
// SignerOptions.
|
|
||||||
func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions {
|
|
||||||
return so.WithHeader(HeaderContentType, contentType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithType adds a type ("typ") header and returns the updated SignerOptions.
|
|
||||||
func (so *SignerOptions) WithType(typ ContentType) *SignerOptions {
|
|
||||||
return so.WithHeader(HeaderType, typ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithCritical adds the given names to the critical ("crit") header and returns
|
|
||||||
// the updated SignerOptions.
|
|
||||||
func (so *SignerOptions) WithCritical(names ...string) *SignerOptions {
|
|
||||||
if so.ExtraHeaders[headerCritical] == nil {
|
|
||||||
so.WithHeader(headerCritical, make([]string, 0, len(names)))
|
|
||||||
}
|
|
||||||
crit := so.ExtraHeaders[headerCritical].([]string)
|
|
||||||
so.ExtraHeaders[headerCritical] = append(crit, names...)
|
|
||||||
return so
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithBase64 adds a base64url-encode payload ("b64") header and returns the updated
|
|
||||||
// SignerOptions. When the "b64" value is "false", the payload is not base64 encoded.
|
|
||||||
func (so *SignerOptions) WithBase64(b64 bool) *SignerOptions {
|
|
||||||
if !b64 {
|
|
||||||
so.WithHeader(headerB64, b64)
|
|
||||||
so.WithCritical(headerB64)
|
|
||||||
}
|
|
||||||
return so
|
|
||||||
}
|
|
||||||
|
|
||||||
type payloadSigner interface {
|
|
||||||
signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type payloadVerifier interface {
|
|
||||||
verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type genericSigner struct {
|
|
||||||
recipients []recipientSigInfo
|
|
||||||
nonceSource NonceSource
|
|
||||||
embedJWK bool
|
|
||||||
extraHeaders map[HeaderKey]interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type recipientSigInfo struct {
|
|
||||||
sigAlg SignatureAlgorithm
|
|
||||||
publicKey func() *JSONWebKey
|
|
||||||
signer payloadSigner
|
|
||||||
}
|
|
||||||
|
|
||||||
func staticPublicKey(jwk *JSONWebKey) func() *JSONWebKey {
|
|
||||||
return func() *JSONWebKey {
|
|
||||||
return jwk
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSigner creates an appropriate signer based on the key type
|
|
||||||
func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
|
|
||||||
return NewMultiSigner([]SigningKey{sig}, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMultiSigner creates a signer for multiple recipients
|
|
||||||
func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) {
|
|
||||||
signer := &genericSigner{recipients: []recipientSigInfo{}}
|
|
||||||
|
|
||||||
if opts != nil {
|
|
||||||
signer.nonceSource = opts.NonceSource
|
|
||||||
signer.embedJWK = opts.EmbedJWK
|
|
||||||
signer.extraHeaders = opts.ExtraHeaders
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, sig := range sigs {
|
|
||||||
err := signer.addRecipient(sig.Algorithm, sig.Key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return signer, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newVerifier creates a verifier based on the key type
|
|
||||||
func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
|
|
||||||
switch verificationKey := verificationKey.(type) {
|
|
||||||
case ed25519.PublicKey:
|
|
||||||
return &edEncrypterVerifier{
|
|
||||||
publicKey: verificationKey,
|
|
||||||
}, nil
|
|
||||||
case *rsa.PublicKey:
|
|
||||||
return &rsaEncrypterVerifier{
|
|
||||||
publicKey: verificationKey,
|
|
||||||
}, nil
|
|
||||||
case *ecdsa.PublicKey:
|
|
||||||
return &ecEncrypterVerifier{
|
|
||||||
publicKey: verificationKey,
|
|
||||||
}, nil
|
|
||||||
case []byte:
|
|
||||||
return &symmetricMac{
|
|
||||||
key: verificationKey,
|
|
||||||
}, nil
|
|
||||||
case JSONWebKey:
|
|
||||||
return newVerifier(verificationKey.Key)
|
|
||||||
case *JSONWebKey:
|
|
||||||
return newVerifier(verificationKey.Key)
|
|
||||||
}
|
|
||||||
if ov, ok := verificationKey.(OpaqueVerifier); ok {
|
|
||||||
return &opaqueVerifier{verifier: ov}, nil
|
|
||||||
}
|
|
||||||
return nil, ErrUnsupportedKeyType
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
|
|
||||||
recipient, err := makeJWSRecipient(alg, signingKey)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx.recipients = append(ctx.recipients, recipient)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
|
|
||||||
switch signingKey := signingKey.(type) {
|
|
||||||
case ed25519.PrivateKey:
|
|
||||||
return newEd25519Signer(alg, signingKey)
|
|
||||||
case *rsa.PrivateKey:
|
|
||||||
return newRSASigner(alg, signingKey)
|
|
||||||
case *ecdsa.PrivateKey:
|
|
||||||
return newECDSASigner(alg, signingKey)
|
|
||||||
case []byte:
|
|
||||||
return newSymmetricSigner(alg, signingKey)
|
|
||||||
case JSONWebKey:
|
|
||||||
return newJWKSigner(alg, signingKey)
|
|
||||||
case *JSONWebKey:
|
|
||||||
return newJWKSigner(alg, *signingKey)
|
|
||||||
}
|
|
||||||
if signer, ok := signingKey.(OpaqueSigner); ok {
|
|
||||||
return newOpaqueSigner(alg, signer)
|
|
||||||
}
|
|
||||||
return recipientSigInfo{}, ErrUnsupportedKeyType
|
|
||||||
}
|
|
||||||
|
|
||||||
func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
|
|
||||||
recipient, err := makeJWSRecipient(alg, signingKey.Key)
|
|
||||||
if err != nil {
|
|
||||||
return recipientSigInfo{}, err
|
|
||||||
}
|
|
||||||
if recipient.publicKey != nil && recipient.publicKey() != nil {
|
|
||||||
// recipient.publicKey is a JWK synthesized for embedding when recipientSigInfo
|
|
||||||
// was created for the inner key (such as a RSA or ECDSA public key). It contains
|
|
||||||
// the pub key for embedding, but doesn't have extra params like key id.
|
|
||||||
publicKey := signingKey
|
|
||||||
publicKey.Key = recipient.publicKey().Key
|
|
||||||
recipient.publicKey = staticPublicKey(&publicKey)
|
|
||||||
|
|
||||||
// This should be impossible, but let's check anyway.
|
|
||||||
if !recipient.publicKey().IsPublic() {
|
|
||||||
return recipientSigInfo{}, errors.New("go-jose/go-jose: public key was unexpectedly not public")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return recipient, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
|
|
||||||
obj := &JSONWebSignature{}
|
|
||||||
obj.payload = payload
|
|
||||||
obj.Signatures = make([]Signature, len(ctx.recipients))
|
|
||||||
|
|
||||||
for i, recipient := range ctx.recipients {
|
|
||||||
protected := map[HeaderKey]interface{}{
|
|
||||||
headerAlgorithm: string(recipient.sigAlg),
|
|
||||||
}
|
|
||||||
|
|
||||||
if recipient.publicKey != nil && recipient.publicKey() != nil {
|
|
||||||
// We want to embed the JWK or set the kid header, but not both. Having a protected
|
|
||||||
// header that contains an embedded JWK while also simultaneously containing the kid
|
|
||||||
// header is confusing, and at least in ACME the two are considered to be mutually
|
|
||||||
// exclusive. The fact that both can exist at the same time is a somewhat unfortunate
|
|
||||||
// result of the JOSE spec. We've decided that this library will only include one or
|
|
||||||
// the other to avoid this confusion.
|
|
||||||
//
|
|
||||||
// See https://github.com/go-jose/go-jose/issues/157 for more context.
|
|
||||||
if ctx.embedJWK {
|
|
||||||
protected[headerJWK] = recipient.publicKey()
|
|
||||||
} else {
|
|
||||||
keyID := recipient.publicKey().KeyID
|
|
||||||
if keyID != "" {
|
|
||||||
protected[headerKeyID] = keyID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.nonceSource != nil {
|
|
||||||
nonce, err := ctx.nonceSource.Nonce()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: Error generating nonce: %v", err)
|
|
||||||
}
|
|
||||||
protected[headerNonce] = nonce
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range ctx.extraHeaders {
|
|
||||||
protected[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
serializedProtected := mustSerializeJSON(protected)
|
|
||||||
needsBase64 := true
|
|
||||||
|
|
||||||
if b64, ok := protected[headerB64]; ok {
|
|
||||||
if needsBase64, ok = b64.(bool); !ok {
|
|
||||||
return nil, errors.New("go-jose/go-jose: Invalid b64 header parameter")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var input bytes.Buffer
|
|
||||||
|
|
||||||
input.WriteString(base64.RawURLEncoding.EncodeToString(serializedProtected))
|
|
||||||
input.WriteByte('.')
|
|
||||||
|
|
||||||
if needsBase64 {
|
|
||||||
input.WriteString(base64.RawURLEncoding.EncodeToString(payload))
|
|
||||||
} else {
|
|
||||||
input.Write(payload)
|
|
||||||
}
|
|
||||||
|
|
||||||
signatureInfo, err := recipient.signer.signPayload(input.Bytes(), recipient.sigAlg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
signatureInfo.protected = &rawHeader{}
|
|
||||||
for k, v := range protected {
|
|
||||||
b, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: Error marshalling item %#v: %v", k, err)
|
|
||||||
}
|
|
||||||
(*signatureInfo.protected)[k] = makeRawMessage(b)
|
|
||||||
}
|
|
||||||
obj.Signatures[i] = signatureInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ctx *genericSigner) Options() SignerOptions {
|
|
||||||
return SignerOptions{
|
|
||||||
NonceSource: ctx.nonceSource,
|
|
||||||
EmbedJWK: ctx.embedJWK,
|
|
||||||
ExtraHeaders: ctx.extraHeaders,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify validates the signature on the object and returns the payload.
|
|
||||||
// This function does not support multi-signature, if you desire multi-sig
|
|
||||||
// verification use VerifyMulti instead.
|
|
||||||
//
|
|
||||||
// Be careful when verifying signatures based on embedded JWKs inside the
|
|
||||||
// payload header. You cannot assume that the key received in a payload is
|
|
||||||
// trusted.
|
|
||||||
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
|
|
||||||
err := obj.DetachedVerify(obj.payload, verificationKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return obj.payload, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnsafePayloadWithoutVerification returns the payload without
|
|
||||||
// verifying it. The content returned from this function cannot be
|
|
||||||
// trusted.
|
|
||||||
func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
|
|
||||||
return obj.payload
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetachedVerify validates a detached signature on the given payload. In
|
|
||||||
// most cases, you will probably want to use Verify instead. DetachedVerify
|
|
||||||
// is only useful if you have a payload and signature that are separated from
|
|
||||||
// each other.
|
|
||||||
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
|
|
||||||
key := tryJWKS(verificationKey, obj.headers()...)
|
|
||||||
verifier, err := newVerifier(key)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(obj.Signatures) > 1 {
|
|
||||||
return errors.New("go-jose/go-jose: too many signatures in payload; expecting only one")
|
|
||||||
}
|
|
||||||
|
|
||||||
signature := obj.Signatures[0]
|
|
||||||
headers := signature.mergedHeaders()
|
|
||||||
critical, err := headers.getCritical()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range critical {
|
|
||||||
if !supportedCritical[name] {
|
|
||||||
return ErrCryptoFailure
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
input, err := obj.computeAuthData(payload, &signature)
|
|
||||||
if err != nil {
|
|
||||||
return ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
alg := headers.getSignatureAlgorithm()
|
|
||||||
err = verifier.verifyPayload(input, signature.Signature, alg)
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyMulti validates (one of the multiple) signatures on the object and
|
|
||||||
// returns the index of the signature that was verified, along with the signature
|
|
||||||
// object and the payload. We return the signature and index to guarantee that
|
|
||||||
// callers are getting the verified value.
|
|
||||||
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
|
|
||||||
idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
|
|
||||||
if err != nil {
|
|
||||||
return -1, Signature{}, nil, err
|
|
||||||
}
|
|
||||||
return idx, sig, obj.payload, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetachedVerifyMulti validates a detached signature on the given payload with
|
|
||||||
// a signature/object that has potentially multiple signers. This returns the index
|
|
||||||
// of the signature that was verified, along with the signature object. We return
|
|
||||||
// the signature and index to guarantee that callers are getting the verified value.
|
|
||||||
//
|
|
||||||
// In most cases, you will probably want to use Verify or VerifyMulti instead.
|
|
||||||
// DetachedVerifyMulti is only useful if you have a payload and signature that are
|
|
||||||
// separated from each other, and the signature can have multiple signers at the
|
|
||||||
// same time.
|
|
||||||
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
|
|
||||||
key := tryJWKS(verificationKey, obj.headers()...)
|
|
||||||
verifier, err := newVerifier(key)
|
|
||||||
if err != nil {
|
|
||||||
return -1, Signature{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
outer:
|
|
||||||
for i, signature := range obj.Signatures {
|
|
||||||
headers := signature.mergedHeaders()
|
|
||||||
critical, err := headers.getCritical()
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, name := range critical {
|
|
||||||
if !supportedCritical[name] {
|
|
||||||
continue outer
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
input, err := obj.computeAuthData(payload, &signature)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
alg := headers.getSignatureAlgorithm()
|
|
||||||
err = verifier.verifyPayload(input, signature.Signature, alg)
|
|
||||||
if err == nil {
|
|
||||||
return i, signature, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1, Signature{}, ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
func (obj JSONWebSignature) headers() []Header {
|
|
||||||
headers := make([]Header, len(obj.Signatures))
|
|
||||||
for i, sig := range obj.Signatures {
|
|
||||||
headers[i] = sig.Header
|
|
||||||
}
|
|
||||||
return headers
|
|
||||||
}
|
|
495
vendor/github.com/go-jose/go-jose/v3/symmetric.go
generated
vendored
495
vendor/github.com/go-jose/go-jose/v3/symmetric.go
generated
vendored
@ -1,495 +0,0 @@
|
|||||||
/*-
|
|
||||||
* Copyright 2014 Square Inc.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package jose
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/aes"
|
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
|
||||||
"crypto/sha512"
|
|
||||||
"crypto/subtle"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/pbkdf2"
|
|
||||||
|
|
||||||
josecipher "github.com/go-jose/go-jose/v3/cipher"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RandReader is a cryptographically secure random number generator (stubbed out in tests).
|
|
||||||
var RandReader = rand.Reader
|
|
||||||
|
|
||||||
const (
|
|
||||||
// RFC7518 recommends a minimum of 1,000 iterations:
|
|
||||||
// https://tools.ietf.org/html/rfc7518#section-4.8.1.2
|
|
||||||
// NIST recommends a minimum of 10,000:
|
|
||||||
// https://pages.nist.gov/800-63-3/sp800-63b.html
|
|
||||||
// 1Password uses 100,000:
|
|
||||||
// https://support.1password.com/pbkdf2/
|
|
||||||
defaultP2C = 100000
|
|
||||||
// Default salt size: 128 bits
|
|
||||||
defaultP2SSize = 16
|
|
||||||
)
|
|
||||||
|
|
||||||
// Dummy key cipher for shared symmetric key mode
|
|
||||||
type symmetricKeyCipher struct {
|
|
||||||
key []byte // Pre-shared content-encryption key
|
|
||||||
p2c int // PBES2 Count
|
|
||||||
p2s []byte // PBES2 Salt Input
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signer/verifier for MAC modes
|
|
||||||
type symmetricMac struct {
|
|
||||||
key []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Input/output from an AEAD operation
|
|
||||||
type aeadParts struct {
|
|
||||||
iv, ciphertext, tag []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// A content cipher based on an AEAD construction
|
|
||||||
type aeadContentCipher struct {
|
|
||||||
keyBytes int
|
|
||||||
authtagBytes int
|
|
||||||
getAead func(key []byte) (cipher.AEAD, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Random key generator
|
|
||||||
type randomKeyGenerator struct {
|
|
||||||
size int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Static key generator
|
|
||||||
type staticKeyGenerator struct {
|
|
||||||
key []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new content cipher based on AES-GCM
|
|
||||||
func newAESGCM(keySize int) contentCipher {
|
|
||||||
return &aeadContentCipher{
|
|
||||||
keyBytes: keySize,
|
|
||||||
authtagBytes: 16,
|
|
||||||
getAead: func(key []byte) (cipher.AEAD, error) {
|
|
||||||
aes, err := aes.NewCipher(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cipher.NewGCM(aes)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new content cipher based on AES-CBC+HMAC
|
|
||||||
func newAESCBC(keySize int) contentCipher {
|
|
||||||
return &aeadContentCipher{
|
|
||||||
keyBytes: keySize * 2,
|
|
||||||
authtagBytes: keySize,
|
|
||||||
getAead: func(key []byte) (cipher.AEAD, error) {
|
|
||||||
return josecipher.NewCBCHMAC(key, aes.NewCipher)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get an AEAD cipher object for the given content encryption algorithm
|
|
||||||
func getContentCipher(alg ContentEncryption) contentCipher {
|
|
||||||
switch alg {
|
|
||||||
case A128GCM:
|
|
||||||
return newAESGCM(16)
|
|
||||||
case A192GCM:
|
|
||||||
return newAESGCM(24)
|
|
||||||
case A256GCM:
|
|
||||||
return newAESGCM(32)
|
|
||||||
case A128CBC_HS256:
|
|
||||||
return newAESCBC(16)
|
|
||||||
case A192CBC_HS384:
|
|
||||||
return newAESCBC(24)
|
|
||||||
case A256CBC_HS512:
|
|
||||||
return newAESCBC(32)
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getPbkdf2Params returns the key length and hash function used in
|
|
||||||
// pbkdf2.Key.
|
|
||||||
func getPbkdf2Params(alg KeyAlgorithm) (int, func() hash.Hash) {
|
|
||||||
switch alg {
|
|
||||||
case PBES2_HS256_A128KW:
|
|
||||||
return 16, sha256.New
|
|
||||||
case PBES2_HS384_A192KW:
|
|
||||||
return 24, sha512.New384
|
|
||||||
case PBES2_HS512_A256KW:
|
|
||||||
return 32, sha512.New
|
|
||||||
default:
|
|
||||||
panic("invalid algorithm")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getRandomSalt generates a new salt of the given size.
|
|
||||||
func getRandomSalt(size int) ([]byte, error) {
|
|
||||||
salt := make([]byte, size)
|
|
||||||
_, err := io.ReadFull(RandReader, salt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return salt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newSymmetricRecipient creates a JWE encrypter based on AES-GCM key wrap.
|
|
||||||
func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
|
|
||||||
switch keyAlg {
|
|
||||||
case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
|
|
||||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
|
||||||
default:
|
|
||||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientKeyInfo{
|
|
||||||
keyAlg: keyAlg,
|
|
||||||
keyEncrypter: &symmetricKeyCipher{
|
|
||||||
key: key,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newSymmetricSigner creates a recipientSigInfo based on the given key.
|
|
||||||
func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
|
|
||||||
// Verify that key management algorithm is supported by this encrypter
|
|
||||||
switch sigAlg {
|
|
||||||
case HS256, HS384, HS512:
|
|
||||||
default:
|
|
||||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientSigInfo{
|
|
||||||
sigAlg: sigAlg,
|
|
||||||
signer: &symmetricMac{
|
|
||||||
key: key,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate a random key for the given content cipher
|
|
||||||
func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
|
||||||
key := make([]byte, ctx.size)
|
|
||||||
_, err := io.ReadFull(RandReader, key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, rawHeader{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return key, rawHeader{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key size for random generator
|
|
||||||
func (ctx randomKeyGenerator) keySize() int {
|
|
||||||
return ctx.size
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate a static key (for direct mode)
|
|
||||||
func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
|
||||||
cek := make([]byte, len(ctx.key))
|
|
||||||
copy(cek, ctx.key)
|
|
||||||
return cek, rawHeader{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key size for static generator
|
|
||||||
func (ctx staticKeyGenerator) keySize() int {
|
|
||||||
return len(ctx.key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get key size for this cipher
|
|
||||||
func (ctx aeadContentCipher) keySize() int {
|
|
||||||
return ctx.keyBytes
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt some data
|
|
||||||
func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
|
|
||||||
// Get a new AEAD instance
|
|
||||||
aead, err := ctx.getAead(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize a new nonce
|
|
||||||
iv := make([]byte, aead.NonceSize())
|
|
||||||
_, err = io.ReadFull(RandReader, iv)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
|
|
||||||
offset := len(ciphertextAndTag) - ctx.authtagBytes
|
|
||||||
|
|
||||||
return &aeadParts{
|
|
||||||
iv: iv,
|
|
||||||
ciphertext: ciphertextAndTag[:offset],
|
|
||||||
tag: ciphertextAndTag[offset:],
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt some data
|
|
||||||
func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
|
|
||||||
aead, err := ctx.getAead(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(parts.iv) != aead.NonceSize() || len(parts.tag) < ctx.authtagBytes {
|
|
||||||
return nil, ErrCryptoFailure
|
|
||||||
}
|
|
||||||
|
|
||||||
return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt the content encryption key.
|
|
||||||
func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
|
||||||
switch alg {
|
|
||||||
case DIRECT:
|
|
||||||
return recipientInfo{
|
|
||||||
header: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
case A128GCMKW, A192GCMKW, A256GCMKW:
|
|
||||||
aead := newAESGCM(len(ctx.key))
|
|
||||||
|
|
||||||
parts, err := aead.encrypt(ctx.key, []byte{}, cek)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
header := &rawHeader{}
|
|
||||||
|
|
||||||
if err = header.set(headerIV, newBuffer(parts.iv)); err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = header.set(headerTag, newBuffer(parts.tag)); err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientInfo{
|
|
||||||
header: header,
|
|
||||||
encryptedKey: parts.ciphertext,
|
|
||||||
}, nil
|
|
||||||
case A128KW, A192KW, A256KW:
|
|
||||||
block, err := aes.NewCipher(ctx.key)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
jek, err := josecipher.KeyWrap(block, cek)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientInfo{
|
|
||||||
encryptedKey: jek,
|
|
||||||
header: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
|
||||||
if len(ctx.p2s) == 0 {
|
|
||||||
salt, err := getRandomSalt(defaultP2SSize)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
ctx.p2s = salt
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx.p2c <= 0 {
|
|
||||||
ctx.p2c = defaultP2C
|
|
||||||
}
|
|
||||||
|
|
||||||
// salt is UTF8(Alg) || 0x00 || Salt Input
|
|
||||||
salt := bytes.Join([][]byte{[]byte(alg), ctx.p2s}, []byte{0x00})
|
|
||||||
|
|
||||||
// derive key
|
|
||||||
keyLen, h := getPbkdf2Params(alg)
|
|
||||||
key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h)
|
|
||||||
|
|
||||||
// use AES cipher with derived key
|
|
||||||
block, err := aes.NewCipher(key)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
jek, err := josecipher.KeyWrap(block, cek)
|
|
||||||
if err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
header := &rawHeader{}
|
|
||||||
|
|
||||||
if err = header.set(headerP2C, ctx.p2c); err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = header.set(headerP2S, newBuffer(ctx.p2s)); err != nil {
|
|
||||||
return recipientInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientInfo{
|
|
||||||
encryptedKey: jek,
|
|
||||||
header: header,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt the content encryption key.
|
|
||||||
func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
|
||||||
switch headers.getAlgorithm() {
|
|
||||||
case DIRECT:
|
|
||||||
cek := make([]byte, len(ctx.key))
|
|
||||||
copy(cek, ctx.key)
|
|
||||||
return cek, nil
|
|
||||||
case A128GCMKW, A192GCMKW, A256GCMKW:
|
|
||||||
aead := newAESGCM(len(ctx.key))
|
|
||||||
|
|
||||||
iv, err := headers.getIV()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid IV: %v", err)
|
|
||||||
}
|
|
||||||
tag, err := headers.getTag()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid tag: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
parts := &aeadParts{
|
|
||||||
iv: iv.bytes(),
|
|
||||||
ciphertext: recipient.encryptedKey,
|
|
||||||
tag: tag.bytes(),
|
|
||||||
}
|
|
||||||
|
|
||||||
cek, err := aead.decrypt(ctx.key, []byte{}, parts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cek, nil
|
|
||||||
case A128KW, A192KW, A256KW:
|
|
||||||
block, err := aes.NewCipher(ctx.key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return cek, nil
|
|
||||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
|
||||||
p2s, err := headers.getP2S()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: %v", err)
|
|
||||||
}
|
|
||||||
if p2s == nil || len(p2s.data) == 0 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: must be present")
|
|
||||||
}
|
|
||||||
|
|
||||||
p2c, err := headers.getP2C()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: %v", err)
|
|
||||||
}
|
|
||||||
if p2c <= 0 {
|
|
||||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer")
|
|
||||||
}
|
|
||||||
|
|
||||||
// salt is UTF8(Alg) || 0x00 || Salt Input
|
|
||||||
alg := headers.getAlgorithm()
|
|
||||||
salt := bytes.Join([][]byte{[]byte(alg), p2s.bytes()}, []byte{0x00})
|
|
||||||
|
|
||||||
// derive key
|
|
||||||
keyLen, h := getPbkdf2Params(alg)
|
|
||||||
key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h)
|
|
||||||
|
|
||||||
// use AES cipher with derived key
|
|
||||||
block, err := aes.NewCipher(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return cek, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign the given payload
|
|
||||||
func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
|
||||||
mac, err := ctx.hmac(payload, alg)
|
|
||||||
if err != nil {
|
|
||||||
return Signature{}, errors.New("go-jose/go-jose: failed to compute hmac")
|
|
||||||
}
|
|
||||||
|
|
||||||
return Signature{
|
|
||||||
Signature: mac,
|
|
||||||
protected: &rawHeader{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify the given payload
|
|
||||||
func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
|
|
||||||
expected, err := ctx.hmac(payload, alg)
|
|
||||||
if err != nil {
|
|
||||||
return errors.New("go-jose/go-jose: failed to compute hmac")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(mac) != len(expected) {
|
|
||||||
return errors.New("go-jose/go-jose: invalid hmac")
|
|
||||||
}
|
|
||||||
|
|
||||||
match := subtle.ConstantTimeCompare(mac, expected)
|
|
||||||
if match != 1 {
|
|
||||||
return errors.New("go-jose/go-jose: invalid hmac")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute the HMAC based on the given alg value
|
|
||||||
func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
|
|
||||||
var hash func() hash.Hash
|
|
||||||
|
|
||||||
switch alg {
|
|
||||||
case HS256:
|
|
||||||
hash = sha256.New
|
|
||||||
case HS384:
|
|
||||||
hash = sha512.New384
|
|
||||||
case HS512:
|
|
||||||
hash = sha512.New
|
|
||||||
default:
|
|
||||||
return nil, ErrUnsupportedAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
hmac := hmac.New(hash, ctx.key)
|
|
||||||
|
|
||||||
// According to documentation, Write() on hash never fails
|
|
||||||
_, _ = hmac.Write(payload)
|
|
||||||
return hmac.Sum(nil), nil
|
|
||||||
}
|
|
16
vendor/github.com/go-ping/ping/.editorconfig
generated
vendored
16
vendor/github.com/go-ping/ping/.editorconfig
generated
vendored
@ -1,16 +0,0 @@
|
|||||||
# https://editorconfig.org
|
|
||||||
|
|
||||||
root = true
|
|
||||||
|
|
||||||
[*]
|
|
||||||
end_of_line = lf
|
|
||||||
insert_final_newline = true
|
|
||||||
trim_trailing_whitespace = true
|
|
||||||
charset = utf-8
|
|
||||||
indent_style = space
|
|
||||||
|
|
||||||
[Makefile]
|
|
||||||
indent_style = tab
|
|
||||||
|
|
||||||
[*.go]
|
|
||||||
indent_style = tab
|
|
2
vendor/github.com/go-ping/ping/.gitignore
generated
vendored
2
vendor/github.com/go-ping/ping/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
/ping
|
|
||||||
/dist
|
|
6
vendor/github.com/go-ping/ping/.golangci.yml
generated
vendored
6
vendor/github.com/go-ping/ping/.golangci.yml
generated
vendored
@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
issues:
|
|
||||||
exclude-rules:
|
|
||||||
- path: _test.go
|
|
||||||
linters:
|
|
||||||
- errcheck
|
|
46
vendor/github.com/go-ping/ping/.goreleaser.yml
generated
vendored
46
vendor/github.com/go-ping/ping/.goreleaser.yml
generated
vendored
@ -1,46 +0,0 @@
|
|||||||
project_name: ping
|
|
||||||
before:
|
|
||||||
hooks:
|
|
||||||
- go mod download
|
|
||||||
builds:
|
|
||||||
- binary: ping
|
|
||||||
dir: cmd/ping
|
|
||||||
goarch:
|
|
||||||
- amd64
|
|
||||||
- arm
|
|
||||||
- arm64
|
|
||||||
goarm:
|
|
||||||
- 6
|
|
||||||
- 7
|
|
||||||
goos:
|
|
||||||
- darwin
|
|
||||||
- freebsd
|
|
||||||
- linux
|
|
||||||
- windows
|
|
||||||
archives:
|
|
||||||
- files:
|
|
||||||
- LICENSE
|
|
||||||
- README.md
|
|
||||||
format_overrides:
|
|
||||||
- goos: windows
|
|
||||||
format: zip
|
|
||||||
wrap_in_directory: true
|
|
||||||
# TODO: Decide if we want packages (name conflicts with /bin/ping?)
|
|
||||||
# nfpms:
|
|
||||||
# homepage: https://github.com/go-ping/ping
|
|
||||||
# maintainer: 'Go Ping Maintainers <go-ping@example.com>'
|
|
||||||
# description: Ping written in Go.
|
|
||||||
# license: MIT
|
|
||||||
# formats:
|
|
||||||
# - deb
|
|
||||||
# - rpm
|
|
||||||
checksum:
|
|
||||||
name_template: 'checksums.txt'
|
|
||||||
snapshot:
|
|
||||||
name_template: "{{ .Tag }}-{{ .ShortCommit }}"
|
|
||||||
changelog:
|
|
||||||
sort: asc
|
|
||||||
filters:
|
|
||||||
exclude:
|
|
||||||
- '^docs:'
|
|
||||||
- '^test:'
|
|
44
vendor/github.com/go-ping/ping/CONTRIBUTING.md
generated
vendored
44
vendor/github.com/go-ping/ping/CONTRIBUTING.md
generated
vendored
@ -1,44 +0,0 @@
|
|||||||
# Contributing
|
|
||||||
|
|
||||||
First off, thanks for taking the time to contribute!
|
|
||||||
|
|
||||||
Remember that this is open source software so please consider the other people who will read your code.
|
|
||||||
Make it look nice for them, document your logic in comments and add or update the unit test cases.
|
|
||||||
|
|
||||||
This library is used by various other projects, companies and individuals in live production environments so please discuss any breaking changes with us before making them.
|
|
||||||
Feel free to join us in the #go-ping channel of the [Gophers Slack](https://invite.slack.golangbridge.org/).
|
|
||||||
|
|
||||||
## Pull Requests
|
|
||||||
|
|
||||||
[Fork the repo on GitHub](https://github.com/go-ping/ping/fork) and clone it to your local machine.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/YOUR_USERNAME/ping.git && cd ping
|
|
||||||
```
|
|
||||||
|
|
||||||
Here is a guide on [how to configure a remote repository](https://docs.github.com/en/free-pro-team@latest/github/collaborating-with-issues-and-pull-requests/configuring-a-remote-for-a-fork).
|
|
||||||
|
|
||||||
Check out a new branch, make changes, run tests, commit & sign-off, then push branch to your fork.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
$ git checkout -b <BRANCH_NAME>
|
|
||||||
# edit files
|
|
||||||
$ make style vet test
|
|
||||||
$ git add <CHANGED_FILES>
|
|
||||||
$ git commit -s
|
|
||||||
$ git push <FORK> <BRANCH_NAME>
|
|
||||||
```
|
|
||||||
|
|
||||||
Open a [new pull request](https://github.com/go-ping/ping/compare) in the main `go-ping/ping` repository.
|
|
||||||
Please describe the purpose of your PR and remember to link it to any related issues.
|
|
||||||
|
|
||||||
*We may ask you to rebase your feature branch or squash the commits in order to keep the history clean.*
|
|
||||||
|
|
||||||
## Development Guides
|
|
||||||
|
|
||||||
- Run `make style vet test` before committing your changes.
|
|
||||||
- Document your logic in code comments.
|
|
||||||
- Add tests for bug fixes and new features.
|
|
||||||
- Use UNIX-style (LF) line endings.
|
|
||||||
- End every file with a single blank line.
|
|
||||||
- Use the UTF-8 character set.
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user