Mirror of https://github.com/TwiN/gatus.git (synced 2024-12-22 06:31:15 +01:00)

#205: Work on supporting OpenID Connect for auth

Commit be9087bee3 (parent: 4ab5724fc1)
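Before the diff itself, here is a minimal sketch of how the OIDC settings introduced by this commit might be populated in code. The struct and field names come from `security.OIDCConfig` as shown later in this diff; the values, the `main` wrapper, and the way the config is handed off are illustrative placeholders only.

```go
package main

import "github.com/TwiN/gatus/v3/security"

func main() {
    // Placeholder values; a real deployment would load these from the Gatus configuration file.
    securityConfig := &security.Config{
        OIDC: &security.OIDCConfig{
            IssuerURL:       "https://dev-12345678.okta.com",
            RedirectURL:     "http://localhost:8080/authorization-code/callback",
            ClientID:        "<client-id>",
            ClientSecret:    "<client-secret>",
            Scopes:          []string{"openid"},
            AllowedSubjects: []string{"user1@example.com"}, // leave empty to allow all subjects
        },
    }
    _ = securityConfig // would be passed to CreateRouter alongside the rest of the configuration
}
```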
@@ -15,26 +15,31 @@ func CreateRouter(staticFolder string, securityConfig *security.Config, uiConfig
    if enabledMetrics {
        router.Handle("/metrics", promhttp.Handler()).Methods("GET")
    }
    api := router.PathPrefix("/api").Subrouter()
    protected := api.PathPrefix("/").Subrouter()
    unprotected := api.PathPrefix("/").Subrouter()
    if securityConfig != nil {
        if err := securityConfig.RegisterHandlers(router); err != nil {
            panic(err)
        }
        securityConfig.ApplySecurityMiddleware(protected)
    }
    // Endpoints
    protected.HandleFunc("/v1/endpoints/statuses", EndpointStatuses).Methods("GET") // No GzipHandler for this one, because we cache the content as Gzipped already
    protected.HandleFunc("/v1/endpoints/{key}/statuses", GzipHandlerFunc(EndpointStatus)).Methods("GET")
    unprotected.HandleFunc("/v1/endpoints/{key}/uptimes/{duration}/badge.svg", UptimeBadge).Methods("GET")
    unprotected.HandleFunc("/v1/endpoints/{key}/response-times/{duration}/badge.svg", ResponseTimeBadge).Methods("GET")
    unprotected.HandleFunc("/v1/endpoints/{key}/response-times/{duration}/chart.svg", ResponseTimeChart).Methods("GET")
    // XXX: Remove the lines between this and the next XXX comment in v4.0.0
    protected.HandleFunc("/v1/services/statuses", EndpointStatuses).Methods("GET") // No GzipHandler for this one, because we cache the content as Gzipped already
    protected.HandleFunc("/v1/services/{key}/statuses", GzipHandlerFunc(EndpointStatus)).Methods("GET")
    unprotected.HandleFunc("/v1/services/{key}/uptimes/{duration}/badge.svg", UptimeBadge).Methods("GET")
    unprotected.HandleFunc("/v1/services/{key}/response-times/{duration}/badge.svg", ResponseTimeBadge).Methods("GET")
    unprotected.HandleFunc("/v1/services/{key}/response-times/{duration}/chart.svg", ResponseTimeChart).Methods("GET")
    // XXX: Remove the lines between this and the previous XXX comment in v4.0.0
    // Misc
    router.Handle("/health", health.Handler().WithJSON(true)).Methods("GET")
    router.HandleFunc("/favicon.ico", FavIcon(staticFolder)).Methods("GET")
    // Endpoints
    router.HandleFunc("/api/v1/endpoints/statuses", secureIfNecessary(securityConfig, EndpointStatuses)).Methods("GET") // No GzipHandler for this one, because we cache the content as Gzipped already
    router.HandleFunc("/api/v1/endpoints/{key}/statuses", secureIfNecessary(securityConfig, GzipHandlerFunc(EndpointStatus))).Methods("GET")
    router.HandleFunc("/api/v1/endpoints/{key}/uptimes/{duration}/badge.svg", UptimeBadge).Methods("GET")
    router.HandleFunc("/api/v1/endpoints/{key}/response-times/{duration}/badge.svg", ResponseTimeBadge).Methods("GET")
    router.HandleFunc("/api/v1/endpoints/{key}/response-times/{duration}/chart.svg", ResponseTimeChart).Methods("GET")
    // XXX: Remove the lines between this and the next XXX comment in v4.0.0
    router.HandleFunc("/api/v1/services/statuses", secureIfNecessary(securityConfig, EndpointStatuses)).Methods("GET") // No GzipHandler for this one, because we cache the content as Gzipped already
    router.HandleFunc("/api/v1/services/{key}/statuses", secureIfNecessary(securityConfig, GzipHandlerFunc(EndpointStatus))).Methods("GET")
    router.HandleFunc("/api/v1/services/{key}/uptimes/{duration}/badge.svg", UptimeBadge).Methods("GET")
    router.HandleFunc("/api/v1/services/{key}/response-times/{duration}/badge.svg", ResponseTimeBadge).Methods("GET")
    router.HandleFunc("/api/v1/services/{key}/response-times/{duration}/chart.svg", ResponseTimeChart).Methods("GET")
    // XXX: Remove the lines between this and the previous XXX comment in v4.0.0
    // SPA
    router.HandleFunc("/services/{name}", SinglePageApplication(staticFolder, uiConfig)).Methods("GET") // XXX: Remove this in v4.0.0
    router.HandleFunc("/endpoints/{name}", SinglePageApplication(staticFolder, uiConfig)).Methods("GET")
@@ -43,10 +48,3 @@ func CreateRouter(staticFolder string, securityConfig *security.Config, uiConfig
    router.PathPrefix("/").Handler(GzipHandler(http.FileServer(http.Dir(staticFolder))))
    return router
}

func secureIfNecessary(securityConfig *security.Config, handler http.HandlerFunc) http.HandlerFunc {
    if securityConfig != nil {
        return security.Handler(handler, securityConfig)
    }
    return handler
}
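With this restructuring, per-handler `secureIfNecessary` wrapping is replaced by subrouters: status endpoints sit on the `protected` subrouter behind `ApplySecurityMiddleware`, while badge and chart SVGs stay on the `unprotected` one. As a hedged illustration, this is how a client could call the protected API once Basic auth is configured; the host and credentials below are placeholders, not values from this commit.

```go
package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    // Placeholder host and credentials for illustration only.
    req, err := http.NewRequest("GET", "http://localhost:8080/api/v1/endpoints/statuses", nil)
    if err != nil {
        log.Fatal(err)
    }
    req.SetBasicAuth("john.doe", "hunter2")
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status) // 200 OK with valid credentials, 401 Unauthorized otherwise
}
```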
go.mod (2)
@@ -3,6 +3,7 @@ module github.com/TwiN/gatus/v3
go 1.17

require (
    github.com/TwiN/g8 v1.2.0
    github.com/TwiN/gocache v1.2.4
    github.com/TwiN/health v1.3.0
    github.com/beorn7/perks v1.0.1 // indirect
@@ -50,6 +51,7 @@ require (
)

require (
    github.com/TwiN/gocache/v2 v2.0.0 // indirect
    golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
    google.golang.org/appengine v1.6.6 // indirect
    gopkg.in/square/go-jose.v2 v2.5.1 // indirect
go.sum (4)
@@ -33,8 +33,12 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/TwiN/g8 v1.2.0 h1:pNCSaNuFe0B8cAm9Ir2aCsnAeO2j4Y1FsHeYop+mOXQ=
github.com/TwiN/g8 v1.2.0/go.mod h1:SiIdItS0agSUloFqdQQt/RObB2jGSq+nnE9WfFv3RIo=
github.com/TwiN/gocache v1.2.4 h1:AfJ1YRcxtQ/zZEN61URDwk/dwFG7LSRenU5qIm9dQzo=
github.com/TwiN/gocache v1.2.4/go.mod h1:BjabsQQy6z5uHDorHa4LJVPEzFeitLIDbCtdv3gc1gA=
github.com/TwiN/gocache/v2 v2.0.0 h1:CPbDNKdSJpmBkh7aWcO7D3KK1yWaMlwX+3dsBPE8/so=
github.com/TwiN/gocache/v2 v2.0.0/go.mod h1:j4MABVaia2Tp53ERWc/3l4YxkswtPjB2hQzmL/kD/VQ=
github.com/TwiN/health v1.3.0 h1:xw90rZqg0NH5MRkVHzlgtDdP+EQd43v3yMqQVtYlGHg=
github.com/TwiN/health v1.3.0/go.mod h1:Bt+lEvSi6C/9NWb7OoGmUmgtS4dfPeMM9EINnURv5dE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
security/basic.go (15, new file)
@@ -0,0 +1,15 @@
package security

// BasicConfig is the configuration for Basic authentication
type BasicConfig struct {
    // Username is the name which will need to be used for a successful authentication
    Username string `yaml:"username"`

    // PasswordSha512Hash is the SHA512 hash of the password which will need to be used for a successful authentication
    PasswordSha512Hash string `yaml:"password-sha512"`
}

// isValid returns whether the basic security configuration is valid or not
func (c *BasicConfig) isValid() bool {
    return len(c.Username) > 0 && len(c.PasswordSha512Hash) == 128
}
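Since `isValid` expects a 128-character hex digest, here is a small illustrative way to produce a value for `password-sha512`; the password below is a placeholder, and Gatus itself performs the comparison with its own `Sha512` helper, as seen later in this diff.

```go
package main

import (
    "crypto/sha512"
    "encoding/hex"
    "fmt"
)

func main() {
    // Prints the 128-character hex digest that PasswordSha512Hash expects.
    sum := sha512.Sum512([]byte("my-password"))
    fmt.Println(hex.EncodeToString(sum[:]))
}
```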
@@ -1,9 +1,19 @@
package security

import (
    "net/http"
    "strings"

    "github.com/TwiN/g8"
    "github.com/gorilla/mux"
)

const (
    cookieNameState   = "gatus_state"
    cookieNameNonce   = "gatus_nonce"
    cookieNameSession = "gatus_session"
)

// Config is the security configuration for Gatus
type Config struct {
    Basic *BasicConfig `yaml:"basic,omitempty"`
@@ -21,22 +31,44 @@ func (c *Config) RegisterHandlers(router *mux.Router) error {
        if err := c.OIDC.initialize(); err != nil {
            return err
        }
        router.HandleFunc("/login", c.OIDC.loginHandler)
        router.HandleFunc("/oidc/login", c.OIDC.loginHandler)
        router.HandleFunc("/authorization-code/callback", c.OIDC.callbackHandler)
    }
    return nil
}

// BasicConfig is the configuration for Basic authentication
type BasicConfig struct {
    // Username is the name which will need to be used for a successful authentication
    Username string `yaml:"username"`

    // PasswordSha512Hash is the SHA512 hash of the password which will need to be used for a successful authentication
    PasswordSha512Hash string `yaml:"password-sha512"`
}

// isValid returns whether the basic security configuration is valid or not
func (c *BasicConfig) isValid() bool {
    return len(c.Username) > 0 && len(c.PasswordSha512Hash) == 128
func (c *Config) ApplySecurityMiddleware(api *mux.Router) {
    if c.OIDC != nil {
        // We're going to use g8 for session handling
        clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
            if _, exists := sessions.Get(token); exists {
                return g8.NewClient(token)
            }
            return nil
        })
        customTokenExtractorFunc := func(request *http.Request) string {
            sessionCookie, err := request.Cookie(cookieNameSession)
            if err != nil {
                return ""
            }
            return sessionCookie.Value
        }
        // TODO: g8: Add a way to update cookie after? would need the writer
        authorizationService := g8.NewAuthorizationService().WithClientProvider(clientProvider)
        gate := g8.New().WithAuthorizationService(authorizationService).WithCustomTokenExtractor(customTokenExtractorFunc)
        api.Use(gate.Protect)
    } else if c.Basic != nil {
        api.Use(func(handler http.Handler) http.Handler {
            return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                usernameEntered, passwordEntered, ok := r.BasicAuth()
                if !ok || usernameEntered != c.Basic.Username || Sha512(passwordEntered) != strings.ToLower(c.Basic.PasswordSha512Hash) {
                    w.Header().Set("WWW-Authenticate", "Basic")
                    w.WriteHeader(http.StatusUnauthorized)
                    _, _ = w.Write([]byte("Unauthorized"))
                    return
                }
                handler.ServeHTTP(w, r)
            })
        })
    }
}
@@ -2,9 +2,12 @@ package security

import (
    "context"
    "log"
    "net/http"
    "strings"
    "time"

    "github.com/TwiN/gocache"
    "github.com/coreos/go-oidc/v3/oidc"
    "github.com/google/uuid"
    "golang.org/x/oauth2"
@@ -12,11 +15,12 @@ import (

// OIDCConfig is the configuration for OIDC authentication
type OIDCConfig struct {
    IssuerURL    string   `yaml:"issuer-url"`   // e.g. https://dev-12345678.okta.com
    RedirectURL  string   `yaml:"redirect-url"` // e.g. http://localhost:8080/authorization-code/callback
    ClientID     string   `yaml:"client-id"`
    ClientSecret string   `yaml:"client-secret"`
    Scopes       []string `yaml:"scopes"`       // e.g. [openid]
    IssuerURL       string   `yaml:"issuer-url"`       // e.g. https://dev-12345678.okta.com
    RedirectURL     string   `yaml:"redirect-url"`     // e.g. http://localhost:8080/authorization-code/callback
    ClientID        string   `yaml:"client-id"`
    ClientSecret    string   `yaml:"client-secret"`
    Scopes          []string `yaml:"scopes"`           // e.g. ["openid"]
    AllowedSubjects []string `yaml:"allowed-subjects"` // e.g. ["user1@example.com"]. If empty, all subjects are allowed

    oauth2Config oauth2.Config
    verifier     *oidc.IDTokenVerifier
@@ -47,25 +51,32 @@ func (c *OIDCConfig) initialize() error {
func (c *OIDCConfig) loginHandler(w http.ResponseWriter, r *http.Request) {
    state, nonce := uuid.NewString(), uuid.NewString()
    http.SetCookie(w, &http.Cookie{
        Name:     "state",
        Name:     cookieNameState,
        Value:    state,
        Path:     "/",
        MaxAge:   int(time.Hour.Seconds()),
        Secure:   r.TLS != nil,
        SameSite: http.SameSiteLaxMode,
        HttpOnly: true,
    })
    http.SetCookie(w, &http.Cookie{
        Name:     "nonce",
        Name:     cookieNameNonce,
        Value:    nonce,
        Path:     "/",
        MaxAge:   int(time.Hour.Seconds()),
        Secure:   r.TLS != nil,
        SameSite: http.SameSiteLaxMode,
        HttpOnly: true,
    })
    http.Redirect(w, r, c.oauth2Config.AuthCodeURL(state, oidc.Nonce(nonce)), http.StatusFound)
}

func (c *OIDCConfig) callbackHandler(w http.ResponseWriter, r *http.Request) {
    // Check if there's an error
    if len(r.URL.Query().Get("error")) > 0 {
        http.Error(w, r.URL.Query().Get("error")+": "+r.URL.Query().Get("error_description"), http.StatusBadRequest)
        return
    }
    // Ensure that the state has the expected value
    state, err := r.Cookie("state")
    state, err := r.Cookie(cookieNameState)
    if err != nil {
        http.Error(w, "state not found", http.StatusBadRequest)
        return
@@ -91,7 +102,7 @@ func (c *OIDCConfig) callbackHandler(w http.ResponseWriter, r *http.Request) {
        return
    }
    // Validate nonce
    nonce, err := r.Cookie("nonce")
    nonce, err := r.Cookie(cookieNameNonce)
    if err != nil {
        http.Error(w, "nonce not found", http.StatusBadRequest)
        return
@@ -100,5 +111,34 @@ func (c *OIDCConfig) callbackHandler(w http.ResponseWriter, r *http.Request) {
        http.Error(w, "nonce did not match", http.StatusBadRequest)
        return
    }
    http.Redirect(w, r, "/", http.StatusFound)
    if len(c.AllowedSubjects) == 0 {
        // If there are no allowed subjects, all subjects are allowed.
        c.setSessionCookie(w, idToken)
        http.Redirect(w, r, "/", http.StatusFound)
        return
    }
    for _, subject := range c.AllowedSubjects {
        if strings.ToLower(subject) == strings.ToLower(idToken.Subject) {
            c.setSessionCookie(w, idToken)
            http.Redirect(w, r, "/", http.StatusFound)
            return
        }
    }
    log.Println("user is not in the list of allowed subjects")
    http.Redirect(w, r, "/login?error=access_denied", http.StatusFound)
}

func (c *OIDCConfig) setSessionCookie(w http.ResponseWriter, idToken *oidc.IDToken) {
    // At this point, the user has been confirmed. All that's left to do is create a session.
    sessionID := uuid.NewString()
    sessions.SetWithTTL(sessionID, idToken.Subject, time.Hour)
    http.SetCookie(w, &http.Cookie{
        Name:     cookieNameSession,
        Value:    sessionID,
        Path:     "/",
        MaxAge:   int(time.Hour.Seconds()),
        SameSite: http.SameSiteStrictMode,
    })
}

var sessions = gocache.NewCache()
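The body of `initialize()` is unchanged context and therefore not shown in this hunk. For readers unfamiliar with the go-oidc flow, a plausible sketch of such an initializer is shown below, based on the `oauth2Config` and `verifier` fields above and the imports already present in this file; this is an illustrative assumption, not the code shipped in this commit.

```go
func (c *OIDCConfig) initialize() error {
    // Assumed sketch: discover the issuer, then prepare the OAuth2 config and the ID token verifier.
    provider, err := oidc.NewProvider(context.Background(), c.IssuerURL)
    if err != nil {
        return err
    }
    c.verifier = provider.Verifier(&oidc.Config{ClientID: c.ClientID})
    c.oauth2Config = oauth2.Config{
        ClientID:     c.ClientID,
        ClientSecret: c.ClientSecret,
        RedirectURL:  c.RedirectURL,
        Endpoint:     provider.Endpoint(),
        Scopes:       c.Scopes,
    }
    return nil
}
```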
vendor/github.com/TwiN/g8/.gitattributes (1, generated, vendored, new file)
@@ -0,0 +1 @@
* text=lf

vendor/github.com/TwiN/g8/.gitignore (2, generated, vendored, new file)
@@ -0,0 +1,2 @@
.idea
*.iml
vendor/github.com/TwiN/g8/LICENSE.md (9, generated, vendored, new file)
@@ -0,0 +1,9 @@
MIT License

Copyright (c) 2021 TwiN

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/TwiN/g8/README.md (238, generated, vendored, new file)
@@ -0,0 +1,238 @@
# g8

![build](https://github.com/TwiN/g8/workflows/build/badge.svg?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/TwiN/g8)](https://goreportcard.com/report/github.com/TwiN/g8)
[![codecov](https://codecov.io/gh/TwiN/g8/branch/master/graph/badge.svg)](https://codecov.io/gh/TwiN/g8)
[![Go version](https://img.shields.io/github/go-mod/go-version/TwiN/g8.svg)](https://github.com/TwiN/g8)
[![Go Reference](https://pkg.go.dev/badge/github.com/TwiN/g8.svg)](https://pkg.go.dev/github.com/TwiN/g8)
[![Follow TwiN](https://img.shields.io/github/followers/TwiN?label=Follow&style=social)](https://github.com/TwiN)

g8, pronounced gate, is a simple Go library for protecting HTTP handlers.

Tired of constantly re-implementing a security layer for each application? Me too, that's why I made g8.

## Installation
```console
go get -u github.com/TwiN/g8
```

## Usage
Because the entire purpose of g8 is to NOT waste time configuring the layer of security, the primary emphasis is to
keep it as simple as possible.

### Simple
Just want a simple layer of security without the need for advanced permissions? This configuration is what you're
looking for.

```go
authorizationService := g8.NewAuthorizationService().WithToken("mytoken")
gate := g8.New().WithAuthorizationService(authorizationService)

router := http.NewServeMux()
router.Handle("/unprotected", yourHandler)
router.Handle("/protected", gate.Protect(yourHandler))

http.ListenAndServe(":8080", router)
```

The endpoint `/protected` is now only accessible if you pass the header `Authorization: Bearer mytoken`.

If you use `http.HandleFunc` instead of `http.Handle`, you may use `gate.ProtectFunc(yourHandler)` instead.

If you're not using the `Authorization` header, you can specify a custom token extractor.
This enables use cases like [Protecting a handler using session cookie](#protecting-a-handler-using-session-cookie)

### Advanced permissions
If you have tokens with more permissions than others, g8's permission system will make managing authorization a breeze.

Rather than registering tokens, think of it as registering clients, the only difference being that clients may be
configured with permissions while tokens cannot.

```go
authorizationService := g8.NewAuthorizationService().WithClient(g8.NewClient("mytoken").WithPermission("admin"))
gate := g8.New().WithAuthorizationService(authorizationService)

router := http.NewServeMux()
router.Handle("/unprotected", yourHandler)
router.Handle("/protected-with-admin", gate.ProtectWithPermissions(yourHandler, []string{"admin"}))

http.ListenAndServe(":8080", router)
```

The endpoint `/protected-with-admin` is now only accessible if you pass the header `Authorization: Bearer mytoken`,
because the client with the token `mytoken` has the permission `admin`. Note that the following handler would also be
accessible with that token:
```go
router.Handle("/protected", gate.Protect(yourHandler))
```

To clarify, both clients and tokens have access to handlers that aren't protected with extra permissions, and
essentially, tokens are registered as clients with no extra permissions in the background.

Creating a token like so:
```go
authorizationService := g8.NewAuthorizationService().WithToken("mytoken")
```
is the equivalent of creating the following client:
```go
authorizationService := g8.NewAuthorizationService().WithClient(g8.NewClient("mytoken"))
```

### With client provider
A client provider's task is to retrieve a Client from an external source (e.g. a database) when provided with a token.
You should use a client provider when you have a lot of tokens and it wouldn't make sense to register all of them using
`AuthorizationService`'s `WithToken`/`WithTokens`/`WithClient`/`WithClients`.

Note that the provider is used as a fallback source. As such, if a token is explicitly registered using one of the 4
aforementioned functions, the client provider will not be used.

```go
clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
    // We'll assume that the following function calls your database and returns a struct "User" that
    // has the user's token as well as the permissions granted to said user
    user := database.GetUserByToken(token)
    if user != nil {
        return g8.NewClient(user.Token).WithPermissions(user.Permissions)
    }
    return nil
})
authorizationService := g8.NewAuthorizationService().WithClientProvider(clientProvider)
gate := g8.New().WithAuthorizationService(authorizationService)
```

You can also configure the client provider to cache the output of the function you provide to retrieve clients by token:
```go
clientProvider := g8.NewClientProvider(...).WithCache(ttl, maxSize)
```

Since g8 leverages [TwiN/gocache](https://github.com/TwiN/gocache), you can also use gocache's
constants for configuring the TTL and the maximum size:
- Setting the TTL to `gocache.NoExpiration` (-1) will disable the TTL.
- Setting the maximum size to `gocache.NoMaxSize` (0) will disable the maximum cache size.

If you're using a TTL and have a lot of tokens (100k+), you may want to use `clientProvider.StartJanitor()` to allow
the cache to passively delete expired entries. If you have to re-initialize the client provider after the janitor has
been started, make sure to stop the janitor first (`clientProvider.StopJanitor()`). This is because the janitor runs on
a separate goroutine; thus, if you were to re-create a client provider and re-assign it, the old client provider would
still exist in memory with the old cache. I'm only specifying this for completeness, because for the overwhelming
majority of people, the gate will be created on application start and never modified again until the application shuts
down, in which case you don't even need to worry about stopping the janitor.
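For illustration, a minimal sketch of the above, using the `StartCacheJanitor`/`StopCacheJanitor` method names defined in the vendored `clientprovider.go` included later in this diff; the lookup logic is a placeholder, as in the examples above.

```go
clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
    user := database.GetUserByToken(token) // placeholder lookup, as in the examples above
    if user == nil {
        return nil
    }
    return g8.NewClient(user.Token).WithPermissions(user.Permissions)
}).WithCache(time.Hour, 100000)
if err := clientProvider.StartCacheJanitor(); err != nil {
    log.Fatal(err) // returns an error if no cache or no TTL was configured
}
defer clientProvider.StopCacheJanitor()
```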
To avoid any misunderstandings, using a client provider is not mandatory. If you only have a few tokens and you can load
them on application start, you can just leverage `AuthorizationService`'s `WithToken`/`WithTokens`/`WithClient`/`WithClients`.

## AuthorizationService
As the previous examples may have hinted, there are several ways to create clients. The one thing they have
in common is that they all go through AuthorizationService, which is in charge of both managing clients and determining
whether a request should be blocked or allowed through.

| Function           | Description                                                                                                                      |
|:-------------------|:---------------------------------------------------------------------------------------------------------------------------------|
| WithToken          | Creates a single static client with no extra permissions                                                                        |
| WithTokens         | Creates a slice of static clients with no extra permissions                                                                     |
| WithClient         | Creates a single static client                                                                                                  |
| WithClients        | Creates a slice of static clients                                                                                               |
| WithClientProvider | Creates a client provider which will allow a fallback to a dynamic source (e.g. to a database) when a static client is not found |

Except for `WithClientProvider`, every function listed above can be called more than once.
As a result, you may safely perform actions like this:
```go
authorizationService := g8.NewAuthorizationService().
    WithToken("123").
    WithToken("456").
    WithClient(g8.NewClient("789").WithPermission("admin"))
gate := g8.New().WithAuthorizationService(authorizationService)
```

Be aware that g8.Client supports a list of permissions as well. You may call `WithPermission` several times, or call
`WithPermissions` with a slice of permissions instead.
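For example, both of the following clients end up with the same two permissions (illustrative snippet):
```go
clientA := g8.NewClient("token-a").WithPermission("create").WithPermission("read")
clientB := g8.NewClient("token-b").WithPermissions([]string{"create", "read"})
authorizationService := g8.NewAuthorizationService().WithClients([]*g8.Client{clientA, clientB})
```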
### Permissions
Unlike client permissions, handler permissions are requirements.

A client may have as many permissions as you want, but for said client to have access to a handler protected by
permissions, the client must have all permissions defined by said handler in order to have access to it.

In other words, a client with the permissions `create`, `read`, `update` and `delete` would have access to all of these handlers:
```go
gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("mytoken").WithPermissions([]string{"create", "read", "update", "delete"})))
router := http.NewServeMux()
router.Handle("/", gate.Protect(homeHandler)) // equivalent of gate.ProtectWithPermissions(homeHandler, []string{})
router.Handle("/create", gate.ProtectWithPermissions(createHandler, []string{"create"}))
router.Handle("/read", gate.ProtectWithPermissions(readHandler, []string{"read"}))
router.Handle("/update", gate.ProtectWithPermissions(updateHandler, []string{"update"}))
router.Handle("/delete", gate.ProtectWithPermissions(deleteHandler, []string{"delete"}))
router.Handle("/crud", gate.ProtectWithPermissions(crudHandler, []string{"create", "read", "update", "delete"}))
```
But it would not have access to the following handler, because while `mytoken` has the `read` permission, it does not
have the `backup` permission:
```go
router.Handle("/backup", gate.ProtectWithPermissions(&testHandler{}, []string{"read", "backup"}))
```

## Rate limiting
To add a rate limit of 100 requests per second:
```go
gate := g8.New().WithRateLimit(100)
```

## Special use cases
### Protecting a handler using session cookie
If you want to only allow authenticated users to access a handler, you can use a custom token extractor function
combined with a client provider.

First, we'll create a function to extract the session ID from the session cookie. While a session ID does not
theoretically refer to a token, g8 uses the term `token` as a blanket term to refer to any string that can be used to
identify a client.
```go
customTokenExtractorFunc := func(request *http.Request) string {
    sessionCookie, err := request.Cookie("session")
    if err != nil {
        return ""
    }
    return sessionCookie.Value
}
```

Next, we need to create a client provider that will validate our token, which refers to the session ID in this case.
```go
clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
    // We'll assume that the following function calls your database and validates whether the session is valid.
    isSessionValid := database.CheckIfSessionIsValid(token)
    if !isSessionValid {
        return nil // Returning nil will cause the gate to return a 401 Unauthorized.
    }
    // You could also retrieve the user and their permissions if you wanted instead, but for this example,
    // all we care about is confirming whether the session is valid or not.
    return g8.NewClient(token)
})
```

Keep in mind that you can get really creative with the client provider above.
For instance, you could refresh the session's expiration time, which will allow the user to stay logged in for
as long as they're active.

You're also not limited to using something stateful like the example above. You could use a JWT and have your client
provider validate said JWT.

Finally, we can create the authorization service and the gate:
```go
authorizationService := g8.NewAuthorizationService().WithClientProvider(clientProvider)
gate := g8.New().WithAuthorizationService(authorizationService).WithCustomTokenExtractor(customTokenExtractorFunc)
```

If you need to access the token (session ID in this case) from the protected handlers, you can retrieve it from the
request context by using the key `g8.TokenContextKey`:
```go
http.Handle("/handle", gate.ProtectFunc(func(w http.ResponseWriter, r *http.Request) {
    sessionID, _ := r.Context().Value(g8.TokenContextKey).(string)
    // ...
}))
```
vendor/github.com/TwiN/g8/authorization.go (122, generated, vendored, new file)
@@ -0,0 +1,122 @@
package g8

import (
    "sync"
)

// AuthorizationService is the service that manages client/token registry and client fallback as well as the service
// that determines whether a token meets the specific requirements to be authorized by a Gate or not.
type AuthorizationService struct {
    clients        map[string]*Client
    clientProvider *ClientProvider

    mutex sync.RWMutex
}

// NewAuthorizationService creates a new AuthorizationService
func NewAuthorizationService() *AuthorizationService {
    return &AuthorizationService{
        clients: make(map[string]*Client),
    }
}

// WithToken is used to specify a single token for which authorization will be granted
//
// The client that will be created from this token will have access to all handlers that are not protected with a
// specific permission.
//
// In other words, if you were to do the following:
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithToken("12345"))
//
// The following handler would be accessible with the token 12345:
//     router.Handle("/1st-handler", gate.Protect(yourHandler))
//
// But this one would not be accessible with the token 12345:
//     router.Handle("/2nd-handler", gate.ProtectWithPermissions(yourOtherHandler, []string{"admin"}))
//
// Calling this function multiple times will add multiple clients, though you may want to use WithTokens instead
// if you plan to add multiple clients
//
// If you wish to configure advanced permissions, consider using WithClient instead.
//
func (authorizationService *AuthorizationService) WithToken(token string) *AuthorizationService {
    authorizationService.mutex.Lock()
    authorizationService.clients[token] = NewClient(token)
    authorizationService.mutex.Unlock()
    return authorizationService
}

// WithTokens is used to specify a slice of tokens for which authorization will be granted
func (authorizationService *AuthorizationService) WithTokens(tokens []string) *AuthorizationService {
    authorizationService.mutex.Lock()
    for _, token := range tokens {
        authorizationService.clients[token] = NewClient(token)
    }
    authorizationService.mutex.Unlock()
    return authorizationService
}

// WithClient is used to specify a single client for which authorization will be granted
//
// When compared to WithToken, the advantage of using this function is that you may specify the client's
// permissions and thus, be a lot more granular with what endpoint a token has access to.
//
// In other words, if you were to do the following:
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("12345").WithPermission("mod")))
//
// The following handlers would be accessible with the token 12345:
//     router.Handle("/1st-handler", gate.ProtectWithPermissions(yourHandler, []string{"mod"}))
//     router.Handle("/2nd-handler", gate.Protect(yourOtherHandler))
//
// But not this one, because the user does not have the permission "admin":
//     router.Handle("/3rd-handler", gate.ProtectWithPermissions(yetAnotherHandler, []string{"admin"}))
//
// Calling this function multiple times will add multiple clients, though you may want to use WithClients instead
// if you plan to add multiple clients
func (authorizationService *AuthorizationService) WithClient(client *Client) *AuthorizationService {
    authorizationService.mutex.Lock()
    authorizationService.clients[client.Token] = client
    authorizationService.mutex.Unlock()
    return authorizationService
}

// WithClients is used to specify a slice of clients for which authorization will be granted
func (authorizationService *AuthorizationService) WithClients(clients []*Client) *AuthorizationService {
    authorizationService.mutex.Lock()
    for _, client := range clients {
        authorizationService.clients[client.Token] = client
    }
    authorizationService.mutex.Unlock()
    return authorizationService
}

// WithClientProvider allows specifying a custom provider to fetch clients by token.
//
// For example, you can use it to fallback to making a call in your database when a request is made with a token that
// hasn't been specified via WithToken, WithTokens, WithClient or WithClients.
func (authorizationService *AuthorizationService) WithClientProvider(provider *ClientProvider) *AuthorizationService {
    authorizationService.clientProvider = provider
    return authorizationService
}

// IsAuthorized checks whether a client with a given token exists and has the permissions required.
//
// If permissionsRequired is nil or empty and a client with the given token exists, said client will have access to all
// handlers that are not protected by a given permission.
func (authorizationService *AuthorizationService) IsAuthorized(token string, permissionsRequired []string) bool {
    if len(token) == 0 {
        return false
    }
    authorizationService.mutex.RLock()
    client, _ := authorizationService.clients[token]
    authorizationService.mutex.RUnlock()
    // If there are no clients with the given token directly stored in the AuthorizationService, fall back to the
    // client provider, if there's one configured.
    if client == nil && authorizationService.clientProvider != nil {
        client = authorizationService.clientProvider.GetClientByToken(token)
    }
    if client != nil {
        return client.HasPermissions(permissionsRequired)
    }
    return false
}
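To make the behaviour of `IsAuthorized` concrete, a small illustrative snippet (not part of the vendored file):

```go
authorizationService := g8.NewAuthorizationService().
    WithToken("basic-token").
    WithClient(g8.NewClient("admin-token").WithPermission("admin"))

fmt.Println(authorizationService.IsAuthorized("basic-token", nil))                // true: token exists, no permission required
fmt.Println(authorizationService.IsAuthorized("basic-token", []string{"admin"}))  // false: token has no "admin" permission
fmt.Println(authorizationService.IsAuthorized("admin-token", []string{"admin"}))  // true
fmt.Println(authorizationService.IsAuthorized("unknown", nil))                    // false: no such client and no client provider
```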
vendor/github.com/TwiN/g8/client.go (58, generated, vendored, new file)
@@ -0,0 +1,58 @@
package g8

// Client is a struct containing both a Token and a slice of extra Permissions that said token has.
type Client struct {
    // Token is the value used to authenticate with the API.
    Token string

    // Permissions is a slice of extra permissions that may be used for more granular access control.
    //
    // If you only wish to use Gate.Protect and Gate.ProtectFunc, you do not have to worry about this,
    // since they're only used by Gate.ProtectWithPermissions and Gate.ProtectFuncWithPermissions
    Permissions []string
}

// NewClient creates a Client with a given token
func NewClient(token string) *Client {
    return &Client{
        Token: token,
    }
}

// NewClientWithPermissions creates a Client with a slice of permissions
// Equivalent to using NewClient and WithPermissions
func NewClientWithPermissions(token string, permissions []string) *Client {
    return NewClient(token).WithPermissions(permissions)
}

// WithPermissions adds a slice of permissions to a client
func (client *Client) WithPermissions(permissions []string) *Client {
    client.Permissions = append(client.Permissions, permissions...)
    return client
}

// WithPermission adds a permission to a client
func (client *Client) WithPermission(permission string) *Client {
    client.Permissions = append(client.Permissions, permission)
    return client
}

// HasPermission checks whether a client has a given permission
func (client Client) HasPermission(permissionRequired string) bool {
    for _, permission := range client.Permissions {
        if permissionRequired == permission {
            return true
        }
    }
    return false
}

// HasPermissions checks whether a client has all the permissions passed
func (client Client) HasPermissions(permissionsRequired []string) bool {
    for _, permissionRequired := range permissionsRequired {
        if !client.HasPermission(permissionRequired) {
            return false
        }
    }
    return true
}
vendor/github.com/TwiN/g8/clientprovider.go (140, generated, vendored, new file)
@@ -0,0 +1,140 @@
package g8

import (
    "errors"
    "time"

    "github.com/TwiN/gocache/v2"
)

var (
    // ErrNoExpiration is the error returned by ClientProvider.StartCacheJanitor if there was an attempt to start the
    // janitor despite no expiration being configured.
    // To clarify, this is because the cache janitor is only useful when an expiration is set.
    ErrNoExpiration = errors.New("no point starting the janitor if the TTL is set to not expire")

    // ErrCacheNotInitialized is the error returned by ClientProvider.StartCacheJanitor if there was an attempt to start
    // the janitor despite the cache not having been initialized using ClientProvider.WithCache
    ErrCacheNotInitialized = errors.New("cannot start janitor because cache is not configured")
)

// ClientProvider has the task of retrieving a Client from an external source (e.g. a database) when provided with a
// token. It should be used when you have a lot of tokens, and it wouldn't make sense to register all of them using
// AuthorizationService's WithToken, WithTokens, WithClient or WithClients.
//
// Note that the provider is used as a fallback source. As such, if a token is explicitly registered using one of the 4
// aforementioned functions, the client provider will not be used by the AuthorizationService when a request is made
// with said token. It will, however, be called upon if a token that is not explicitly registered in
// AuthorizationService is sent alongside a request going through the Gate.
//
//     clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
//         // We'll assume that the following function calls your database and returns a struct "User" that
//         // has the user's token as well as the permissions granted to said user
//         user := database.GetUserByToken(token)
//         if user != nil {
//             return g8.NewClient(user.Token).WithPermissions(user.Permissions)
//         }
//         return nil
//     })
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClientProvider(clientProvider))
//
type ClientProvider struct {
    getClientByTokenFunc func(token string) *Client

    cache *gocache.Cache
    ttl   time.Duration
}

// NewClientProvider creates a ClientProvider
// The parameter that must be passed is a function that the provider will use to retrieve a client by a given token
//
// Example:
//     clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
//         // We'll assume that the following function calls your database and returns a struct "User" that
//         // has the user's token as well as the permissions granted to said user
//         user := database.GetUserByToken(token)
//         if user == nil {
//             return nil
//         }
//         return g8.NewClient(user.Token).WithPermissions(user.Permissions)
//     })
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClientProvider(clientProvider))
//
func NewClientProvider(getClientByTokenFunc func(token string) *Client) *ClientProvider {
    return &ClientProvider{
        getClientByTokenFunc: getClientByTokenFunc,
    }
}

// WithCache adds cache options to the ClientProvider.
//
// ttl is the time until the cache entry will expire. A TTL of gocache.NoExpiration (-1) means no expiration
// maxSize is the maximum amount of entries that can be in the cache at any given time.
// If a value of gocache.NoMaxSize (0) or less is provided for maxSize, there will be no maximum size.
//
// Example:
//     clientProvider := g8.NewClientProvider(func(token string) *g8.Client {
//         // We'll assume that the following function calls your database and returns a struct "User" that
//         // has the user's token as well as the permissions granted to said user
//         user := database.GetUserByToken(token)
//         if user != nil {
//             return g8.NewClient(user.Token).WithPermissions(user.Permissions)
//         }
//         return nil
//     })
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClientProvider(clientProvider.WithCache(time.Hour, 70000)))
//
func (provider *ClientProvider) WithCache(ttl time.Duration, maxSize int) *ClientProvider {
    provider.cache = gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(maxSize)
    provider.ttl = ttl
    return provider
}

// StartCacheJanitor starts the cache janitor, which passively deletes expired cache entries in the background.
//
// Not really necessary unless you have a lot of clients (100000+).
//
// Even without the janitor, active eviction will still happen (i.e. when GetClientByToken is called, but the cache
// entry for the given token has expired, the cache entry will be automatically deleted and re-fetched from the
// user-defined getClientByTokenFunc)
func (provider *ClientProvider) StartCacheJanitor() error {
    if provider.cache == nil {
        // Can't start the cache janitor if there's no cache
        return ErrCacheNotInitialized
    }
    if provider.ttl != gocache.NoExpiration {
        return provider.cache.StartJanitor()
    }
    return ErrNoExpiration
}

// StopCacheJanitor stops the cache janitor
//
// Not required unless your application initializes multiple providers over the course of its lifecycle.
// In English, that means if you initialize a ClientProvider only once on application start and it stays up
// until your application shuts down, you don't need to call this function.
func (provider *ClientProvider) StopCacheJanitor() {
    if provider.cache != nil {
        provider.cache.StopJanitor()
    }
}

// GetClientByToken retrieves a client by its token through the provided getClientByTokenFunc.
func (provider *ClientProvider) GetClientByToken(token string) *Client {
    if provider.cache == nil {
        return provider.getClientByTokenFunc(token)
    }
    if cachedClient, exists := provider.cache.Get(token); exists {
        if cachedClient == nil {
            return nil
        }
        // Safely typecast the client.
        // Regardless of whether the typecast is successful or not, we return client since it'll be either client or
        // nil. Technically, it should never be nil, but it's better to be safe than sorry.
        client, _ := cachedClient.(*Client)
        return client
    }
    client := provider.getClientByTokenFunc(token)
    provider.cache.SetWithTTL(token, client, provider.ttl)
    return client
}
vendor/github.com/TwiN/g8/gate.go (212, generated, vendored, new file)
@@ -0,0 +1,212 @@
package g8

import (
    "context"
    "net/http"
    "strings"
)

const (
    // AuthorizationHeader is the header in which g8 looks for the authorization bearer token
    AuthorizationHeader = "Authorization"

    // DefaultUnauthorizedResponseBody is the default response body returned if a request was sent with a missing or invalid token
    DefaultUnauthorizedResponseBody = "token is missing or invalid"

    // DefaultTooManyRequestsResponseBody is the default response body returned if a request exceeded the allowed rate limit
    DefaultTooManyRequestsResponseBody = "too many requests"

    // TokenContextKey is the key used to store the token in the context.
    TokenContextKey = "g8.token"
)

// Gate is a lock on the front door of your API, letting only those you allow through.
type Gate struct {
    authorizationService     *AuthorizationService
    unauthorizedResponseBody []byte

    customTokenExtractorFunc func(request *http.Request) string

    rateLimiter                 *RateLimiter
    tooManyRequestsResponseBody []byte
}

// Deprecated: use New instead.
func NewGate(authorizationService *AuthorizationService) *Gate {
    return &Gate{
        authorizationService:        authorizationService,
        unauthorizedResponseBody:    []byte(DefaultUnauthorizedResponseBody),
        tooManyRequestsResponseBody: []byte(DefaultTooManyRequestsResponseBody),
    }
}

// New creates a new Gate.
func New() *Gate {
    return &Gate{
        unauthorizedResponseBody:    []byte(DefaultUnauthorizedResponseBody),
        tooManyRequestsResponseBody: []byte(DefaultTooManyRequestsResponseBody),
    }
}

// WithAuthorizationService sets the authorization service to use.
//
// If there is no authorization service, Gate will not enforce authorization.
func (gate *Gate) WithAuthorizationService(authorizationService *AuthorizationService) *Gate {
    gate.authorizationService = authorizationService
    return gate
}

// WithCustomUnauthorizedResponseBody sets a custom response body when Gate determines that a request must be blocked
func (gate *Gate) WithCustomUnauthorizedResponseBody(unauthorizedResponseBody []byte) *Gate {
    gate.unauthorizedResponseBody = unauthorizedResponseBody
    return gate
}

// WithCustomTokenExtractor allows the specification of a custom function to extract a token from a request.
// If a custom token extractor is not specified, the token will be extracted from the Authorization header.
//
// For instance, if you're using a session cookie, you can extract the token from the cookie like so:
//     authorizationService := g8.NewAuthorizationService()
//     customTokenExtractorFunc := func(request *http.Request) string {
//         sessionCookie, err := request.Cookie("session")
//         if err != nil {
//             return ""
//         }
//         return sessionCookie.Value
//     }
//     gate := g8.New().WithAuthorizationService(authorizationService).WithCustomTokenExtractor(customTokenExtractorFunc)
//
// You would normally use this with a client provider that matches whatever need you have.
// For example, if you're using a session cookie, your client provider would retrieve the user from the session ID
// extracted by this custom token extractor.
//
// Note that for the sake of convenience, the token extracted from the request is passed to the protected handler's
// request context under the key TokenContextKey. This is especially useful if the token is in fact a session ID.
func (gate *Gate) WithCustomTokenExtractor(customTokenExtractorFunc func(request *http.Request) string) *Gate {
    gate.customTokenExtractorFunc = customTokenExtractorFunc
    return gate
}

// WithRateLimit adds rate limiting to the Gate
//
// If you just want to use a gate for rate limiting purposes:
//     gate := g8.New().WithRateLimit(50)
//
func (gate *Gate) WithRateLimit(maximumRequestsPerSecond int) *Gate {
    gate.rateLimiter = NewRateLimiter(maximumRequestsPerSecond)
    return gate
}

// Protect secures a handler, requiring requests going through to have a valid Authorization Bearer token.
// Unlike ProtectWithPermissions, Protect will allow access to any registered tokens, regardless of their permissions
// or lack thereof.
//
// Example:
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithToken("token"))
//     router := http.NewServeMux()
//     // Without protection
//     router.Handle("/handle", yourHandler)
//     // With protection
//     router.Handle("/handle", gate.Protect(yourHandler))
//
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
func (gate *Gate) Protect(handler http.Handler) http.Handler {
    return gate.ProtectWithPermissions(handler, nil)
}

// ProtectWithPermissions secures a handler, requiring requests going through to have a valid Authorization Bearer token
// as well as a slice of permissions that must be met.
//
// Example:
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("token").WithPermission("admin")))
//     router := http.NewServeMux()
//     // Without protection
//     router.Handle("/handle", yourHandler)
//     // With protection
//     router.Handle("/handle", gate.ProtectWithPermissions(yourHandler, []string{"admin"}))
//
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
func (gate *Gate) ProtectWithPermissions(handler http.Handler, permissions []string) http.Handler {
    return gate.ProtectFuncWithPermissions(func(writer http.ResponseWriter, request *http.Request) {
        handler.ServeHTTP(writer, request)
    }, permissions)
}

// ProtectWithPermission does the same thing as ProtectWithPermissions, but for a single permission instead of a
// slice of permissions
//
// See ProtectWithPermissions for further documentation
func (gate *Gate) ProtectWithPermission(handler http.Handler, permission string) http.Handler {
    return gate.ProtectFuncWithPermissions(func(writer http.ResponseWriter, request *http.Request) {
        handler.ServeHTTP(writer, request)
    }, []string{permission})
}

// ProtectFunc secures a handlerFunc, requiring requests going through to have a valid Authorization Bearer token.
// Unlike ProtectFuncWithPermissions, ProtectFunc will allow access to any registered tokens, regardless of their
// permissions or lack thereof.
//
// Example:
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithToken("token"))
//     router := http.NewServeMux()
//     // Without protection
//     router.HandleFunc("/handle", yourHandlerFunc)
//     // With protection
//     router.HandleFunc("/handle", gate.ProtectFunc(yourHandlerFunc))
//
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
func (gate *Gate) ProtectFunc(handlerFunc http.HandlerFunc) http.HandlerFunc {
    return gate.ProtectFuncWithPermissions(handlerFunc, nil)
}

// ProtectFuncWithPermissions secures a handler, requiring requests going through to have a valid Authorization Bearer
// token as well as a slice of permissions that must be met.
//
// Example:
//     gate := g8.New().WithAuthorizationService(g8.NewAuthorizationService().WithClient(g8.NewClient("token").WithPermission("admin")))
//     router := http.NewServeMux()
//     // Without protection
//     router.HandleFunc("/handle", yourHandlerFunc)
//     // With protection
//     router.HandleFunc("/handle", gate.ProtectFuncWithPermissions(yourHandlerFunc, []string{"admin"}))
//
// The token extracted from the request is passed to the handlerFunc request context under the key TokenContextKey
func (gate *Gate) ProtectFuncWithPermissions(handlerFunc http.HandlerFunc, permissions []string) http.HandlerFunc {
    return func(writer http.ResponseWriter, request *http.Request) {
        if gate.rateLimiter != nil {
            if !gate.rateLimiter.Try() {
                writer.WriteHeader(http.StatusTooManyRequests)
                _, _ = writer.Write(gate.tooManyRequestsResponseBody)
                return
            }
        }
        if gate.authorizationService != nil {
            var token string
            if gate.customTokenExtractorFunc != nil {
                token = gate.customTokenExtractorFunc(request)
            } else {
                token = extractTokenFromRequest(request)
            }
            if !gate.authorizationService.IsAuthorized(token, permissions) {
                writer.WriteHeader(http.StatusUnauthorized)
                _, _ = writer.Write(gate.unauthorizedResponseBody)
                return
            }
            request = request.WithContext(context.WithValue(request.Context(), TokenContextKey, token))
        }
        handlerFunc(writer, request)
    }
}

// ProtectFuncWithPermission does the same thing as ProtectFuncWithPermissions, but for a single permission instead of a
// slice of permissions
//
// See ProtectFuncWithPermissions for further documentation
func (gate *Gate) ProtectFuncWithPermission(handlerFunc http.HandlerFunc, permission string) http.HandlerFunc {
    return gate.ProtectFuncWithPermissions(handlerFunc, []string{permission})
}

// extractTokenFromRequest extracts the bearer token from the AuthorizationHeader
func extractTokenFromRequest(request *http.Request) string {
    return strings.TrimPrefix(request.Header.Get(AuthorizationHeader), "Bearer ")
}
vendor/github.com/TwiN/g8/ratelimiter.go (42, generated, vendored, new file)
@@ -0,0 +1,42 @@
package g8

import (
    "sync"
    "time"
)

// RateLimiter is a fixed rate limiter
type RateLimiter struct {
    maximumExecutionsPerSecond int
    executionsLeftInWindow     int
    windowStartTime            time.Time
    mutex                      sync.Mutex
}

// NewRateLimiter creates a RateLimiter
func NewRateLimiter(maximumExecutionsPerSecond int) *RateLimiter {
    return &RateLimiter{
        windowStartTime:            time.Now(),
        executionsLeftInWindow:     maximumExecutionsPerSecond,
        maximumExecutionsPerSecond: maximumExecutionsPerSecond,
    }
}

// Try updates the number of executions if the rate limit quota hasn't been reached and returns whether the
// attempt was successful or not.
//
// Returns false if the execution was not successful (rate limit quota has been reached)
// Returns true if the execution was successful (rate limit quota has not been reached)
func (r *RateLimiter) Try() bool {
    r.mutex.Lock()
    defer r.mutex.Unlock()
    if time.Now().Add(-time.Second).After(r.windowStartTime) {
        r.windowStartTime = time.Now()
        r.executionsLeftInWindow = r.maximumExecutionsPerSecond
    }
    if r.executionsLeftInWindow == 0 {
        return false
    }
    r.executionsLeftInWindow--
    return true
}
1
vendor/github.com/TwiN/gocache/v2/.gitattributes
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
* text=lf
|
1
vendor/github.com/TwiN/gocache/v2/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
.idea
|
9
vendor/github.com/TwiN/gocache/v2/LICENSE.md
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2021 TwiN
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
370
vendor/github.com/TwiN/gocache/v2/README.md
generated
vendored
Normal file
@ -0,0 +1,370 @@
|
||||
# gocache
|
||||
![build](https://github.com/TwiN/gocache/workflows/build/badge.svg?branch=master)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/TwiN/gocache)](https://goreportcard.com/report/github.com/TwiN/gocache)
|
||||
[![codecov](https://codecov.io/gh/TwiN/gocache/branch/master/graph/badge.svg)](https://codecov.io/gh/TwiN/gocache)
|
||||
[![Go version](https://img.shields.io/github/go-mod/go-version/TwiN/gocache.svg)](https://github.com/TwiN/gocache)
|
||||
[![Go Reference](https://pkg.go.dev/badge/github.com/TwiN/gocache.svg)](https://pkg.go.dev/github.com/TwiN/gocache)
|
||||
[![Follow TwiN](https://img.shields.io/github/followers/TwiN?label=Follow&style=social)](https://github.com/TwiN)
|
||||
|
||||
gocache is an easy-to-use, high-performance, lightweight and thread-safe (goroutine-safe) in-memory key-value cache
|
||||
with support for LRU and FIFO eviction policies as well as expiration, bulk operations and even retrieval of keys by pattern.
|
||||
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Features](#features)
|
||||
- [Usage](#usage)
|
||||
- [Initializing the cache](#initializing-the-cache)
|
||||
- [Functions](#functions)
|
||||
- [Examples](#examples)
|
||||
- [Creating or updating an entry](#creating-or-updating-an-entry)
|
||||
- [Getting an entry](#getting-an-entry)
|
||||
- [Deleting an entry](#deleting-an-entry)
|
||||
- [Complex example](#complex-example)
|
||||
- [Persistence](#persistence)
|
||||
- [Eviction](#eviction)
|
||||
- [MaxSize](#maxsize)
|
||||
- [MaxMemoryUsage](#maxmemoryusage)
|
||||
- [Expiration](#expiration)
|
||||
- [Performance](#performance)
|
||||
- [Summary](#summary)
|
||||
- [Results](#results)
|
||||
- [FAQ](#faq)
|
||||
- [How can I persist the data on application termination?](#how-can-i-persist-the-data-on-application-termination)
|
||||
|
||||
|
||||
## Features
|
||||
gocache supports the following cache eviction policies:
|
||||
- First in first out (FIFO)
|
||||
- Least recently used (LRU)
|
||||
|
||||
It also supports cache entry TTL, which is both active and passive. Active expiration means that if you attempt
|
||||
to retrieve a cache key that has already expired, it will delete it on the spot and the behavior will be as if
|
||||
the cache key didn't exist. As for passive expiration, there's a background task that will take care of deleting
|
||||
expired keys.
|
||||
|
||||
It also includes what you'd expect from a cache, like GET/SET, bulk operations and get by pattern.
|
||||
|
||||
|
||||
## Usage
|
||||
```
|
||||
go get -u github.com/TwiN/gocache/v2
|
||||
```
|
||||
|
||||
|
||||
### Initializing the cache
|
||||
```go
|
||||
cache := gocache.NewCache().WithMaxSize(1000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
||||
```
|
||||
|
||||
If you're planning on using expiration (`SetWithTTL` or `Expire`) and you want expired entries to be automatically deleted
|
||||
in the background, make sure to start the janitor when you instantiate the cache:
|
||||
|
||||
```go
|
||||
cache.StartJanitor()
|
||||
```
|
||||
|
||||
### Functions
|
||||
| Function | Description |
|
||||
| --------------------------------- | ----------- |
|
||||
| WithMaxSize | Sets the max size of the cache. `gocache.NoMaxSize` means there is no limit. If not set, the default max size is `gocache.DefaultMaxSize`.
|
||||
| WithMaxMemoryUsage | Sets the max memory usage of the cache. `gocache.NoMaxMemoryUsage` means there is no limit. The default behavior is to not evict based on memory usage.
|
||||
| WithEvictionPolicy | Sets the eviction algorithm to be used when the cache reaches the max size. If not set, the default eviction policy is `gocache.FirstInFirstOut` (FIFO).
|
||||
| WithForceNilInterfaceOnNilPointer | Configures whether values with a nil pointer passed to write functions should be forcefully set to nil. Defaults to true.
|
||||
| StartJanitor | Starts the janitor, which is in charge of deleting expired cache entries in the background.
|
||||
| StopJanitor | Stops the janitor.
|
||||
| Set | Same as `SetWithTTL`, but with no expiration (`gocache.NoExpiration`)
|
||||
| SetAll | Same as `Set`, but in bulk
|
||||
| SetWithTTL | Creates or updates a cache entry with the given key, value and expiration time. If the cache size after the operation exceeds the configured max size, the tail is evicted. Depending on the eviction policy, the tail is either the oldest entry (FIFO) or the least recently used entry (LRU).
|
||||
| Get | Gets a cache entry by its key.
|
||||
| GetByKeys | Gets a map of entries by their keys. The resulting map will contain all keys, even if some of the keys in the slice passed as parameter were not present in the cache.
|
||||
| GetAll | Gets all cache entries.
|
||||
| GetKeysByPattern | Retrieves a slice of keys that matches a given pattern.
|
||||
| Delete | Removes a key from the cache.
|
||||
| DeleteAll | Removes multiple keys from the cache.
|
||||
| Count | Gets the size of the cache. This includes cache keys which may have already expired, but have not been removed yet.
|
||||
| Clear | Wipes the cache.
|
||||
| TTL | Gets the time until a cache key expires.
|
||||
| Expire | Sets the expiration time of an existing cache key.
|
||||
|
||||
For further documentation, please refer to [Go Reference](https://pkg.go.dev/github.com/TwiN/gocache)
|
||||
|
||||
|
||||
### Examples
|
||||
|
||||
#### Creating or updating an entry
|
||||
```go
|
||||
cache.Set("key", "value")
|
||||
cache.Set("key", 1)
|
||||
cache.Set("key", struct{ Text string }{Text: "value"})
|
||||
cache.SetWithTTL("key", []byte("value"), 24*time.Hour)
|
||||
```
|
||||
|
||||
#### Getting an entry
|
||||
```go
|
||||
value, exists := cache.Get("key")
|
||||
```
|
||||
You can also get multiple entries by using `cache.GetByKeys([]string{"key1", "key2"})`
|
||||
|
||||
#### Deleting an entry
|
||||
```go
|
||||
cache.Delete("key")
|
||||
```
|
||||
You can also delete multiple entries by using `cache.DeleteAll([]string{"key1", "key2"})`
|
||||
|
||||
#### Complex example
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/TwiN/gocache/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(10000)
|
||||
cache.StartJanitor() // Passively manages expired entries
|
||||
defer cache.StopJanitor()
|
||||
|
||||
cache.Set("key", "value")
|
||||
cache.SetWithTTL("key-with-ttl", "value", 60*time.Minute)
|
||||
cache.SetAll(map[string]interface{}{"k1": "v1", "k2": "v2", "k3": "v3"})
|
||||
|
||||
fmt.Println("[Count] Cache size:", cache.Count())
|
||||
|
||||
value, exists := cache.Get("key")
|
||||
fmt.Printf("[Get] key=key; value=%s; exists=%v\n", value, exists)
|
||||
for key, value := range cache.GetByKeys([]string{"k1", "k2", "k3"}) {
|
||||
fmt.Printf("[GetByKeys] key=%s; value=%s\n", key, value)
|
||||
}
|
||||
for _, key := range cache.GetKeysByPattern("key*", 0) {
|
||||
fmt.Printf("[GetKeysByPattern] pattern=key*; key=%s\n", key)
|
||||
}
|
||||
|
||||
cache.Expire("key", time.Hour)
|
||||
time.Sleep(500*time.Millisecond)
|
||||
timeUntilExpiration, _ := cache.TTL("key")
|
||||
fmt.Println("[TTL] Number of seconds before 'key' expires:", int(timeUntilExpiration.Seconds()))
|
||||
|
||||
cache.Delete("key")
|
||||
cache.DeleteAll([]string{"k1", "k2", "k3"})
|
||||
|
||||
cache.Clear()
|
||||
fmt.Println("[Count] Cache size after clearing the cache:", cache.Count())
|
||||
}
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>Output</summary>
|
||||
|
||||
```
|
||||
[Count] Cache size: 5
|
||||
[Get] key=key; value=value; exists=true
|
||||
[GetByKeys] key=k1; value=v1
|
||||
[GetByKeys] key=k2; value=v2
|
||||
[GetByKeys] key=k3; value=v3
|
||||
[GetKeysByPattern] pattern=key*; key=key-with-ttl
|
||||
[GetKeysByPattern] pattern=key*; key=key
|
||||
[TTL] Number of seconds before 'key' expires: 3599
|
||||
[Count] Cache size after clearing the cache: 0
|
||||
```
|
||||
</details>
|
||||
|
||||
|
||||
## Persistence
|
||||
Prior to v2, gocache supported persistence out of the box.
|
||||
|
||||
After some thinking, I decided that persistence added too many dependencies, and given that this is a cache library
|
||||
and most people wouldn't be interested in persistence, I decided to get rid of it.
|
||||
|
||||
That being said, you can use the `GetAll` and `SetAll` methods of `gocache.Cache` to implement persistence yourself.
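
As a rough sketch of what that could look like, the example below persists the cache to a JSON file using `GetAll` and `SetAll`; the file path, function names and error handling are illustrative assumptions, and TTLs are not preserved:
```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/TwiN/gocache/v2"
)

func main() {
	cache := gocache.NewCache()
	if err := loadCache(cache, "cache.json"); err != nil {
		log.Println("starting with an empty cache:", err)
	}
	cache.Set("key", "value")
	if err := saveCache(cache, "cache.json"); err != nil {
		log.Println("failed to persist the cache:", err)
	}
}

// saveCache serializes every entry to a JSON file.
// Only keys and values are kept: TTLs are lost, and every value must be JSON-serializable.
func saveCache(cache *gocache.Cache, path string) error {
	data, err := json.Marshal(cache.GetAll())
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0644)
}

// loadCache reads the JSON file back and repopulates the cache with SetAll.
func loadCache(cache *gocache.Cache, path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	entries := make(map[string]interface{})
	if err := json.Unmarshal(data, &entries); err != nil {
		return err
	}
	cache.SetAll(entries)
	return nil
}
```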
|
||||
|
||||
|
||||
## Eviction
|
||||
### MaxSize
|
||||
Eviction by MaxSize is the default behavior, and is also the most efficient.
|
||||
|
||||
The code below will create a cache that has a maximum size of 1000:
|
||||
```go
|
||||
cache := gocache.NewCache().WithMaxSize(1000)
|
||||
```
|
||||
This means that whenever an operation causes the total size of the cache to go above 1000, the tail will be evicted.
|
||||
|
||||
### MaxMemoryUsage
|
||||
Eviction by MaxMemoryUsage is **disabled by default**, and is in alpha.
|
||||
|
||||
The code below will create a cache that has a maximum memory usage of 50MB:
|
||||
```go
|
||||
cache := gocache.NewCache().WithMaxSize(0).WithMaxMemoryUsage(50*gocache.Megabyte)
|
||||
```
|
||||
This means that whenever an operation causes the total memory usage of the cache to go above 50MB, one or more tails
|
||||
will be evicted.
|
||||
|
||||
Unlike evictions caused by reaching the MaxSize, evictions triggered by MaxMemoryUsage may lead to multiple entries
|
||||
being evicted in a row. The reason for this is that if, for instance, you had 100 entries of 0.1MB each and you suddenly added
|
||||
a single entry of 10MB, 100 entries would need to be evicted to make enough space for that new big entry.
|
||||
|
||||
It's very important to keep in mind that eviction by MaxMemoryUsage is approximate.
|
||||
|
||||
**The only memory taken into consideration is the size of the cache, not the size of the entire application.**
|
||||
If you pass along 100MB worth of data in a matter of seconds, even though the cache's memory usage will remain
|
||||
under 50MB (or whatever you configure the MaxMemoryUsage to), the memory footprint generated by that 100MB will
|
||||
still exist until the next GC cycle.
|
||||
|
||||
As previously mentioned, this is a work in progress, and here's a list of the things you should keep in mind:
|
||||
- The memory usage of structs is a gross estimation and may not reflect the actual memory usage (see the sketch after this list).
|
||||
- Native types (string, int, bool, []byte, etc.) are the most accurate for calculating the memory usage.
|
||||
- Adding an entry bigger than the configured MaxMemoryUsage will work, but it will evict all other entries.
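
To make the approximation above concrete, here is a minimal sketch (the variable names and the 10MB payload are arbitrary); `MemoryUsage()` returns the cache's own estimate of the dataset size in bytes:
```go
cache := gocache.NewCache().WithMaxSize(gocache.NoMaxSize).WithMaxMemoryUsage(50 * gocache.Megabyte)
// A single ~10MB entry counts for roughly 10*gocache.Megabyte of the 50MB budget;
// once the estimated total goes above 50MB, tails are evicted until it is back under.
cache.Set("big", make([]byte, 10*gocache.Megabyte))
approximateBytes := cache.MemoryUsage()
```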
|
||||
|
||||
|
||||
## Expiration
|
||||
There are two ways that the deletion of expired keys can take place:
|
||||
- Active
|
||||
- Passive
|
||||
|
||||
**Active deletion of expired keys** happens when an attempt is made to access the value of a cache entry that expired.
|
||||
`Get`, `GetByKeys` and `GetAll` are the only functions that can trigger active deletion of expired keys.
|
||||
|
||||
**Passive deletion of expired keys** runs in the background and is managed by the janitor.
|
||||
If you do not start the janitor, there will be no passive deletion of expired keys.
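
For illustration, here is a minimal sketch showing where each mechanism kicks in (the key name and TTL are arbitrary):
```go
cache := gocache.NewCache()
cache.SetWithTTL("session", "value", 5*time.Minute)

// Active deletion: if the entry has expired by the time Get is called,
// Get deletes it on the spot and reports that the key does not exist.
if value, exists := cache.Get("session"); exists {
	_ = value // still fresh
}

// Passive deletion: the janitor removes expired entries in the background,
// even if nothing ever reads them again.
_ = cache.StartJanitor()
defer cache.StopJanitor()
```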
|
||||
|
||||
|
||||
## Performance
|
||||
### Summary
|
||||
- **Set**: Both map and gocache have the same performance.
|
||||
- **Get**: Map is faster than gocache.
|
||||
|
||||
This is because gocache keeps track of the head and the tail for eviction and expiration/TTL.
|
||||
|
||||
Ultimately, the difference is negligible.
|
||||
|
||||
We could add a way to disable eviction or disable expiration altogether just to match the map's performance,
|
||||
but if you're looking into using a library like gocache, odds are, you want more than just a map.
|
||||
|
||||
|
||||
### Results
|
||||
| key | value |
|
||||
|:------ |:-------- |
|
||||
| goos | windows |
|
||||
| goarch | amd64 |
|
||||
| cpu | i7-9700K |
|
||||
| mem | 32G DDR4 |
|
||||
|
||||
```
|
||||
// Normal map
|
||||
BenchmarkMap_Get
|
||||
BenchmarkMap_Get-8 46087372 26.7 ns/op
|
||||
BenchmarkMap_Set
|
||||
BenchmarkMap_Set/small_value-8 3841911 389 ns/op
|
||||
BenchmarkMap_Set/medium_value-8 3887074 391 ns/op
|
||||
BenchmarkMap_Set/large_value-8 3921956 393 ns/op
|
||||
// Gocache
|
||||
BenchmarkCache_Get
|
||||
BenchmarkCache_Get/FirstInFirstOut-8 27273036 46.4 ns/op
|
||||
BenchmarkCache_Get/LeastRecentlyUsed-8 26648248 46.3 ns/op
|
||||
BenchmarkCache_Set
|
||||
BenchmarkCache_Set/FirstInFirstOut_small_value-8 2919584 405 ns/op
|
||||
BenchmarkCache_Set/FirstInFirstOut_medium_value-8 2990841 391 ns/op
|
||||
BenchmarkCache_Set/FirstInFirstOut_large_value-8 2970513 391 ns/op
|
||||
BenchmarkCache_Set/LeastRecentlyUsed_small_value-8 2962939 402 ns/op
|
||||
BenchmarkCache_Set/LeastRecentlyUsed_medium_value-8 2962963 390 ns/op
|
||||
BenchmarkCache_Set/LeastRecentlyUsed_large_value-8 2962928 394 ns/op
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage/small_value-8 2683356 447 ns/op
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage/medium_value-8 2637578 441 ns/op
|
||||
BenchmarkCache_SetUsingMaxMemoryUsage/large_value-8 2672434 443 ns/op
|
||||
BenchmarkCache_SetWithMaxSize
|
||||
BenchmarkCache_SetWithMaxSize/100_small_value-8 4782966 252 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/10000_small_value-8 4067967 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100000_small_value-8 3762055 328 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100_medium_value-8 4760479 252 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/10000_medium_value-8 4081050 295 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100000_medium_value-8 3785050 330 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100_large_value-8 4732909 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/10000_large_value-8 4079533 297 ns/op
|
||||
BenchmarkCache_SetWithMaxSize/100000_large_value-8 3712820 331 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_small_value-8 4761732 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_small_value-8 4084474 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_small_value-8 3761402 329 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_medium_value-8 4783075 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_medium_value-8 4103980 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_medium_value-8 3646023 331 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100_large_value-8 4779025 254 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/10000_large_value-8 4096192 296 ns/op
|
||||
BenchmarkCache_SetWithMaxSizeAndLRU/100000_large_value-8 3726823 331 ns/op
|
||||
BenchmarkCache_GetSetMultipleConcurrent
|
||||
BenchmarkCache_GetSetMultipleConcurrent-8 707142 1698 ns/op
|
||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction
|
||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction/FirstInFirstOut-8 3616256 334 ns/op
|
||||
BenchmarkCache_GetSetConcurrentWithFrequentEviction/LeastRecentlyUsed-8 3636367 331 ns/op
|
||||
BenchmarkCache_GetConcurrentWithLRU
|
||||
BenchmarkCache_GetConcurrentWithLRU/FirstInFirstOut-8 4405557 268 ns/op
|
||||
BenchmarkCache_GetConcurrentWithLRU/LeastRecentlyUsed-8 4445475 269 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true_with_nil_struct_pointer-8 6184591 191 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true-8 6090482 191 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false_with_nil_struct_pointer-8 6184629 187 ns/op
|
||||
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false-8 6281781 186 ns/op
|
||||
(Trimmed "BenchmarkCache_" for readability)
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/true_with_nil_struct_pointer-8 4379564 268 ns/op
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/true-8 4379558 265 ns/op
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/false_with_nil_struct_pointer-8 4444456 261 ns/op
|
||||
WithForceNilInterfaceOnNilPointerWithConcurrency/false-8 4493896 262 ns/op
|
||||
```
|
||||
|
||||
|
||||
## FAQ
|
||||
|
||||
### How can I persist the data on application termination?
|
||||
While creating your own auto save feature might come in handy, it may still lead to loss of data if the application
|
||||
automatically saves every 10 minutes and your application crashes 9 minutes after the previous save.
|
||||
|
||||
To increase your odds of not losing any data, you can use Go's `signal` package, more specifically its `Notify` function
|
||||
which allows listening for termination signals like SIGTERM and SIGINT. Once a termination signal is caught, you can
|
||||
add the necessary logic for a graceful shutdown.
|
||||
|
||||
In the following example, the code that would usually be present in the `main` function is moved to a different function
|
||||
named `Start` which is launched on a different goroutine so that listening for a termination signal is what blocks the
|
||||
main goroutine instead:
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/TwiN/gocache/v2"
|
||||
)
|
||||
|
||||
var cache = gocache.NewCache()
|
||||
|
||||
func main() {
|
||||
data := retrieveCacheEntriesUsingWhateverMeanYouUsedToPersistIt()
|
||||
cache.SetAll(data)
|
||||
// Start everything else on another goroutine to prevent blocking the main goroutine
|
||||
go Start()
|
||||
// Wait for termination signal
|
||||
sig := make(chan os.Signal, 1)
|
||||
done := make(chan bool, 1)
|
||||
signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-sig
|
||||
log.Println("Received termination signal, attempting to gracefully shut down")
|
||||
// Persist the cache entries
|
||||
cacheEntries := cache.GetAll()
|
||||
persistCacheEntriesHoweverYouWant(cacheEntries)
|
||||
// Tell the main goroutine that we're done
|
||||
done <- true
|
||||
}()
|
||||
<-done
|
||||
log.Println("Shutting down")
|
||||
}
|
||||
```
|
||||
|
||||
Note that this won't protect you from a SIGKILL, as this signal cannot be caught.
|
108
vendor/github.com/TwiN/gocache/v2/entry.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
package gocache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Entry is a cache entry
|
||||
type Entry struct {
|
||||
// Key is the name of the cache entry
|
||||
Key string
|
||||
|
||||
// Value is the value of the cache entry
|
||||
Value interface{}
|
||||
|
||||
// RelevantTimestamp is the variable used to store either:
|
||||
// - creation timestamp, if the Cache's EvictionPolicy is FirstInFirstOut
|
||||
// - last access timestamp, if the Cache's EvictionPolicy is LeastRecentlyUsed
|
||||
//
|
||||
// Note that updating an existing entry will also update this value
|
||||
RelevantTimestamp time.Time
|
||||
|
||||
// Expiration is the unix time in nanoseconds at which the entry will expire (-1 means no expiration)
|
||||
Expiration int64
|
||||
|
||||
next *Entry
|
||||
previous *Entry
|
||||
}
|
||||
|
||||
// Accessed updates the Entry's RelevantTimestamp to now
|
||||
func (entry *Entry) Accessed() {
|
||||
entry.RelevantTimestamp = time.Now()
|
||||
}
|
||||
|
||||
// Expired returns whether the Entry has expired
|
||||
func (entry Entry) Expired() bool {
|
||||
if entry.Expiration > 0 {
|
||||
if time.Now().UnixNano() > entry.Expiration {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SizeInBytes returns the size of an entry in bytes, approximately.
|
||||
func (entry *Entry) SizeInBytes() int {
|
||||
return toBytes(entry.Key) + toBytes(entry.Value) + 32
|
||||
}
|
||||
|
||||
func toBytes(value interface{}) int {
|
||||
switch value.(type) {
|
||||
case string:
|
||||
return int(unsafe.Sizeof(value)) + len(value.(string))
|
||||
case int8, uint8, bool:
|
||||
return int(unsafe.Sizeof(value)) + 1
|
||||
case int16, uint16:
|
||||
return int(unsafe.Sizeof(value)) + 2
|
||||
case int32, uint32, float32, complex64:
|
||||
return int(unsafe.Sizeof(value)) + 4
|
||||
case int64, uint64, int, uint, float64, complex128:
|
||||
return int(unsafe.Sizeof(value)) + 8
|
||||
case []interface{}:
|
||||
size := 0
|
||||
for _, v := range value.([]interface{}) {
|
||||
size += toBytes(v)
|
||||
}
|
||||
return int(unsafe.Sizeof(value)) + size
|
||||
case []string:
|
||||
size := 0
|
||||
for _, v := range value.([]string) {
|
||||
size += toBytes(v)
|
||||
}
|
||||
return int(unsafe.Sizeof(value)) + size
|
||||
case []int8:
|
||||
return int(unsafe.Sizeof(value)) + len(value.([]int8))
|
||||
case []uint8:
|
||||
return int(unsafe.Sizeof(value)) + len(value.([]uint8))
|
||||
case []bool:
|
||||
return int(unsafe.Sizeof(value)) + len(value.([]bool))
|
||||
case []int16:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int16)) * 2)
|
||||
case []uint16:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint16)) * 2)
|
||||
case []int32:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int32)) * 4)
|
||||
case []uint32:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint32)) * 4)
|
||||
case []float32:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]float32)) * 4)
|
||||
case []complex64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]complex64)) * 4)
|
||||
case []int64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int64)) * 8)
|
||||
case []uint64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint64)) * 8)
|
||||
case []int:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]int)) * 8)
|
||||
case []uint:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]uint)) * 8)
|
||||
case []float64:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]float64)) * 8)
|
||||
case []complex128:
|
||||
return int(unsafe.Sizeof(value)) + (len(value.([]complex128)) * 8)
|
||||
default:
|
||||
return int(unsafe.Sizeof(value)) + len(fmt.Sprintf("%v", value))
|
||||
}
|
||||
}
|
567
vendor/github.com/TwiN/gocache/v2/gocache.go
generated
vendored
Normal file
@ -0,0 +1,567 @@
|
||||
package gocache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
Debug = false
|
||||
)
|
||||
|
||||
const (
|
||||
// NoMaxSize means that the cache has no maximum number of entries in the cache
|
||||
// Setting Cache.maxSize to this value also means there will be no eviction
|
||||
NoMaxSize = 0
|
||||
|
||||
// NoMaxMemoryUsage means that the cache has no maximum memory usage
|
||||
NoMaxMemoryUsage = 0
|
||||
|
||||
// DefaultMaxSize is the max size set if no max size is specified
|
||||
DefaultMaxSize = 100000
|
||||
|
||||
// NoExpiration is the value that must be used as TTL to specify that the given key should never expire
|
||||
NoExpiration = -1
|
||||
|
||||
Kilobyte = 1024
|
||||
Megabyte = 1024 * Kilobyte
|
||||
Gigabyte = 1024 * Megabyte
|
||||
)
|
||||
|
||||
var (
|
||||
ErrKeyDoesNotExist = errors.New("key does not exist") // Returned when a cache key does not exist
|
||||
ErrKeyHasNoExpiration = errors.New("key has no expiration") // Returned when a cache key has no expiration
|
||||
ErrJanitorAlreadyRunning = errors.New("janitor is already running") // Returned when the janitor has already been started
|
||||
)
|
||||
|
||||
// Cache is the core struct of gocache which contains the data as well as all relevant configuration fields
|
||||
type Cache struct {
|
||||
// maxSize is the maximum amount of entries that can be in the cache at any given time
|
||||
// By default, this is set to DefaultMaxSize
|
||||
maxSize int
|
||||
|
||||
// maxMemoryUsage is the maximum amount of memory that can be taken up by the cache at any time
|
||||
// By default, this is set to NoMaxMemoryUsage, meaning that the default behavior is to not evict
|
||||
// based on maximum memory usage
|
||||
maxMemoryUsage int
|
||||
|
||||
// evictionPolicy is the eviction policy
|
||||
evictionPolicy EvictionPolicy
|
||||
|
||||
// stats is the object that contains cache statistics/metrics
|
||||
stats *Statistics
|
||||
|
||||
// entries is the content of the cache
|
||||
entries map[string]*Entry
|
||||
|
||||
// mutex is the lock for making concurrent operations on the cache
|
||||
mutex sync.RWMutex
|
||||
|
||||
// head is the cache entry at the head of the cache
|
||||
head *Entry
|
||||
|
||||
// tail is the last cache node and also the next entry that will be evicted
|
||||
tail *Entry
|
||||
|
||||
// stopJanitor is the channel used to stop the janitor
|
||||
stopJanitor chan bool
|
||||
|
||||
// memoryUsage is the approximate memory usage of the cache (dataset only) in bytes
|
||||
memoryUsage int
|
||||
|
||||
// forceNilInterfaceOnNilPointer determines whether all Set-like functions should set a value as nil if the
|
||||
// interface passed has a nil value but not a nil type.
|
||||
//
|
||||
// By default, interfaces are only nil when both their type and value are nil.
// This means that when you pass a nil pointer, the interface's type is not nil even though its
// value is, which means that if you don't cast the interface after retrieving it,
// a nil check will report that the value is not nil.
|
||||
forceNilInterfaceOnNilPointer bool
|
||||
}
|
||||
|
||||
// MaxSize returns the maximum amount of keys that can be present in the cache before
|
||||
// new entries trigger the eviction of the tail
|
||||
func (cache *Cache) MaxSize() int {
|
||||
return cache.maxSize
|
||||
}
|
||||
|
||||
// MaxMemoryUsage returns the configured maxMemoryUsage of the cache
|
||||
func (cache *Cache) MaxMemoryUsage() int {
|
||||
return cache.maxMemoryUsage
|
||||
}
|
||||
|
||||
// EvictionPolicy returns the EvictionPolicy of the Cache
|
||||
func (cache *Cache) EvictionPolicy() EvictionPolicy {
|
||||
return cache.evictionPolicy
|
||||
}
|
||||
|
||||
// Stats returns statistics from the cache
|
||||
func (cache *Cache) Stats() Statistics {
|
||||
cache.mutex.RLock()
|
||||
stats := Statistics{
|
||||
EvictedKeys: cache.stats.EvictedKeys,
|
||||
ExpiredKeys: cache.stats.ExpiredKeys,
|
||||
Hits: cache.stats.Hits,
|
||||
Misses: cache.stats.Misses,
|
||||
}
|
||||
cache.mutex.RUnlock()
|
||||
return stats
|
||||
}
|
||||
|
||||
// MemoryUsage returns the current memory usage of the cache's dataset in bytes
|
||||
// If MaxMemoryUsage is set to NoMaxMemoryUsage, this will return 0
|
||||
func (cache *Cache) MemoryUsage() int {
|
||||
return cache.memoryUsage
|
||||
}
|
||||
|
||||
// WithMaxSize sets the maximum amount of entries that can be in the cache at any given time
|
||||
// A maxSize of 0 or less means infinite
|
||||
func (cache *Cache) WithMaxSize(maxSize int) *Cache {
|
||||
if maxSize < 0 {
|
||||
maxSize = NoMaxSize
|
||||
}
|
||||
if maxSize != NoMaxSize && cache.Count() == 0 {
|
||||
cache.entries = make(map[string]*Entry, maxSize)
|
||||
}
|
||||
cache.maxSize = maxSize
|
||||
return cache
|
||||
}
|
||||
|
||||
// WithMaxMemoryUsage sets the maximum amount of memory that can be used by the cache at any given time
|
||||
//
|
||||
// NOTE: This is approximate.
|
||||
//
|
||||
// Setting this to NoMaxMemoryUsage will disable eviction by memory usage
|
||||
func (cache *Cache) WithMaxMemoryUsage(maxMemoryUsageInBytes int) *Cache {
|
||||
if maxMemoryUsageInBytes < 0 {
|
||||
maxMemoryUsageInBytes = NoMaxMemoryUsage
|
||||
}
|
||||
cache.maxMemoryUsage = maxMemoryUsageInBytes
|
||||
return cache
|
||||
}
|
||||
|
||||
// WithEvictionPolicy sets eviction algorithm.
|
||||
// Defaults to FirstInFirstOut (FIFO)
|
||||
func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache {
|
||||
cache.evictionPolicy = policy
|
||||
return cache
|
||||
}
|
||||
|
||||
// WithForceNilInterfaceOnNilPointer sets whether all Set-like functions should set a value as nil if the
|
||||
// interface passed has a nil value but not a nil type.
|
||||
//
|
||||
// In Go, an interface is only nil if both its type and value are nil, which means that a nil pointer
|
||||
// (e.g. (*Struct)(nil)) will retain its attribution to the type, and the unmodified value returned from
|
||||
// Cache.Get, for instance, would return false when compared with nil if this option is set to false.
|
||||
//
|
||||
// We can bypass this by detecting if the interface's value is nil and setting it to nil rather than
|
||||
// a nil pointer, which will make the value returned from Cache.Get return true when compared with nil.
|
||||
// This is exactly what passing true to WithForceNilInterfaceOnNilPointer does, and it's also the default behavior.
|
||||
//
|
||||
// Alternatively, you may pass false to WithForceNilInterfaceOnNilPointer, which will mean that you'll have
|
||||
// to cast the value returned from Cache.Get to its original type to check for whether the pointer returned
|
||||
// is nil or not.
|
||||
//
|
||||
// If set to true (default):
|
||||
// cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(true)
|
||||
// cache.Set("key", (*Struct)(nil))
|
||||
// value, _ := cache.Get("key")
|
||||
// // the following returns true, because the interface{} was forcefully set to nil
|
||||
// if value == nil {}
|
||||
// // the following will panic, because the value has been casted to its type (which is nil)
|
||||
// if value.(*Struct) == nil {}
|
||||
//
|
||||
// If set to false:
|
||||
// cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(false)
|
||||
// cache.Set("key", (*Struct)(nil))
|
||||
// value, _ := cache.Get("key")
|
||||
// // the following returns false, because the interface{} returned has a non-nil type (*Struct)
|
||||
// if value == nil {}
|
||||
// // the following returns true, because the value has been casted to its type
|
||||
// if value.(*Struct) == nil {}
|
||||
//
|
||||
// In other words, if set to true, you do not need to cast the value returned from the cache
// to check if the value is nil.
|
||||
//
|
||||
// Defaults to true
|
||||
func (cache *Cache) WithForceNilInterfaceOnNilPointer(forceNilInterfaceOnNilPointer bool) *Cache {
|
||||
cache.forceNilInterfaceOnNilPointer = forceNilInterfaceOnNilPointer
|
||||
return cache
|
||||
}
|
||||
|
||||
// NewCache creates a new Cache
|
||||
//
|
||||
// Should be used in conjunction with Cache.WithMaxSize, Cache.WithMaxMemoryUsage and/or Cache.WithEvictionPolicy
|
||||
// gocache.NewCache().WithMaxSize(10000).WithEvictionPolicy(gocache.LeastRecentlyUsed)
|
||||
//
|
||||
func NewCache() *Cache {
|
||||
return &Cache{
|
||||
maxSize: DefaultMaxSize,
|
||||
evictionPolicy: FirstInFirstOut,
|
||||
stats: &Statistics{},
|
||||
entries: make(map[string]*Entry),
|
||||
mutex: sync.RWMutex{},
|
||||
stopJanitor: nil,
|
||||
forceNilInterfaceOnNilPointer: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Set creates or updates a key with a given value
|
||||
func (cache *Cache) Set(key string, value interface{}) {
|
||||
cache.SetWithTTL(key, value, NoExpiration)
|
||||
}
|
||||
|
||||
// SetWithTTL creates or updates a key with a given value and sets an expiration time (-1 is NoExpiration)
|
||||
//
|
||||
// The TTL provided must be greater than 0, or NoExpiration (-1). If a negative value that isn't -1 (NoExpiration) is
|
||||
// provided, the entry will not be created if the key doesn't exist
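//
// Illustrative usage:
// cache.SetWithTTL("key", "value", time.Hour)            // expires in one hour
// cache.SetWithTTL("key", "value", gocache.NoExpiration) // never expires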
|
||||
func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration) {
|
||||
// An interface is only nil if both its value and its type are nil, however, passing a nil pointer as an interface{}
|
||||
// means that the interface itself is not nil, because the interface value is nil but not the type.
|
||||
if cache.forceNilInterfaceOnNilPointer {
|
||||
if value != nil && (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) {
|
||||
value = nil
|
||||
}
|
||||
}
|
||||
cache.mutex.Lock()
|
||||
entry, ok := cache.get(key)
|
||||
if !ok {
|
||||
// A TTL that is zero or negative, but isn't -1 (NoExpiration), would make the entry expire instantly,
// so we might as well not create it in the first place
|
||||
if ttl != NoExpiration && ttl < 1 {
|
||||
cache.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
// Cache entry doesn't exist, so we have to create a new one
|
||||
entry = &Entry{
|
||||
Key: key,
|
||||
Value: value,
|
||||
RelevantTimestamp: time.Now(),
|
||||
next: cache.head,
|
||||
}
|
||||
if cache.head == nil {
|
||||
cache.tail = entry
|
||||
} else {
|
||||
cache.head.previous = entry
|
||||
}
|
||||
cache.head = entry
|
||||
cache.entries[key] = entry
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
cache.memoryUsage += entry.SizeInBytes()
|
||||
}
|
||||
} else {
|
||||
// A TTL that is zero or negative, but isn't -1 (NoExpiration), would make the entry expire instantly,
// so we might as well just delete it immediately instead of updating it
|
||||
if ttl != NoExpiration && ttl < 1 {
|
||||
cache.delete(key)
|
||||
cache.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
// Subtract the old entry from the cache's memoryUsage
|
||||
cache.memoryUsage -= entry.SizeInBytes()
|
||||
}
|
||||
// Update existing entry's value
|
||||
entry.Value = value
|
||||
entry.RelevantTimestamp = time.Now()
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
// Add the memory usage of the new entry to the cache's memoryUsage
|
||||
cache.memoryUsage += entry.SizeInBytes()
|
||||
}
|
||||
// Because we just updated the entry, we need to move it back to HEAD
|
||||
cache.moveExistingEntryToHead(entry)
|
||||
}
|
||||
if ttl != NoExpiration {
|
||||
entry.Expiration = time.Now().Add(ttl).UnixNano()
|
||||
} else {
|
||||
entry.Expiration = NoExpiration
|
||||
}
|
||||
// If the cache doesn't have a maxSize/maxMemoryUsage, then there's no point
|
||||
// checking if we need to evict an entry, so we'll just return now
|
||||
if cache.maxSize == NoMaxSize && cache.maxMemoryUsage == NoMaxMemoryUsage {
|
||||
cache.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
// If there's a maxSize and the cache has more entries than the maxSize, evict
|
||||
if cache.maxSize != NoMaxSize && len(cache.entries) > cache.maxSize {
|
||||
cache.evict()
|
||||
}
|
||||
// If there's a maxMemoryUsage and the memoryUsage is above the maxMemoryUsage, evict
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage && cache.memoryUsage > cache.maxMemoryUsage {
|
||||
for cache.memoryUsage > cache.maxMemoryUsage && len(cache.entries) > 0 {
|
||||
cache.evict()
|
||||
}
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
}
|
||||
|
||||
// SetAll creates or updates multiple values
|
||||
func (cache *Cache) SetAll(entries map[string]interface{}) {
|
||||
for key, value := range entries {
|
||||
cache.SetWithTTL(key, value, NoExpiration)
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves an entry using the key passed as parameter
|
||||
// If there is no such entry, the value returned will be nil and the boolean will be false
|
||||
// If there is an entry, the value returned will be the value cached and the boolean will be true
|
||||
func (cache *Cache) Get(key string) (interface{}, bool) {
|
||||
cache.mutex.Lock()
|
||||
entry, ok := cache.get(key)
|
||||
if !ok {
|
||||
cache.mutex.Unlock()
|
||||
cache.stats.Misses++
|
||||
return nil, false
|
||||
}
|
||||
if entry.Expired() {
|
||||
cache.stats.ExpiredKeys++
|
||||
cache.delete(key)
|
||||
cache.mutex.Unlock()
|
||||
return nil, false
|
||||
}
|
||||
cache.stats.Hits++
|
||||
if cache.evictionPolicy == LeastRecentlyUsed {
|
||||
entry.Accessed()
|
||||
if cache.head == entry {
|
||||
cache.mutex.Unlock()
|
||||
return entry.Value, true
|
||||
}
|
||||
// Because the eviction policy is LRU, we need to move the entry back to HEAD
|
||||
cache.moveExistingEntryToHead(entry)
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
return entry.Value, true
|
||||
}
|
||||
|
||||
// GetValue retrieves an entry using the key passed as parameter
|
||||
// Unlike Get, this function only returns the value
|
||||
func (cache *Cache) GetValue(key string) interface{} {
|
||||
value, _ := cache.Get(key)
|
||||
return value
|
||||
}
|
||||
|
||||
// GetByKeys retrieves multiple entries using the keys passed as parameter
|
||||
// All keys are returned in the map, regardless of whether they exist or not, however, entries that do not exist in the
|
||||
// cache will return nil, meaning that there is no way of determining whether a key genuinely has the value nil, or
|
||||
// whether it doesn't exist in the cache using only this function.
|
||||
func (cache *Cache) GetByKeys(keys []string) map[string]interface{} {
|
||||
entries := make(map[string]interface{})
|
||||
for _, key := range keys {
|
||||
entries[key], _ = cache.Get(key)
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// GetAll retrieves all cache entries
|
||||
//
|
||||
// If the eviction policy is LeastRecentlyUsed, note that unlike Get and GetByKeys, this does not update the last access
|
||||
// timestamp. The reason for this is that since all cache entries will be accessed, updating the last access timestamp
|
||||
// would provide very little benefit while harming the ability to accurately determine the next key that will be evicted
|
||||
//
|
||||
// You should probably avoid using this if you have a lot of entries.
|
||||
//
|
||||
// GetKeysByPattern is a good alternative if you want to retrieve entries that you do not have the key for,
// as it only retrieves the keys, does not trigger active eviction, and has a parameter for limiting
// the number of keys you wish to retrieve.
|
||||
func (cache *Cache) GetAll() map[string]interface{} {
|
||||
entries := make(map[string]interface{})
|
||||
cache.mutex.Lock()
|
||||
for key, entry := range cache.entries {
|
||||
if entry.Expired() {
|
||||
cache.delete(key)
|
||||
continue
|
||||
}
|
||||
entries[key] = entry.Value
|
||||
}
|
||||
cache.stats.Hits += uint64(len(entries))
|
||||
cache.mutex.Unlock()
|
||||
return entries
|
||||
}
|
||||
|
||||
// GetKeysByPattern retrieves a slice of keys that match a given pattern
|
||||
// If the limit is set to 0, the entire cache will be searched for matching keys.
|
||||
// If the limit is above 0, the search will stop once the specified number of matching keys have been found.
|
||||
//
|
||||
// e.g.
|
||||
// cache.GetKeysByPattern("*some*", 0) will return all keys containing "some" in them
|
||||
// cache.GetKeysByPattern("*some*", 5) will return 5 keys (or less) containing "some" in them
|
||||
//
|
||||
// Note that GetKeysByPattern does not trigger active evictions, nor does it count as accessing the entry, the latter
|
||||
// only applying if the cache uses the LeastRecentlyUsed eviction policy.
|
||||
// The reason for that behavior is that these two (active eviction and access) only apply when you access the value
|
||||
// of the cache entry, and this function only returns the keys.
|
||||
func (cache *Cache) GetKeysByPattern(pattern string, limit int) []string {
|
||||
var matchingKeys []string
|
||||
cache.mutex.Lock()
|
||||
for key, value := range cache.entries {
|
||||
if value.Expired() {
|
||||
continue
|
||||
}
|
||||
if MatchPattern(pattern, key) {
|
||||
matchingKeys = append(matchingKeys, key)
|
||||
if limit > 0 && len(matchingKeys) >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
return matchingKeys
|
||||
}
|
||||
|
||||
// Delete removes a key from the cache
|
||||
//
|
||||
// Returns false if the key did not exist.
|
||||
func (cache *Cache) Delete(key string) bool {
|
||||
cache.mutex.Lock()
|
||||
ok := cache.delete(key)
|
||||
cache.mutex.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// DeleteAll deletes multiple entries based on the keys passed as parameter
|
||||
//
|
||||
// Returns the number of keys deleted
|
||||
func (cache *Cache) DeleteAll(keys []string) int {
|
||||
numberOfKeysDeleted := 0
|
||||
cache.mutex.Lock()
|
||||
for _, key := range keys {
|
||||
if cache.delete(key) {
|
||||
numberOfKeysDeleted++
|
||||
}
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
return numberOfKeysDeleted
|
||||
}
|
||||
|
||||
// Count returns the total amount of entries in the cache, regardless of whether they're expired or not
|
||||
func (cache *Cache) Count() int {
|
||||
cache.mutex.RLock()
|
||||
count := len(cache.entries)
|
||||
cache.mutex.RUnlock()
|
||||
return count
|
||||
}
|
||||
|
||||
// Clear deletes all entries from the cache
|
||||
func (cache *Cache) Clear() {
|
||||
cache.mutex.Lock()
|
||||
cache.entries = make(map[string]*Entry)
|
||||
cache.memoryUsage = 0
|
||||
cache.head = nil
|
||||
cache.tail = nil
|
||||
cache.mutex.Unlock()
|
||||
}
|
||||
|
||||
// TTL returns the time until the cache entry specified by the key passed as parameter
|
||||
// will be deleted.
|
||||
func (cache *Cache) TTL(key string) (time.Duration, error) {
|
||||
cache.mutex.RLock()
|
||||
entry, ok := cache.get(key)
|
||||
cache.mutex.RUnlock()
|
||||
if !ok {
|
||||
return 0, ErrKeyDoesNotExist
|
||||
}
|
||||
if entry.Expiration == NoExpiration {
|
||||
return 0, ErrKeyHasNoExpiration
|
||||
}
|
||||
timeUntilExpiration := time.Until(time.Unix(0, entry.Expiration))
|
||||
if timeUntilExpiration < 0 {
|
||||
// The key has already expired but hasn't been deleted yet.
|
||||
// From the client's perspective, this means that the cache entry doesn't exist
|
||||
return 0, ErrKeyDoesNotExist
|
||||
}
|
||||
return timeUntilExpiration, nil
|
||||
}
|
||||
|
||||
// Expire sets a key's expiration time
|
||||
//
|
||||
// A TTL of -1 means that the key will never expire
|
||||
// A TTL of 0 means that the key will expire immediately
|
||||
// If using LRU, note that this does not reset the position of the key
|
||||
//
|
||||
// Returns true if the cache key exists and has had its expiration time altered
|
||||
func (cache *Cache) Expire(key string, ttl time.Duration) bool {
|
||||
entry, ok := cache.get(key)
|
||||
if !ok || entry.Expired() {
|
||||
return false
|
||||
}
|
||||
if ttl != NoExpiration {
|
||||
entry.Expiration = time.Now().Add(ttl).UnixNano()
|
||||
} else {
|
||||
entry.Expiration = NoExpiration
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// get retrieves an entry using the key passed as parameter, but unlike Get, it doesn't update the access time or
|
||||
// move the position of the entry to the head
|
||||
func (cache *Cache) get(key string) (*Entry, bool) {
|
||||
entry, ok := cache.entries[key]
|
||||
return entry, ok
|
||||
}
|
||||
|
||||
func (cache *Cache) delete(key string) bool {
|
||||
entry, ok := cache.entries[key]
|
||||
if ok {
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
cache.memoryUsage -= entry.SizeInBytes()
|
||||
}
|
||||
cache.removeExistingEntryReferences(entry)
|
||||
delete(cache.entries, key)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// moveExistingEntryToHead replaces the current cache head for an existing entry
|
||||
func (cache *Cache) moveExistingEntryToHead(entry *Entry) {
|
||||
if !(entry == cache.head && entry == cache.tail) {
|
||||
cache.removeExistingEntryReferences(entry)
|
||||
}
|
||||
if entry != cache.head {
|
||||
entry.next = cache.head
|
||||
entry.previous = nil
|
||||
if cache.head != nil {
|
||||
cache.head.previous = entry
|
||||
}
|
||||
cache.head = entry
|
||||
}
|
||||
}
|
||||
|
||||
// removeExistingEntryReferences modifies the next and previous reference of an existing entry and re-links
|
||||
// the next and previous entry accordingly, as well as the cache head or/and the cache tail if necessary.
|
||||
// Note that it does not remove the entry from the cache, only the references.
|
||||
func (cache *Cache) removeExistingEntryReferences(entry *Entry) {
|
||||
if cache.tail == entry && cache.head == entry {
|
||||
cache.tail = nil
|
||||
cache.head = nil
|
||||
} else if cache.tail == entry {
|
||||
cache.tail = cache.tail.previous
|
||||
} else if cache.head == entry {
|
||||
cache.head = cache.head.next
|
||||
}
|
||||
if entry.previous != nil {
|
||||
entry.previous.next = entry.next
|
||||
}
|
||||
if entry.next != nil {
|
||||
entry.next.previous = entry.previous
|
||||
}
|
||||
entry.next = nil
|
||||
entry.previous = nil
|
||||
}
|
||||
|
||||
// evict removes the tail from the cache
|
||||
func (cache *Cache) evict() {
|
||||
if cache.tail == nil || len(cache.entries) == 0 {
|
||||
return
|
||||
}
|
||||
if cache.tail != nil {
|
||||
oldTail := cache.tail
|
||||
cache.removeExistingEntryReferences(oldTail)
|
||||
delete(cache.entries, oldTail.Key)
|
||||
if cache.maxMemoryUsage != NoMaxMemoryUsage {
|
||||
cache.memoryUsage -= oldTail.SizeInBytes()
|
||||
}
|
||||
cache.stats.EvictedKeys++
|
||||
}
|
||||
}
|
146
vendor/github.com/TwiN/gocache/v2/janitor.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
||||
package gocache
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// JanitorShiftTarget is the target number of expired keys to find during passive clean up duty
|
||||
// before pausing the passive expired keys eviction process
|
||||
JanitorShiftTarget = 25
|
||||
|
||||
// JanitorMaxIterationsPerShift is the maximum number of nodes to traverse before pausing
|
||||
//
|
||||
// This is to prevent the janitor from traversing the entire cache, which could take a long time
|
||||
// to complete depending on the size of the cache.
|
||||
//
|
||||
// By limiting it to a small number, we are effectively reducing the impact of passive eviction.
|
||||
JanitorMaxIterationsPerShift = 1000
|
||||
|
||||
// JanitorMinShiftBackOff is the minimum interval between each iteration of steps
|
||||
// defined by JanitorMaxIterationsPerShift
|
||||
JanitorMinShiftBackOff = 50 * time.Millisecond
|
||||
|
||||
// JanitorMaxShiftBackOff is the maximum interval between each iteration of steps
|
||||
// defined by JanitorMaxIterationsPerShift
|
||||
JanitorMaxShiftBackOff = 500 * time.Millisecond
|
||||
)
|
||||
|
||||
// StartJanitor starts the janitor on a different goroutine
|
||||
// The janitor's job is to delete expired keys in the background, in other words, it takes care of passive eviction.
|
||||
// It can be stopped by calling Cache.StopJanitor.
|
||||
// If you do not start the janitor, expired keys will only be deleted when they are accessed through Get, GetByKeys, or
|
||||
// GetAll.
|
||||
func (cache *Cache) StartJanitor() error {
|
||||
if cache.stopJanitor != nil {
|
||||
return ErrJanitorAlreadyRunning
|
||||
}
|
||||
cache.stopJanitor = make(chan bool)
|
||||
go func() {
|
||||
// rather than starting from the tail on every run, we can try to start from the last traversed entry
|
||||
var lastTraversedNode *Entry
|
||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead := 0
|
||||
backOff := JanitorMinShiftBackOff
|
||||
for {
|
||||
select {
|
||||
case <-time.After(backOff):
|
||||
// Passive clean up duty
|
||||
cache.mutex.Lock()
|
||||
if cache.tail != nil {
|
||||
start := time.Now()
|
||||
steps := 0
|
||||
expiredEntriesFound := 0
|
||||
current := cache.tail
|
||||
if lastTraversedNode != nil {
|
||||
// Make sure the lastTraversedNode is still in the cache, otherwise we might be traversing nodes that were already deleted.
|
||||
// Furthermore, we need to make sure that the entry from the cache has the same pointer as the lastTraversedNode
|
||||
// to verify that there isn't just a new cache entry with the same key (i.e. in case lastTraversedNode got evicted)
|
||||
if entryFromCache, isInCache := cache.get(lastTraversedNode.Key); isInCache && entryFromCache == lastTraversedNode {
|
||||
current = lastTraversedNode
|
||||
}
|
||||
}
|
||||
if current == cache.tail {
|
||||
if Debug {
|
||||
log.Printf("There are currently %d entries in the cache. The last walk resulted in finding %d expired keys", len(cache.entries), totalNumberOfExpiredKeysInPreviousRunFromTailToHead)
|
||||
}
|
||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead = 0
|
||||
}
|
||||
for current != nil {
|
||||
// since we're walking from the tail to the head, we get the previous reference
|
||||
var previous *Entry
|
||||
steps++
|
||||
if current.Expired() {
|
||||
expiredEntriesFound++
|
||||
// Because delete will remove the previous reference from the entry, we need to store the
|
||||
// previous reference before we delete it
|
||||
previous = current.previous
|
||||
cache.delete(current.Key)
|
||||
cache.stats.ExpiredKeys++
|
||||
}
|
||||
if current == cache.head {
|
||||
lastTraversedNode = nil
|
||||
break
|
||||
}
|
||||
// Travel to the previous node stored before deletion, if there is one; otherwise, travel to the current node's previous node
|
||||
if previous != nil {
|
||||
current = previous
|
||||
} else {
|
||||
current = current.previous
|
||||
}
|
||||
lastTraversedNode = current
|
||||
if steps == JanitorMaxIterationsPerShift || expiredEntriesFound >= JanitorShiftTarget {
|
||||
if expiredEntriesFound > 0 {
|
||||
backOff = JanitorMinShiftBackOff
|
||||
} else {
|
||||
if backOff*2 <= JanitorMaxShiftBackOff {
|
||||
backOff *= 2
|
||||
} else {
|
||||
backOff = JanitorMaxShiftBackOff
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if Debug {
|
||||
log.Printf("traversed %d nodes and found %d expired entries in %s before stopping\n", steps, expiredEntriesFound, time.Since(start))
|
||||
}
|
||||
totalNumberOfExpiredKeysInPreviousRunFromTailToHead += expiredEntriesFound
|
||||
} else {
|
||||
if backOff*2 < JanitorMaxShiftBackOff {
|
||||
backOff *= 2
|
||||
} else {
|
||||
backOff = JanitorMaxShiftBackOff
|
||||
}
|
||||
}
|
||||
cache.mutex.Unlock()
|
||||
case <-cache.stopJanitor:
|
||||
cache.stopJanitor <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
//if Debug {
|
||||
// go func() {
|
||||
// var m runtime.MemStats
|
||||
// for {
|
||||
// runtime.ReadMemStats(&m)
|
||||
// log.Printf("Alloc=%vMB; HeapReleased=%vMB; Sys=%vMB; HeapInUse=%vMB; HeapObjects=%v; HeapObjectsFreed=%v; GC=%v; cache.memoryUsage=%vMB; cacheSize=%d\n", m.Alloc/1024/1024, m.HeapReleased/1024/1024, m.Sys/1024/1024, m.HeapInuse/1024/1024, m.HeapObjects, m.Frees, m.NumGC, cache.memoryUsage/1024/1024, cache.Count())
|
||||
// time.Sleep(3 * time.Second)
|
||||
// }
|
||||
// }()
|
||||
//}
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopJanitor stops the janitor
|
||||
func (cache *Cache) StopJanitor() {
|
||||
if cache.stopJanitor != nil {
|
||||
// Tell the janitor to stop, and then wait for the janitor to reply on the same channel that it's stopping
|
||||
// This may seem a bit odd, but this allows us to avoid a data race condition when trying to set
|
||||
// cache.stopJanitor to nil
|
||||
cache.stopJanitor <- true
|
||||
<-cache.stopJanitor
|
||||
cache.stopJanitor = nil
|
||||
}
|
||||
}
|
12
vendor/github.com/TwiN/gocache/v2/pattern.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
package gocache
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
// MatchPattern checks whether a string matches a pattern
|
||||
func MatchPattern(pattern, s string) bool {
|
||||
if pattern == "*" {
|
||||
return true
|
||||
}
|
||||
matched, _ := filepath.Match(pattern, s)
|
||||
return matched
|
||||
}
|
33
vendor/github.com/TwiN/gocache/v2/policy.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
package gocache
|
||||
|
||||
// EvictionPolicy is what dictates how evictions are handled
|
||||
type EvictionPolicy string
|
||||
|
||||
var (
|
||||
// LeastRecentlyUsed is an eviction policy that causes the most recently accessed cache entry to be moved to the
|
||||
// head of the cache. Effectively, this causes the cache entries that have not been accessed for some time to
|
||||
// gradually move closer and closer to the tail, and since the tail is the entry that gets deleted when an eviction
|
||||
// is required, it allows less used cache entries to be evicted while keeping recently accessed entries at or close
|
||||
// to the head.
|
||||
//
|
||||
// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
|
||||
// put 3 at the head and 1 at the tail:
|
||||
// 3 (head) -> 2 -> 1 (tail)
|
||||
// If the cache entry 1 was then accessed, 1 would become the head and 2 the tail:
|
||||
// 1 (head) -> 3 -> 2 (tail)
|
||||
// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (2) would then be evicted:
|
||||
// 4 (head) -> 1 -> 3 (tail)
|
||||
LeastRecentlyUsed EvictionPolicy = "LeastRecentlyUsed"
|
||||
|
||||
// FirstInFirstOut is an eviction policy that causes cache entries to be evicted in the same order that they are
|
||||
// created.
|
||||
//
|
||||
// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
|
||||
// put 3 at the head and 1 at the tail:
|
||||
// 3 (head) -> 2 -> 1 (tail)
|
||||
// If the cache entry 1 was then accessed, unlike with LeastRecentlyUsed, nothing would change:
|
||||
// 3 (head) -> 2 -> 1 (tail)
|
||||
// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (1) would then be evicted:
|
||||
// 4 (head) -> 3 -> 2 (tail)
|
||||
FirstInFirstOut EvictionPolicy = "FirstInFirstOut"
|
||||
)
|
15
vendor/github.com/TwiN/gocache/v2/statistics.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
package gocache
|
||||
|
||||
type Statistics struct {
|
||||
// EvictedKeys is the number of keys that were evicted
|
||||
EvictedKeys uint64
|
||||
|
||||
// ExpiredKeys is the number of keys that were automatically deleted as a result of expiring
|
||||
ExpiredKeys uint64
|
||||
|
||||
// Hits is the number of cache hits
|
||||
Hits uint64
|
||||
|
||||
// Misses is the number of cache misses
|
||||
Misses uint64
|
||||
}
|
202
vendor/github.com/coreos/go-oidc/v3/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
5 vendor/github.com/coreos/go-oidc/v3/NOTICE generated vendored Normal file
@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
16 vendor/github.com/coreos/go-oidc/v3/oidc/jose.go generated vendored Normal file
@ -0,0 +1,16 @@
package oidc

// JOSE asymmetric signing algorithm values as defined by RFC 7518
//
// see: https://tools.ietf.org/html/rfc7518#section-3.1
const (
	RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
	RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
	RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
	ES256 = "ES256" // ECDSA using P-256 and SHA-256
	ES384 = "ES384" // ECDSA using P-384 and SHA-384
	ES512 = "ES512" // ECDSA using P-521 and SHA-512
	PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
	PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
	PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
)
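These constants are plain strings, so they can be passed straight into the verifier configuration defined later in this package. A hedged sketch, with a placeholder issuer, jwks_uri and client ID:

package main

import (
	"context"

	"github.com/coreos/go-oidc/v3/oidc"
)

func main() {
	ctx := context.Background()
	// Placeholder URLs; NewRemoteKeySet, NewVerifier and Config appear later in this diff.
	keySet := oidc.NewRemoteKeySet(ctx, "https://issuer.example.com/keys")
	verifier := oidc.NewVerifier("https://issuer.example.com", keySet, &oidc.Config{
		ClientID:             "my-client-id",
		SupportedSigningAlgs: []string{oidc.RS256, oidc.ES256, oidc.PS256},
	})
	_ = verifier
}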
208
vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go
generated
vendored
Normal file
@ -0,0 +1,208 @@
|
||||
package oidc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
jose "gopkg.in/square/go-jose.v2"
|
||||
)
|
||||
|
||||
// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
|
||||
// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
|
||||
// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
|
||||
// exposed for providers that don't support discovery or to prevent round trips to the
|
||||
// discovery URL.
|
||||
//
|
||||
// The returned KeySet is a long-lived verifier that caches keys and only fetches new ones
// when a token is signed with an unknown key ID. Reuse a common remote key set instead of creating new ones as needed.
|
||||
func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
|
||||
return newRemoteKeySet(ctx, jwksURL, time.Now)
|
||||
}
|
||||
|
||||
func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
|
||||
if now == nil {
|
||||
now = time.Now
|
||||
}
|
||||
return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
|
||||
}
|
||||
|
||||
// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
|
||||
// a jwks_uri endpoint.
|
||||
type RemoteKeySet struct {
|
||||
jwksURL string
|
||||
ctx context.Context
|
||||
now func() time.Time
|
||||
|
||||
// guard all other fields
|
||||
mu sync.RWMutex
|
||||
|
||||
// inflight suppresses parallel execution of updateKeys and allows
|
||||
// multiple goroutines to wait for its result.
|
||||
inflight *inflight
|
||||
|
||||
// A set of cached keys.
|
||||
cachedKeys []jose.JSONWebKey
|
||||
}
|
||||
|
||||
// inflight is used to wait on some in-flight request from multiple goroutines.
|
||||
type inflight struct {
|
||||
doneCh chan struct{}
|
||||
|
||||
keys []jose.JSONWebKey
|
||||
err error
|
||||
}
|
||||
|
||||
func newInflight() *inflight {
|
||||
return &inflight{doneCh: make(chan struct{})}
|
||||
}
|
||||
|
||||
// wait returns a channel that multiple goroutines can receive on. Once it returns
|
||||
// a value, the inflight request is done and result() can be inspected.
|
||||
func (i *inflight) wait() <-chan struct{} {
|
||||
return i.doneCh
|
||||
}
|
||||
|
||||
// done can only be called by a single goroutine. It records the result of the
|
||||
// inflight request and signals other goroutines that the result is safe to
|
||||
// inspect.
|
||||
func (i *inflight) done(keys []jose.JSONWebKey, err error) {
|
||||
i.keys = keys
|
||||
i.err = err
|
||||
close(i.doneCh)
|
||||
}
|
||||
|
||||
// result cannot be called until the wait() channel has returned a value.
|
||||
func (i *inflight) result() ([]jose.JSONWebKey, error) {
|
||||
return i.keys, i.err
|
||||
}
|
||||
|
||||
// VerifySignature validates a payload against a signature from the jwks_uri.
|
||||
//
|
||||
// Users MUST NOT call this method directly and should use an IDTokenVerifier
|
||||
// instead. This method skips critical validations such as 'alg' values and is
|
||||
// only exported to implement the KeySet interface.
|
||||
func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
|
||||
jws, err := jose.ParseSigned(jwt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
||||
}
|
||||
return r.verify(ctx, jws)
|
||||
}
|
||||
|
||||
func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
|
||||
// We don't support JWTs signed with multiple signatures.
|
||||
keyID := ""
|
||||
for _, sig := range jws.Signatures {
|
||||
keyID = sig.Header.KeyID
|
||||
break
|
||||
}
|
||||
|
||||
keys := r.keysFromCache()
|
||||
for _, key := range keys {
|
||||
if keyID == "" || key.KeyID == keyID {
|
||||
if payload, err := jws.Verify(&key); err == nil {
|
||||
return payload, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the kid doesn't match, check for new keys from the remote. This is the
|
||||
// strategy recommended by the spec.
|
||||
//
|
||||
// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
|
||||
keys, err := r.keysFromRemote(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fetching keys %v", err)
|
||||
}
|
||||
|
||||
for _, key := range keys {
|
||||
if keyID == "" || key.KeyID == keyID {
|
||||
if payload, err := jws.Verify(&key); err == nil {
|
||||
return payload, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, errors.New("failed to verify id token signature")
|
||||
}
|
||||
|
||||
func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
return r.cachedKeys
|
||||
}
|
||||
|
||||
// keysFromRemote syncs the key set from the remote set, records the values in the
|
||||
// cache, and returns the key set.
|
||||
func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
|
||||
// Need to lock to inspect the inflight request field.
|
||||
r.mu.Lock()
|
||||
// If there's not a current inflight request, create one.
|
||||
if r.inflight == nil {
|
||||
r.inflight = newInflight()
|
||||
|
||||
// This goroutine has exclusive ownership over the current inflight
|
||||
// request. It releases the resource by nil'ing the inflight field
|
||||
// once the goroutine is done.
|
||||
go func() {
|
||||
// Sync keys and finish inflight when that's done.
|
||||
keys, err := r.updateKeys()
|
||||
|
||||
r.inflight.done(keys, err)
|
||||
|
||||
// Lock to update the keys and indicate that there is no longer an
|
||||
// inflight request.
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if err == nil {
|
||||
r.cachedKeys = keys
|
||||
}
|
||||
|
||||
// Free inflight so a different request can run.
|
||||
r.inflight = nil
|
||||
}()
|
||||
}
|
||||
inflight := r.inflight
|
||||
r.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-inflight.wait():
|
||||
return inflight.result()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {
|
||||
req, err := http.NewRequest("GET", r.jwksURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: can't create request: %v", err)
|
||||
}
|
||||
|
||||
resp, err := doRequest(r.ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: get keys failed %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
|
||||
}
|
||||
|
||||
var keySet jose.JSONWebKeySet
|
||||
err = unmarshalResp(resp, body, &keySet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
|
||||
}
|
||||
return keySet.Keys, nil
|
||||
}
|
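As the NewRemoteKeySet comment advises, a single RemoteKeySet should be created once and shared, since it caches keys and only refreshes them when verification encounters an unknown key ID. A hedged sketch with placeholder URLs and client IDs:

package example

import (
	"context"

	"github.com/coreos/go-oidc/v3/oidc"
)

// newVerifiers shows one key set being reused by several verifiers (e.g. one per audience).
func newVerifiers(ctx context.Context) (*oidc.IDTokenVerifier, *oidc.IDTokenVerifier) {
	keySet := oidc.NewRemoteKeySet(ctx, "https://issuer.example.com/keys") // placeholder jwks_uri
	apiVerifier := oidc.NewVerifier("https://issuer.example.com", keySet, &oidc.Config{ClientID: "api-client"})
	webVerifier := oidc.NewVerifier("https://issuer.example.com", keySet, &oidc.Config{ClientID: "web-client"})
	return apiVerifier, webVerifier
}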
480
vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
generated
vendored
Normal file
@ -0,0 +1,480 @@
|
||||
// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
|
||||
package oidc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
|
||||
ScopeOpenID = "openid"
|
||||
|
||||
// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
|
||||
// OAuth2 refresh tokens.
|
||||
//
|
||||
// Support for this scope differs between OpenID Connect providers. For instance
|
||||
// Google rejects it, favoring appending "access_type=offline" as part of the
|
||||
// authorization request instead.
|
||||
//
|
||||
// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
|
||||
ScopeOfflineAccess = "offline_access"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoAtHash = errors.New("id token did not have an access token hash")
|
||||
errInvalidAtHash = errors.New("access token hash does not match value in ID token")
|
||||
)
|
||||
|
||||
type contextKey int
|
||||
|
||||
var issuerURLKey contextKey
|
||||
|
||||
// ClientContext returns a new Context that carries the provided HTTP client.
|
||||
//
|
||||
// This method sets the same context key used by the golang.org/x/oauth2 package,
|
||||
// so the returned context works for that package too.
|
||||
//
|
||||
// myClient := &http.Client{}
|
||||
// ctx := oidc.ClientContext(parentContext, myClient)
|
||||
//
|
||||
// // This will use the custom client
|
||||
// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
|
||||
//
|
||||
func ClientContext(ctx context.Context, client *http.Client) context.Context {
|
||||
return context.WithValue(ctx, oauth2.HTTPClient, client)
|
||||
}
|
||||
|
||||
// cloneContext copies a context's bag-of-values into a new context that isn't
|
||||
// associated with its cancellation. This is used to initialize remote keys sets
|
||||
// which run in the background and aren't associated with the initial context.
|
||||
func cloneContext(ctx context.Context) context.Context {
|
||||
cp := context.Background()
|
||||
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
||||
cp = ClientContext(cp, c)
|
||||
}
|
||||
return cp
|
||||
}
|
||||
|
||||
// InsecureIssuerURLContext allows discovery to work when the issuer_url reported
|
||||
// by upstream is mismatched with the discovery URL. This is meant for integration
|
||||
// with off-spec providers such as Azure.
|
||||
//
|
||||
// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
|
||||
// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
|
||||
//
|
||||
// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
|
||||
//
|
||||
// // Provider will be discovered with the discoveryBaseURL, but use issuerURL
|
||||
// // for future issuer validation.
|
||||
// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
|
||||
//
|
||||
// This is insecure because validating the correct issuer is critical for multi-tenant
// providers. Any overrides here MUST be carefully reviewed.
|
||||
func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
|
||||
return context.WithValue(ctx, issuerURLKey, issuerURL)
|
||||
}
|
||||
|
||||
func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
|
||||
client := http.DefaultClient
|
||||
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
||||
client = c
|
||||
}
|
||||
return client.Do(req.WithContext(ctx))
|
||||
}
|
||||
|
||||
// Provider represents an OpenID Connect server's configuration.
|
||||
type Provider struct {
|
||||
issuer string
|
||||
authURL string
|
||||
tokenURL string
|
||||
userInfoURL string
|
||||
algorithms []string
|
||||
|
||||
// Raw claims returned by the server.
|
||||
rawClaims []byte
|
||||
|
||||
remoteKeySet KeySet
|
||||
}
|
||||
|
||||
type providerJSON struct {
|
||||
Issuer string `json:"issuer"`
|
||||
AuthURL string `json:"authorization_endpoint"`
|
||||
TokenURL string `json:"token_endpoint"`
|
||||
JWKSURL string `json:"jwks_uri"`
|
||||
UserInfoURL string `json:"userinfo_endpoint"`
|
||||
Algorithms []string `json:"id_token_signing_alg_values_supported"`
|
||||
}
|
||||
|
||||
// supportedAlgorithms is a list of algorithms explicitly supported by this
|
||||
// package. If a provider supports other algorithms, such as HS256 or none,
|
||||
// those values won't be passed to the IDTokenVerifier.
|
||||
var supportedAlgorithms = map[string]bool{
|
||||
RS256: true,
|
||||
RS384: true,
|
||||
RS512: true,
|
||||
ES256: true,
|
||||
ES384: true,
|
||||
ES512: true,
|
||||
PS256: true,
|
||||
PS384: true,
|
||||
PS512: true,
|
||||
}
|
||||
|
||||
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
|
||||
//
|
||||
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
|
||||
// or "https://login.salesforce.com".
|
||||
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
|
||||
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
|
||||
req, err := http.NewRequest("GET", wellKnown, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := doRequest(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
||||
}
|
||||
|
||||
var p providerJSON
|
||||
err = unmarshalResp(resp, body, &p)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
|
||||
}
|
||||
|
||||
issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string)
|
||||
if !skipIssuerValidation {
|
||||
issuerURL = issuer
|
||||
}
|
||||
if p.Issuer != issuerURL && !skipIssuerValidation {
|
||||
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
|
||||
}
|
||||
var algs []string
|
||||
for _, a := range p.Algorithms {
|
||||
if supportedAlgorithms[a] {
|
||||
algs = append(algs, a)
|
||||
}
|
||||
}
|
||||
return &Provider{
|
||||
issuer: issuerURL,
|
||||
authURL: p.AuthURL,
|
||||
tokenURL: p.TokenURL,
|
||||
userInfoURL: p.UserInfoURL,
|
||||
algorithms: algs,
|
||||
rawClaims: body,
|
||||
remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
|
||||
}, nil
|
||||
}
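Combined with Endpoint and Verifier below, discovery is typically wired up along these lines; the issuer, client credentials and redirect URL are placeholders, not values from this commit.

package example

import (
	"context"

	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

// newOAuth2Config discovers the provider and builds the matching oauth2.Config and ID token verifier.
func newOAuth2Config(ctx context.Context) (*oauth2.Config, *oidc.IDTokenVerifier, error) {
	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") // placeholder issuer
	if err != nil {
		return nil, nil, err
	}
	cfg := &oauth2.Config{
		ClientID:     "client-id",     // placeholder
		ClientSecret: "client-secret", // placeholder
		RedirectURL:  "https://app.example.com/callback",
		Endpoint:     provider.Endpoint(),
		Scopes:       []string{oidc.ScopeOpenID, "profile", "email"},
	}
	verifier := provider.Verifier(&oidc.Config{ClientID: cfg.ClientID})
	return cfg, verifier, nil
}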
|
||||
|
||||
// Claims unmarshals raw fields returned by the server during discovery.
|
||||
//
|
||||
// var claims struct {
|
||||
// ScopesSupported []string `json:"scopes_supported"`
|
||||
// ClaimsSupported []string `json:"claims_supported"`
|
||||
// }
|
||||
//
|
||||
// if err := provider.Claims(&claims); err != nil {
|
||||
// // handle unmarshaling error
|
||||
// }
|
||||
//
|
||||
// For a list of fields defined by the OpenID Connect spec see:
|
||||
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
|
||||
func (p *Provider) Claims(v interface{}) error {
|
||||
if p.rawClaims == nil {
|
||||
return errors.New("oidc: claims not set")
|
||||
}
|
||||
return json.Unmarshal(p.rawClaims, v)
|
||||
}
|
||||
|
||||
// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
|
||||
func (p *Provider) Endpoint() oauth2.Endpoint {
|
||||
return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
|
||||
}
|
||||
|
||||
// UserInfo represents the OpenID Connect userinfo claims.
|
||||
type UserInfo struct {
|
||||
Subject string `json:"sub"`
|
||||
Profile string `json:"profile"`
|
||||
Email string `json:"email"`
|
||||
EmailVerified bool `json:"email_verified"`
|
||||
|
||||
claims []byte
|
||||
}
|
||||
|
||||
type userInfoRaw struct {
|
||||
Subject string `json:"sub"`
|
||||
Profile string `json:"profile"`
|
||||
Email string `json:"email"`
|
||||
// Handle providers that return email_verified as a string
|
||||
// https://forums.aws.amazon.com/thread.jspa?messageID=949441 and
|
||||
// https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11
|
||||
EmailVerified stringAsBool `json:"email_verified"`
|
||||
}
|
||||
|
||||
// Claims unmarshals the raw JSON object claims into the provided object.
|
||||
func (u *UserInfo) Claims(v interface{}) error {
|
||||
if u.claims == nil {
|
||||
return errors.New("oidc: claims not set")
|
||||
}
|
||||
return json.Unmarshal(u.claims, v)
|
||||
}
|
||||
|
||||
// UserInfo uses the token source to query the provider's user info endpoint.
|
||||
func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
|
||||
if p.userInfoURL == "" {
|
||||
return nil, errors.New("oidc: user info endpoint is not supported by this provider")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", p.userInfoURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: create GET request: %v", err)
|
||||
}
|
||||
|
||||
token, err := tokenSource.Token()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: get access token: %v", err)
|
||||
}
|
||||
token.SetAuthHeader(req)
|
||||
|
||||
resp, err := doRequest(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
||||
}
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
||||
if parseErr == nil && mediaType == "application/jwt" {
|
||||
payload, err := p.remoteKeySet.VerifySignature(ctx, string(body))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
|
||||
}
|
||||
body = payload
|
||||
}
|
||||
|
||||
var userInfo userInfoRaw
|
||||
if err := json.Unmarshal(body, &userInfo); err != nil {
|
||||
return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
|
||||
}
|
||||
return &UserInfo{
|
||||
Subject: userInfo.Subject,
|
||||
Profile: userInfo.Profile,
|
||||
Email: userInfo.Email,
|
||||
EmailVerified: bool(userInfo.EmailVerified),
|
||||
claims: body,
|
||||
}, nil
|
||||
}
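Since the method accepts any oauth2.TokenSource, a token obtained from an earlier authorization-code exchange can simply be wrapped in oauth2.StaticTokenSource. An illustrative helper, not code from this commit:

package example

import (
	"context"
	"fmt"

	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

// emailFromUserInfo fetches the userinfo claims for an already-obtained token.
func emailFromUserInfo(ctx context.Context, provider *oidc.Provider, token *oauth2.Token) (string, error) {
	userInfo, err := provider.UserInfo(ctx, oauth2.StaticTokenSource(token))
	if err != nil {
		return "", fmt.Errorf("fetching userinfo: %v", err)
	}
	return userInfo.Email, nil
}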
|
||||
|
||||
// IDToken is an OpenID Connect extension that provides a predictable representation
|
||||
// of an authorization event.
|
||||
//
|
||||
// The ID Token only holds fields OpenID Connect requires. To access additional
|
||||
// claims returned by the server, use the Claims method.
|
||||
type IDToken struct {
|
||||
// The URL of the server which issued this token. OpenID Connect
|
||||
// requires this value always be identical to the URL used for
|
||||
// initial discovery.
|
||||
//
|
||||
// Note: Because of a known issue with Google Accounts' implementation
|
||||
// this value may differ when using Google.
|
||||
//
|
||||
// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
|
||||
Issuer string
|
||||
|
||||
// The client ID, or set of client IDs, that this token is issued for. For
|
||||
// common uses, this is the client that initialized the auth flow.
|
||||
//
|
||||
// This package ensures the audience contains an expected value.
|
||||
Audience []string
|
||||
|
||||
// A unique string which identifies the end user.
|
||||
Subject string
|
||||
|
||||
// Expiry of the token. This package will not process tokens that have
|
||||
// expired unless that validation is explicitly turned off.
|
||||
Expiry time.Time
|
||||
// When the token was issued by the provider.
|
||||
IssuedAt time.Time
|
||||
|
||||
// Initial nonce provided during the authentication redirect.
|
||||
//
|
||||
// This package does NOT provide verification of the value of this field
// and it's the user's responsibility to ensure it contains a valid value.
|
||||
Nonce string
|
||||
|
||||
// at_hash claim, if set in the ID token. Callers can verify an access token
|
||||
// that corresponds to the ID token using the VerifyAccessToken method.
|
||||
AccessTokenHash string
|
||||
|
||||
// signature algorithm used for ID token, needed to compute a verification hash of an
|
||||
// access token
|
||||
sigAlgorithm string
|
||||
|
||||
// Raw payload of the id_token.
|
||||
claims []byte
|
||||
|
||||
// Map of distributed claim names to claim sources
|
||||
distributedClaims map[string]claimSource
|
||||
}
|
||||
|
||||
// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
|
||||
//
|
||||
// idToken, err := idTokenVerifier.Verify(rawIDToken)
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
// var claims struct {
|
||||
// Email string `json:"email"`
|
||||
// EmailVerified bool `json:"email_verified"`
|
||||
// }
|
||||
// if err := idToken.Claims(&claims); err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
func (i *IDToken) Claims(v interface{}) error {
|
||||
if i.claims == nil {
|
||||
return errors.New("oidc: claims not set")
|
||||
}
|
||||
return json.Unmarshal(i.claims, v)
|
||||
}
|
||||
|
||||
// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
|
||||
// matches the hash in the id token. It returns an error if the hashes don't match.
|
||||
// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
|
||||
// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
|
||||
func (i *IDToken) VerifyAccessToken(accessToken string) error {
|
||||
if i.AccessTokenHash == "" {
|
||||
return errNoAtHash
|
||||
}
|
||||
var h hash.Hash
|
||||
switch i.sigAlgorithm {
|
||||
case RS256, ES256, PS256:
|
||||
h = sha256.New()
|
||||
case RS384, ES384, PS384:
|
||||
h = sha512.New384()
|
||||
case RS512, ES512, PS512:
|
||||
h = sha512.New()
|
||||
default:
|
||||
return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
|
||||
}
|
||||
h.Write([]byte(accessToken)) // hash documents that Write will never return an error
|
||||
sum := h.Sum(nil)[:h.Size()/2]
|
||||
actual := base64.RawURLEncoding.EncodeToString(sum)
|
||||
if actual != i.AccessTokenHash {
|
||||
return errInvalidAtHash
|
||||
}
|
||||
return nil
|
||||
}
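In the authorization-code flow this check usually happens right after the token exchange; a hedged sketch (the verifier and oauth2 token are assumed to come from earlier steps in the flow):

package example

import (
	"context"
	"errors"

	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

// verifyExchange verifies the ID token and checks the access token against its at_hash claim.
func verifyExchange(ctx context.Context, verifier *oidc.IDTokenVerifier, oauth2Token *oauth2.Token) (*oidc.IDToken, error) {
	rawIDToken, ok := oauth2Token.Extra("id_token").(string)
	if !ok {
		return nil, errors.New("token response did not contain an id_token")
	}
	idToken, err := verifier.Verify(ctx, rawIDToken)
	if err != nil {
		return nil, err
	}
	if err := idToken.VerifyAccessToken(oauth2Token.AccessToken); err != nil {
		return nil, err // at_hash missing or mismatched
	}
	return idToken, nil
}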
|
||||
|
||||
type idToken struct {
|
||||
Issuer string `json:"iss"`
|
||||
Subject string `json:"sub"`
|
||||
Audience audience `json:"aud"`
|
||||
Expiry jsonTime `json:"exp"`
|
||||
IssuedAt jsonTime `json:"iat"`
|
||||
NotBefore *jsonTime `json:"nbf"`
|
||||
Nonce string `json:"nonce"`
|
||||
AtHash string `json:"at_hash"`
|
||||
ClaimNames map[string]string `json:"_claim_names"`
|
||||
ClaimSources map[string]claimSource `json:"_claim_sources"`
|
||||
}
|
||||
|
||||
type claimSource struct {
|
||||
Endpoint string `json:"endpoint"`
|
||||
AccessToken string `json:"access_token"`
|
||||
}
|
||||
|
||||
type stringAsBool bool
|
||||
|
||||
func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
|
||||
switch string(b) {
|
||||
case "true", `"true"`:
|
||||
*sb = true
|
||||
case "false", `"false"`:
|
||||
*sb = false
|
||||
default:
|
||||
return errors.New("invalid value for boolean")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type audience []string
|
||||
|
||||
func (a *audience) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
if json.Unmarshal(b, &s) == nil {
|
||||
*a = audience{s}
|
||||
return nil
|
||||
}
|
||||
var auds []string
|
||||
if err := json.Unmarshal(b, &auds); err != nil {
|
||||
return err
|
||||
}
|
||||
*a = auds
|
||||
return nil
|
||||
}
|
||||
|
||||
type jsonTime time.Time
|
||||
|
||||
func (j *jsonTime) UnmarshalJSON(b []byte) error {
|
||||
var n json.Number
|
||||
if err := json.Unmarshal(b, &n); err != nil {
|
||||
return err
|
||||
}
|
||||
var unix int64
|
||||
|
||||
if t, err := n.Int64(); err == nil {
|
||||
unix = t
|
||||
} else {
|
||||
f, err := n.Float64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
unix = int64(f)
|
||||
}
|
||||
*j = jsonTime(time.Unix(unix, 0))
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
|
||||
err := json.Unmarshal(body, &v)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
ct := r.Header.Get("Content-Type")
|
||||
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
||||
if parseErr == nil && mediaType == "application/json" {
|
||||
return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
|
||||
}
|
||||
return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
|
||||
}
|
322
vendor/github.com/coreos/go-oidc/v3/oidc/verify.go
generated
vendored
Normal file
@ -0,0 +1,322 @@
|
||||
package oidc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
jose "gopkg.in/square/go-jose.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
issuerGoogleAccounts = "https://accounts.google.com"
|
||||
issuerGoogleAccountsNoScheme = "accounts.google.com"
|
||||
)
|
||||
|
||||
// KeySet is a set of public JSON Web Keys that can be used to validate the signature
|
||||
// of JSON web tokens. This is expected to be backed by a remote key set through
|
||||
// provider metadata discovery or an in-memory set of keys delivered out-of-band.
|
||||
type KeySet interface {
|
||||
// VerifySignature parses the JSON web token, verifies the signature, and returns
|
||||
// the raw payload. Header and claim fields are validated by other parts of the
|
||||
// package. For example, the KeySet does not need to check values such as signature
|
||||
// algorithm, issuer, and audience since the IDTokenVerifier validates these values
|
||||
// independently.
|
||||
//
|
||||
// If VerifySignature makes HTTP requests to verify the token, it's expected to
|
||||
// use any HTTP client associated with the context through ClientContext.
|
||||
VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
|
||||
}
|
||||
|
||||
// IDTokenVerifier provides verification for ID Tokens.
|
||||
type IDTokenVerifier struct {
|
||||
keySet KeySet
|
||||
config *Config
|
||||
issuer string
|
||||
}
|
||||
|
||||
// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
|
||||
//
|
||||
// It's easier to use provider discovery to construct an IDTokenVerifier than creating
|
||||
// one directly. This method is intended to be used with providers that don't support
// metadata discovery, or to avoid round trips when the key set URL is already known.
|
||||
//
|
||||
// This constructor can be used to create a verifier directly using the issuer URL and
|
||||
// JSON Web Key Set URL without using discovery:
|
||||
//
|
||||
// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
|
||||
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
|
||||
//
|
||||
// Since KeySet is an interface, this constructor can also be used to supply custom
|
||||
// public key sources. For example, if a user wanted to supply public keys out-of-band
|
||||
// and hold them statically in-memory:
|
||||
//
|
||||
// // Custom KeySet implementation.
|
||||
// keySet := newStaticKeySet(publicKeys...)
|
||||
//
|
||||
// // Verifier uses the custom KeySet implementation.
|
||||
// verifier := oidc.NewVerifier("https://auth.example.com", keySet, config)
|
||||
//
|
||||
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
|
||||
return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
|
||||
}
|
||||
|
||||
// Config is the configuration for an IDTokenVerifier.
|
||||
type Config struct {
|
||||
// Expected audience of the token. For a majority of the cases this is expected to be
|
||||
// the ID of the client that initialized the login flow. It may occasionally differ if
|
||||
// the provider supports the authorizing party (azp) claim.
|
||||
//
|
||||
// If not provided, users must explicitly set SkipClientIDCheck.
|
||||
ClientID string
|
||||
// If specified, only this set of algorithms may be used to sign the JWT.
|
||||
//
|
||||
// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
|
||||
// defaults to the set of algorithms the provider supports. Otherwise this value
// defaults to RS256.
|
||||
SupportedSigningAlgs []string
|
||||
|
||||
// If true, no ClientID check performed. Must be true if ClientID field is empty.
|
||||
SkipClientIDCheck bool
|
||||
// If true, token expiry is not checked.
|
||||
SkipExpiryCheck bool
|
||||
|
||||
// SkipIssuerCheck is intended for specialized cases where the caller wishes to
|
||||
// defer issuer validation. When enabled, callers MUST independently verify the Token's
|
||||
// Issuer is a known good value.
|
||||
//
|
||||
// Mismatched issuers often indicate client mis-configuration. If mismatches are
|
||||
// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
|
||||
// this option.
|
||||
SkipIssuerCheck bool
|
||||
|
||||
// Time function to check Token expiry. Defaults to time.Now
|
||||
Now func() time.Time
|
||||
}
|
||||
|
||||
// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
|
||||
//
|
||||
// The returned IDTokenVerifier is tied to the Provider's context and its behavior is
|
||||
// undefined once the Provider's context is canceled.
|
||||
func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
|
||||
if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
|
||||
// Make a copy so we don't modify the config values.
|
||||
cp := &Config{}
|
||||
*cp = *config
|
||||
cp.SupportedSigningAlgs = p.algorithms
|
||||
config = cp
|
||||
}
|
||||
return NewVerifier(p.issuer, p.remoteKeySet, config)
|
||||
}
|
||||
|
||||
func parseJWT(p string) ([]byte, error) {
|
||||
parts := strings.Split(p, ".")
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
|
||||
}
|
||||
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
func contains(sli []string, ele string) bool {
|
||||
for _, s := range sli {
|
||||
if s == ele {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns the Claims from the distributed JWT token
|
||||
func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
|
||||
req, err := http.NewRequest("GET", src.Endpoint, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed request: %v", err)
|
||||
}
|
||||
if src.AccessToken != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+src.AccessToken)
|
||||
}
|
||||
|
||||
resp, err := doRequest(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
|
||||
}
|
||||
|
||||
token, err := verifier.Verify(ctx, string(body))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed response body: %v", err)
|
||||
}
|
||||
|
||||
return token.claims, nil
|
||||
}
|
||||
|
||||
// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
|
||||
// any additional checks depending on the Config, and returns the payload.
|
||||
//
|
||||
// Verify does NOT do nonce validation, which is the caller's responsibility.
|
||||
//
|
||||
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
|
||||
//
|
||||
// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// // Extract the ID Token from oauth2 token.
|
||||
// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
|
||||
// if !ok {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// token, err := verifier.Verify(ctx, rawIDToken)
|
||||
//
|
||||
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
|
||||
jws, err := jose.ParseSigned(rawIDToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
||||
}
|
||||
|
||||
// Throw out tokens with invalid claims before trying to verify the token. This lets
|
||||
// us do cheap checks before possibly re-syncing keys.
|
||||
payload, err := parseJWT(rawIDToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
||||
}
|
||||
var token idToken
|
||||
if err := json.Unmarshal(payload, &token); err != nil {
|
||||
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
|
||||
}
|
||||
|
||||
distributedClaims := make(map[string]claimSource)
|
||||
|
||||
// step through the token to map claim names to claim sources
|
||||
for cn, src := range token.ClaimNames {
|
||||
if src == "" {
|
||||
return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
|
||||
}
|
||||
s, ok := token.ClaimSources[src]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("oidc: source does not exist")
|
||||
}
|
||||
distributedClaims[cn] = s
|
||||
}
|
||||
|
||||
t := &IDToken{
|
||||
Issuer: token.Issuer,
|
||||
Subject: token.Subject,
|
||||
Audience: []string(token.Audience),
|
||||
Expiry: time.Time(token.Expiry),
|
||||
IssuedAt: time.Time(token.IssuedAt),
|
||||
Nonce: token.Nonce,
|
||||
AccessTokenHash: token.AtHash,
|
||||
claims: payload,
|
||||
distributedClaims: distributedClaims,
|
||||
}
|
||||
|
||||
// Check issuer.
|
||||
if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
|
||||
// Google sometimes returns "accounts.google.com" as the issuer claim instead of
|
||||
// the required "https://accounts.google.com". Detect this case and allow it only
|
||||
// for Google.
|
||||
//
|
||||
// We will not add hooks to let other providers go off spec like this.
|
||||
if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
|
||||
return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
|
||||
}
|
||||
}
|
||||
|
||||
// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
|
||||
//
|
||||
// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
|
||||
if !v.config.SkipClientIDCheck {
|
||||
if v.config.ClientID != "" {
|
||||
if !contains(t.Audience, v.config.ClientID) {
|
||||
return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If SkipExpiryCheck is false, make sure the token is not expired.
|
||||
if !v.config.SkipExpiryCheck {
|
||||
now := time.Now
|
||||
if v.config.Now != nil {
|
||||
now = v.config.Now
|
||||
}
|
||||
nowTime := now()
|
||||
|
||||
if t.Expiry.Before(nowTime) {
|
||||
return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", t.Expiry)
|
||||
}
|
||||
|
||||
// If nbf claim is provided in token, ensure that it is indeed in the past.
|
||||
if token.NotBefore != nil {
|
||||
nbfTime := time.Time(*token.NotBefore)
|
||||
leeway := 1 * time.Minute
|
||||
|
||||
if nowTime.Add(leeway).Before(nbfTime) {
|
||||
return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch len(jws.Signatures) {
|
||||
case 0:
|
||||
return nil, fmt.Errorf("oidc: id token not signed")
|
||||
case 1:
|
||||
default:
|
||||
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
|
||||
}
|
||||
|
||||
sig := jws.Signatures[0]
|
||||
supportedSigAlgs := v.config.SupportedSigningAlgs
|
||||
if len(supportedSigAlgs) == 0 {
|
||||
supportedSigAlgs = []string{RS256}
|
||||
}
|
||||
|
||||
if !contains(supportedSigAlgs, sig.Header.Algorithm) {
|
||||
return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
|
||||
}
|
||||
|
||||
t.sigAlgorithm = sig.Header.Algorithm
|
||||
|
||||
gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to verify signature: %v", err)
|
||||
}
|
||||
|
||||
// Ensure that the payload returned by go-jose actually matches the payload parsed earlier.
|
||||
if !bytes.Equal(gotPayload, payload) {
|
||||
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Nonce returns an auth code option which requires the ID Token created by the
|
||||
// OpenID Connect provider to contain the specified nonce.
|
||||
func Nonce(nonce string) oauth2.AuthCodeOption {
|
||||
return oauth2.SetAuthURLParam("nonce", nonce)
|
||||
}
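A brief sketch of threading the nonce through the flow; generating a random nonce, storing it (for example in the session) and comparing it against IDToken.Nonce after verification remain the application's responsibility:

package example

import (
	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

// authCodeURLWithNonce builds the authorization URL with the nonce attached as a query parameter.
func authCodeURLWithNonce(cfg *oauth2.Config, state, nonce string) string {
	return cfg.AuthCodeURL(state, oidc.Nonce(nonce))
}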
|
3 vendor/golang.org/x/crypto/AUTHORS generated vendored Normal file
@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.
3 vendor/golang.org/x/crypto/CONTRIBUTORS generated vendored Normal file
@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.
27 vendor/golang.org/x/crypto/LICENSE generated vendored Normal file
@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 vendor/golang.org/x/crypto/PATENTS generated vendored Normal file
@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
222
vendor/golang.org/x/crypto/ed25519/ed25519.go
generated
vendored
Normal file
@ -0,0 +1,222 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// In Go 1.13, the ed25519 package was promoted to the standard library as
|
||||
// crypto/ed25519, and this package became a wrapper for the standard library one.
|
||||
//
|
||||
// +build !go1.13
|
||||
|
||||
// Package ed25519 implements the Ed25519 signature algorithm. See
|
||||
// https://ed25519.cr.yp.to/.
|
||||
//
|
||||
// These functions are also compatible with the “Ed25519” function defined in
|
||||
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
|
||||
// representation includes a public key suffix to make multiple signing
|
||||
// operations with the same key more efficient. This package refers to the RFC
|
||||
// 8032 private key as the “seed”.
|
||||
package ed25519
|
||||
|
||||
// This code is a port of the public domain, “ref10” implementation of ed25519
|
||||
// from SUPERCOP.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/sha512"
|
||||
"errors"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/ed25519/internal/edwards25519"
|
||||
)
|
||||
|
||||
const (
|
||||
// PublicKeySize is the size, in bytes, of public keys as used in this package.
|
||||
PublicKeySize = 32
|
||||
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
|
||||
PrivateKeySize = 64
|
||||
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
|
||||
SignatureSize = 64
|
||||
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
|
||||
SeedSize = 32
|
||||
)
|
||||
|
||||
// PublicKey is the type of Ed25519 public keys.
|
||||
type PublicKey []byte
|
||||
|
||||
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
|
||||
type PrivateKey []byte
|
||||
|
||||
// Public returns the PublicKey corresponding to priv.
|
||||
func (priv PrivateKey) Public() crypto.PublicKey {
|
||||
publicKey := make([]byte, PublicKeySize)
|
||||
copy(publicKey, priv[32:])
|
||||
return PublicKey(publicKey)
|
||||
}
|
||||
|
||||
// Seed returns the private key seed corresponding to priv. It is provided for
|
||||
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
|
||||
// in this package.
|
||||
func (priv PrivateKey) Seed() []byte {
|
||||
seed := make([]byte, SeedSize)
|
||||
copy(seed, priv[:32])
|
||||
return seed
|
||||
}
|
||||
|
||||
// Sign signs the given message with priv.
|
||||
// Ed25519 performs two passes over messages to be signed and therefore cannot
|
||||
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
|
||||
// indicate the message hasn't been hashed. This can be achieved by passing
|
||||
// crypto.Hash(0) as the value for opts.
|
||||
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
|
||||
if opts.HashFunc() != crypto.Hash(0) {
|
||||
return nil, errors.New("ed25519: cannot sign hashed message")
|
||||
}
|
||||
|
||||
return Sign(priv, message), nil
|
||||
}
|
||||
|
||||
// GenerateKey generates a public/private key pair using entropy from rand.
|
||||
// If rand is nil, crypto/rand.Reader will be used.
|
||||
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
|
||||
if rand == nil {
|
||||
rand = cryptorand.Reader
|
||||
}
|
||||
|
||||
seed := make([]byte, SeedSize)
|
||||
if _, err := io.ReadFull(rand, seed); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
privateKey := NewKeyFromSeed(seed)
|
||||
publicKey := make([]byte, PublicKeySize)
|
||||
copy(publicKey, privateKey[32:])
|
||||
|
||||
return publicKey, privateKey, nil
|
||||
}
|
||||
|
||||
// NewKeyFromSeed calculates a private key from a seed. It will panic if
|
||||
// len(seed) is not SeedSize. This function is provided for interoperability
|
||||
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
|
||||
// package.
|
||||
func NewKeyFromSeed(seed []byte) PrivateKey {
|
||||
if l := len(seed); l != SeedSize {
|
||||
panic("ed25519: bad seed length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
digest := sha512.Sum512(seed)
|
||||
digest[0] &= 248
|
||||
digest[31] &= 127
|
||||
digest[31] |= 64
|
||||
|
||||
var A edwards25519.ExtendedGroupElement
|
||||
var hBytes [32]byte
|
||||
copy(hBytes[:], digest[:])
|
||||
edwards25519.GeScalarMultBase(&A, &hBytes)
|
||||
var publicKeyBytes [32]byte
|
||||
A.ToBytes(&publicKeyBytes)
|
||||
|
||||
privateKey := make([]byte, PrivateKeySize)
|
||||
copy(privateKey, seed)
|
||||
copy(privateKey[32:], publicKeyBytes[:])
|
||||
|
||||
return privateKey
|
||||
}
|
||||
|
||||
// Sign signs the message with privateKey and returns a signature. It will
|
||||
// panic if len(privateKey) is not PrivateKeySize.
|
||||
func Sign(privateKey PrivateKey, message []byte) []byte {
|
||||
if l := len(privateKey); l != PrivateKeySize {
|
||||
panic("ed25519: bad private key length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
h := sha512.New()
|
||||
h.Write(privateKey[:32])
|
||||
|
||||
var digest1, messageDigest, hramDigest [64]byte
|
||||
var expandedSecretKey [32]byte
|
||||
h.Sum(digest1[:0])
|
||||
copy(expandedSecretKey[:], digest1[:])
|
||||
expandedSecretKey[0] &= 248
|
||||
expandedSecretKey[31] &= 63
|
||||
expandedSecretKey[31] |= 64
|
||||
|
||||
h.Reset()
|
||||
h.Write(digest1[32:])
|
||||
h.Write(message)
|
||||
h.Sum(messageDigest[:0])
|
||||
|
||||
var messageDigestReduced [32]byte
|
||||
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
|
||||
var R edwards25519.ExtendedGroupElement
|
||||
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
|
||||
|
||||
var encodedR [32]byte
|
||||
R.ToBytes(&encodedR)
|
||||
|
||||
h.Reset()
|
||||
h.Write(encodedR[:])
|
||||
h.Write(privateKey[32:])
|
||||
h.Write(message)
|
||||
h.Sum(hramDigest[:0])
|
||||
var hramDigestReduced [32]byte
|
||||
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
|
||||
|
||||
var s [32]byte
|
||||
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
|
||||
|
||||
signature := make([]byte, SignatureSize)
|
||||
copy(signature[:], encodedR[:])
|
||||
copy(signature[32:], s[:])
|
||||
|
||||
return signature
|
||||
}
|
||||
|
||||
// Verify reports whether sig is a valid signature of message by publicKey. It
|
||||
// will panic if len(publicKey) is not PublicKeySize.
|
||||
func Verify(publicKey PublicKey, message, sig []byte) bool {
|
||||
if l := len(publicKey); l != PublicKeySize {
|
||||
panic("ed25519: bad public key length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
if len(sig) != SignatureSize || sig[63]&224 != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var A edwards25519.ExtendedGroupElement
|
||||
var publicKeyBytes [32]byte
|
||||
copy(publicKeyBytes[:], publicKey)
|
||||
if !A.FromBytes(&publicKeyBytes) {
|
||||
return false
|
||||
}
|
||||
edwards25519.FeNeg(&A.X, &A.X)
|
||||
edwards25519.FeNeg(&A.T, &A.T)
|
||||
|
||||
h := sha512.New()
|
||||
h.Write(sig[:32])
|
||||
h.Write(publicKey[:])
|
||||
h.Write(message)
|
||||
var digest [64]byte
|
||||
h.Sum(digest[:0])
|
||||
|
||||
var hReduced [32]byte
|
||||
edwards25519.ScReduce(&hReduced, &digest)
|
||||
|
||||
var R edwards25519.ProjectiveGroupElement
|
||||
var s [32]byte
|
||||
copy(s[:], sig[32:])
|
||||
|
||||
// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
|
||||
// the range [0, order) in order to prevent signature malleability.
|
||||
if !edwards25519.ScMinimal(&s) {
|
||||
return false
|
||||
}
|
||||
|
||||
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
|
||||
|
||||
var checkR [32]byte
|
||||
R.ToBytes(&checkR)
|
||||
return bytes.Equal(sig[:32], checkR[:])
|
||||
}
|
73 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go (generated, vendored, normal file)
@@ -0,0 +1,73 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.13

// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519

import (
    "crypto/ed25519"
    "io"
)

const (
    // PublicKeySize is the size, in bytes, of public keys as used in this package.
    PublicKeySize = 32
    // PrivateKeySize is the size, in bytes, of private keys as used in this package.
    PrivateKeySize = 64
    // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
    SignatureSize = 64
    // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
    SeedSize = 32
)

// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey

// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey

// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
    return ed25519.GenerateKey(rand)
}

// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
    return ed25519.NewKeyFromSeed(seed)
}

// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
    return ed25519.Sign(privateKey, message)
}

// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
    return ed25519.Verify(publicKey, message, sig)
}
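To make the wrapper's surface concrete, here is a minimal round-trip sketch using only the functions declared above (GenerateKey, Sign, Verify). It is illustrative and not part of the vendored diff; the message bytes are an arbitrary assumption.

package main

import (
    "fmt"
    "log"

    "golang.org/x/crypto/ed25519"
)

func main() {
    // Generate a key pair; passing nil falls back to crypto/rand.Reader.
    pub, priv, err := ed25519.GenerateKey(nil)
    if err != nil {
        log.Fatal(err)
    }

    // Sign an arbitrary message and verify the detached signature.
    msg := []byte("example message") // assumed value, for illustration only
    sig := ed25519.Sign(priv, msg)
    fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig)) // expected: true
}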
1422 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go (generated, vendored, normal file)
File diff suppressed because it is too large.
1793 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go (generated, vendored, normal file)
File diff suppressed because it is too large.
77 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go (generated, vendored, normal file)
@@ -0,0 +1,77 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
2898 / PKCS #5 v2.0.

A key derivation function is useful when encrypting data based on a password
or any other not-fully-random data. It uses a pseudorandom function to derive
a secure encryption key based on the password.

While v2.0 of the standard defines only one pseudorandom function to use,
HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key.
*/
package pbkdf2 // import "golang.org/x/crypto/pbkdf2"

import (
    "crypto/hmac"
    "hash"
)

// Key derives a key from the password, salt and iteration count, returning a
// []byte of length keylen that can be used as cryptographic key. The key is
// derived based on the method described as PBKDF2 with the HMAC variant using
// the supplied hash function.
//
// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
// doing:
//
//     dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
//
// Remember to get a good random salt. At least 8 bytes is recommended by the
// RFC.
//
// Using a higher iteration count will increase the cost of an exhaustive
// search but will also make derivation proportionally slower.
func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
    prf := hmac.New(h, password)
    hashLen := prf.Size()
    numBlocks := (keyLen + hashLen - 1) / hashLen

    var buf [4]byte
    dk := make([]byte, 0, numBlocks*hashLen)
    U := make([]byte, hashLen)
    for block := 1; block <= numBlocks; block++ {
        // N.B.: || means concatenation, ^ means XOR
        // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
        // U_1 = PRF(password, salt || uint(i))
        prf.Reset()
        prf.Write(salt)
        buf[0] = byte(block >> 24)
        buf[1] = byte(block >> 16)
        buf[2] = byte(block >> 8)
        buf[3] = byte(block)
        prf.Write(buf[:4])
        dk = prf.Sum(dk)
        T := dk[len(dk)-hashLen:]
        copy(U, T)

        // U_n = PRF(password, U_(n-1))
        for n := 2; n <= iter; n++ {
            prf.Reset()
            prf.Write(U)
            U = U[:0]
            U = prf.Sum(U)
            for x := range U {
                T[x] ^= U[x]
            }
        }
    }
    return dk[:keyLen]
}
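As a usage sketch (not part of the vendored file), Key can be paired with a random salt to derive a 32-byte key suitable for AES-256; the 16-byte salt and the iteration count of 4096 are illustrative assumptions in line with the package comment above.

package main

import (
    "crypto/rand"
    "crypto/sha256"
    "fmt"

    "golang.org/x/crypto/pbkdf2"
)

func main() {
    // A random salt of at least 8 bytes, as the package comment recommends; 16 is an assumption.
    salt := make([]byte, 16)
    if _, err := rand.Read(salt); err != nil {
        panic(err)
    }

    // Derive a 32-byte key (suitable for AES-256) using HMAC-SHA-256 as the PRF.
    key := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
    fmt.Printf("derived %d-byte key\n", len(key))
}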
56 vendor/golang.org/x/net/context/context.go (generated, vendored, normal file)
@@ -0,0 +1,56 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
//     func DoSomething(ctx context.Context, arg Arg) error {
//         // ... use ctx ...
//     }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"

// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
    return background
}

// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
    return todo
}
71 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go (generated, vendored, normal file)
@@ -0,0 +1,71 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp"

import (
    "context"
    "io"
    "net/http"
    "net/url"
    "strings"
)

// Do sends an HTTP request with the provided http.Client and returns
// an HTTP response.
//
// If the client is nil, http.DefaultClient is used.
//
// The provided ctx must be non-nil. If it is canceled or times out,
// ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
    if client == nil {
        client = http.DefaultClient
    }
    resp, err := client.Do(req.WithContext(ctx))
    // If we got an error, and the context has been canceled,
    // the context's error is probably more useful.
    if err != nil {
        select {
        case <-ctx.Done():
            err = ctx.Err()
        default:
        }
    }
    return resp, err
}

// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }
    return Do(ctx, client, req)
}

// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
    req, err := http.NewRequest("HEAD", url, nil)
    if err != nil {
        return nil, err
    }
    return Do(ctx, client, req)
}

// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
    req, err := http.NewRequest("POST", url, body)
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", bodyType)
    return Do(ctx, client, req)
}

// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
    return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
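A minimal sketch of how these helpers are typically called, assuming a reachable URL and an arbitrary 5-second timeout; passing a nil client falls back to http.DefaultClient as documented above.

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/net/context/ctxhttp"
)

func main() {
    // Bound the whole request with a context deadline; Do/Get return ctx.Err() if it fires first.
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    resp, err := ctxhttp.Get(ctx, nil, "https://example.org") // nil client uses http.DefaultClient
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}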
73 vendor/golang.org/x/net/context/go17.go (generated, vendored, normal file)
@@ -0,0 +1,73 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.7
// +build go1.7

package context

import (
    "context" // standard library's context, as of Go 1.7
    "time"
)

var (
    todo       = context.TODO()
    background = context.Background()
)

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
    ctx, f := context.WithCancel(parent)
    return ctx, CancelFunc(f)
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
    ctx, f := context.WithDeadline(parent, deadline)
    return ctx, CancelFunc(f)
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
//     func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//         ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//         defer cancel() // releases resources if slowOperation completes before timeout elapses
//         return slowOperation(ctx)
//     }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
    return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
    return context.WithValue(parent, key, val)
}
21 vendor/golang.org/x/net/context/go19.go (generated, vendored, normal file)
@@ -0,0 +1,21 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.9
// +build go1.9

package context

import "context" // standard library's context, as of Go 1.7

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
301 vendor/golang.org/x/net/context/pre_go17.go (generated, vendored, normal file)
@@ -0,0 +1,301 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.7
|
||||
// +build !go1.7
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
||||
// struct{}, since vars of this type must have distinct addresses.
|
||||
type emptyCtx int
|
||||
|
||||
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func (*emptyCtx) Done() <-chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Err() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Value(key interface{}) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *emptyCtx) String() string {
|
||||
switch e {
|
||||
case background:
|
||||
return "context.Background"
|
||||
case todo:
|
||||
return "context.TODO"
|
||||
}
|
||||
return "unknown empty Context"
|
||||
}
|
||||
|
||||
var (
|
||||
background = new(emptyCtx)
|
||||
todo = new(emptyCtx)
|
||||
)
|
||||
|
||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
||||
var Canceled = errors.New("context canceled")
|
||||
|
||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
||||
// deadline passes.
|
||||
var DeadlineExceeded = errors.New("context deadline exceeded")
|
||||
|
||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
||||
// context's Done channel is closed when the returned cancel function is called
|
||||
// or when the parent context's Done channel is closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
||||
c := newCancelCtx(parent)
|
||||
propagateCancel(parent, c)
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// newCancelCtx returns an initialized cancelCtx.
|
||||
func newCancelCtx(parent Context) *cancelCtx {
|
||||
return &cancelCtx{
|
||||
Context: parent,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// propagateCancel arranges for child to be canceled when parent is.
|
||||
func propagateCancel(parent Context, child canceler) {
|
||||
if parent.Done() == nil {
|
||||
return // parent is never canceled
|
||||
}
|
||||
if p, ok := parentCancelCtx(parent); ok {
|
||||
p.mu.Lock()
|
||||
if p.err != nil {
|
||||
// parent has already been canceled
|
||||
child.cancel(false, p.err)
|
||||
} else {
|
||||
if p.children == nil {
|
||||
p.children = make(map[canceler]bool)
|
||||
}
|
||||
p.children[child] = true
|
||||
}
|
||||
p.mu.Unlock()
|
||||
} else {
|
||||
go func() {
|
||||
select {
|
||||
case <-parent.Done():
|
||||
child.cancel(false, parent.Err())
|
||||
case <-child.Done():
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// parentCancelCtx follows a chain of parent references until it finds a
|
||||
// *cancelCtx. This function understands how each of the concrete types in this
|
||||
// package represents its parent.
|
||||
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
|
||||
for {
|
||||
switch c := parent.(type) {
|
||||
case *cancelCtx:
|
||||
return c, true
|
||||
case *timerCtx:
|
||||
return c.cancelCtx, true
|
||||
case *valueCtx:
|
||||
parent = c.Context
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// removeChild removes a context from its parent.
|
||||
func removeChild(parent Context, child canceler) {
|
||||
p, ok := parentCancelCtx(parent)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
p.mu.Lock()
|
||||
if p.children != nil {
|
||||
delete(p.children, child)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// A canceler is a context type that can be canceled directly. The
|
||||
// implementations are *cancelCtx and *timerCtx.
|
||||
type canceler interface {
|
||||
cancel(removeFromParent bool, err error)
|
||||
Done() <-chan struct{}
|
||||
}
|
||||
|
||||
// A cancelCtx can be canceled. When canceled, it also cancels any children
|
||||
// that implement canceler.
|
||||
type cancelCtx struct {
|
||||
Context
|
||||
|
||||
done chan struct{} // closed by the first cancel call.
|
||||
|
||||
mu sync.Mutex
|
||||
children map[canceler]bool // set to nil by the first cancel call
|
||||
err error // set to non-nil by the first cancel call
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Done() <-chan struct{} {
|
||||
return c.done
|
||||
}
|
||||
|
||||
func (c *cancelCtx) Err() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.err
|
||||
}
|
||||
|
||||
func (c *cancelCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithCancel", c.Context)
|
||||
}
|
||||
|
||||
// cancel closes c.done, cancels each of c's children, and, if
|
||||
// removeFromParent is true, removes c from its parent's children.
|
||||
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
|
||||
if err == nil {
|
||||
panic("context: internal error: missing cancel error")
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.err != nil {
|
||||
c.mu.Unlock()
|
||||
return // already canceled
|
||||
}
|
||||
c.err = err
|
||||
close(c.done)
|
||||
for child := range c.children {
|
||||
// NOTE: acquiring the child's lock while holding parent's lock.
|
||||
child.cancel(false, err)
|
||||
}
|
||||
c.children = nil
|
||||
c.mu.Unlock()
|
||||
|
||||
if removeFromParent {
|
||||
removeChild(c.Context, c)
|
||||
}
|
||||
}
|
||||
|
||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
||||
// context's Done channel is closed when the deadline expires, when the returned
|
||||
// cancel function is called, or when the parent context's Done channel is
|
||||
// closed, whichever happens first.
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete.
|
||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
||||
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
|
||||
// The current deadline is already sooner than the new one.
|
||||
return WithCancel(parent)
|
||||
}
|
||||
c := &timerCtx{
|
||||
cancelCtx: newCancelCtx(parent),
|
||||
deadline: deadline,
|
||||
}
|
||||
propagateCancel(parent, c)
|
||||
d := deadline.Sub(time.Now())
|
||||
if d <= 0 {
|
||||
c.cancel(true, DeadlineExceeded) // deadline has already passed
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.err == nil {
|
||||
c.timer = time.AfterFunc(d, func() {
|
||||
c.cancel(true, DeadlineExceeded)
|
||||
})
|
||||
}
|
||||
return c, func() { c.cancel(true, Canceled) }
|
||||
}
|
||||
|
||||
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
|
||||
// implement Done and Err. It implements cancel by stopping its timer then
|
||||
// delegating to cancelCtx.cancel.
|
||||
type timerCtx struct {
|
||||
*cancelCtx
|
||||
timer *time.Timer // Under cancelCtx.mu.
|
||||
|
||||
deadline time.Time
|
||||
}
|
||||
|
||||
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return c.deadline, true
|
||||
}
|
||||
|
||||
func (c *timerCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
|
||||
}
|
||||
|
||||
func (c *timerCtx) cancel(removeFromParent bool, err error) {
|
||||
c.cancelCtx.cancel(false, err)
|
||||
if removeFromParent {
|
||||
// Remove this timerCtx from its parent cancelCtx's children.
|
||||
removeChild(c.cancelCtx.Context, c)
|
||||
}
|
||||
c.mu.Lock()
|
||||
if c.timer != nil {
|
||||
c.timer.Stop()
|
||||
c.timer = nil
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
||||
//
|
||||
// Canceling this context releases resources associated with it, so code should
|
||||
// call cancel as soon as the operations running in this Context complete:
|
||||
//
|
||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
||||
// return slowOperation(ctx)
|
||||
// }
|
||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
||||
return WithDeadline(parent, time.Now().Add(timeout))
|
||||
}
|
||||
|
||||
// WithValue returns a copy of parent in which the value associated with key is
|
||||
// val.
|
||||
//
|
||||
// Use context Values only for request-scoped data that transits processes and
|
||||
// APIs, not for passing optional parameters to functions.
|
||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
||||
return &valueCtx{parent, key, val}
|
||||
}
|
||||
|
||||
// A valueCtx carries a key-value pair. It implements Value for that key and
|
||||
// delegates all other calls to the embedded Context.
|
||||
type valueCtx struct {
|
||||
Context
|
||||
key, val interface{}
|
||||
}
|
||||
|
||||
func (c *valueCtx) String() string {
|
||||
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
|
||||
}
|
||||
|
||||
func (c *valueCtx) Value(key interface{}) interface{} {
|
||||
if c.key == key {
|
||||
return c.val
|
||||
}
|
||||
return c.Context.Value(key)
|
||||
}
|
110 vendor/golang.org/x/net/context/pre_go19.go (generated, vendored, normal file)
@@ -0,0 +1,110 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.9
|
||||
// +build !go1.9
|
||||
|
||||
package context
|
||||
|
||||
import "time"
|
||||
|
||||
// A Context carries a deadline, a cancelation signal, and other values across
|
||||
// API boundaries.
|
||||
//
|
||||
// Context's methods may be called by multiple goroutines simultaneously.
|
||||
type Context interface {
|
||||
// Deadline returns the time when work done on behalf of this context
|
||||
// should be canceled. Deadline returns ok==false when no deadline is
|
||||
// set. Successive calls to Deadline return the same results.
|
||||
Deadline() (deadline time.Time, ok bool)
|
||||
|
||||
// Done returns a channel that's closed when work done on behalf of this
|
||||
// context should be canceled. Done may return nil if this context can
|
||||
// never be canceled. Successive calls to Done return the same value.
|
||||
//
|
||||
// WithCancel arranges for Done to be closed when cancel is called;
|
||||
// WithDeadline arranges for Done to be closed when the deadline
|
||||
// expires; WithTimeout arranges for Done to be closed when the timeout
|
||||
// elapses.
|
||||
//
|
||||
// Done is provided for use in select statements:
|
||||
//
|
||||
// // Stream generates values with DoSomething and sends them to out
|
||||
// // until DoSomething returns an error or ctx.Done is closed.
|
||||
// func Stream(ctx context.Context, out chan<- Value) error {
|
||||
// for {
|
||||
// v, err := DoSomething(ctx)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return ctx.Err()
|
||||
// case out <- v:
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// See http://blog.golang.org/pipelines for more examples of how to use
|
||||
// a Done channel for cancelation.
|
||||
Done() <-chan struct{}
|
||||
|
||||
// Err returns a non-nil error value after Done is closed. Err returns
|
||||
// Canceled if the context was canceled or DeadlineExceeded if the
|
||||
// context's deadline passed. No other values for Err are defined.
|
||||
// After Done is closed, successive calls to Err return the same value.
|
||||
Err() error
|
||||
|
||||
// Value returns the value associated with this context for key, or nil
|
||||
// if no value is associated with key. Successive calls to Value with
|
||||
// the same key returns the same result.
|
||||
//
|
||||
// Use context values only for request-scoped data that transits
|
||||
// processes and API boundaries, not for passing optional parameters to
|
||||
// functions.
|
||||
//
|
||||
// A key identifies a specific value in a Context. Functions that wish
|
||||
// to store values in Context typically allocate a key in a global
|
||||
// variable then use that key as the argument to context.WithValue and
|
||||
// Context.Value. A key can be any type that supports equality;
|
||||
// packages should define keys as an unexported type to avoid
|
||||
// collisions.
|
||||
//
|
||||
// Packages that define a Context key should provide type-safe accessors
|
||||
// for the values stores using that key:
|
||||
//
|
||||
// // Package user defines a User type that's stored in Contexts.
|
||||
// package user
|
||||
//
|
||||
// import "golang.org/x/net/context"
|
||||
//
|
||||
// // User is the type of value stored in the Contexts.
|
||||
// type User struct {...}
|
||||
//
|
||||
// // key is an unexported type for keys defined in this package.
|
||||
// // This prevents collisions with keys defined in other packages.
|
||||
// type key int
|
||||
//
|
||||
// // userKey is the key for user.User values in Contexts. It is
|
||||
// // unexported; clients use user.NewContext and user.FromContext
|
||||
// // instead of using this key directly.
|
||||
// var userKey key = 0
|
||||
//
|
||||
// // NewContext returns a new Context that carries value u.
|
||||
// func NewContext(ctx context.Context, u *User) context.Context {
|
||||
// return context.WithValue(ctx, userKey, u)
|
||||
// }
|
||||
//
|
||||
// // FromContext returns the User value stored in ctx, if any.
|
||||
// func FromContext(ctx context.Context) (*User, bool) {
|
||||
// u, ok := ctx.Value(userKey).(*User)
|
||||
// return u, ok
|
||||
// }
|
||||
Value(key interface{}) interface{}
|
||||
}
|
||||
|
||||
// A CancelFunc tells an operation to abandon its work.
|
||||
// A CancelFunc does not wait for the work to stop.
|
||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
||||
type CancelFunc func()
|
13 vendor/golang.org/x/oauth2/.travis.yml (generated, vendored, normal file)
@@ -0,0 +1,13 @@
language: go

go:
  - tip

install:
  - export GOPATH="$HOME/gopath"
  - mkdir -p "$GOPATH/src/golang.org/x"
  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
  - go get -v -t -d golang.org/x/oauth2/...

script:
  - go test -v golang.org/x/oauth2/...
3 vendor/golang.org/x/oauth2/AUTHORS (generated, vendored, normal file)
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
26 vendor/golang.org/x/oauth2/CONTRIBUTING.md (generated, vendored, normal file)
@@ -0,0 +1,26 @@
# Contributing to Go

Go is an open source project.

It is the work of hundreds of contributors. We appreciate your help!

## Filing issues

When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.

## Contributing code

Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.

Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
3 vendor/golang.org/x/oauth2/CONTRIBUTORS (generated, vendored, normal file)
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
27 vendor/golang.org/x/oauth2/LICENSE (generated, vendored, normal file)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 vendor/golang.org/x/oauth2/README.md (generated, vendored, normal file)
@@ -0,0 +1,36 @@
# OAuth2 for Go

[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2)
[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)

oauth2 package contains a client implementation for OAuth 2.0 spec.

## Installation

~~~~
go get golang.org/x/oauth2
~~~~

Or you can manually git clone the repository to
`$(go env GOPATH)/src/golang.org/x/oauth2`.

See pkg.go.dev for further documentation and examples.

* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)

## Policy for new packages

We no longer accept new provider-specific packages in this repo if all
they do is add a single endpoint variable. If you just want to add a
single endpoint, add it to the
[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints)
package.

## Report Issues / Send Patches

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.
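For orientation only (not part of the vendored README), a three-legged flow with this package usually looks like the sketch below; the client ID, client secret, redirect URL, scopes, and endpoint URLs are placeholder assumptions.

package main

import (
    "context"
    "fmt"

    "golang.org/x/oauth2"
)

func main() {
    conf := &oauth2.Config{
        ClientID:     "YOUR_CLIENT_ID",     // placeholder
        ClientSecret: "YOUR_CLIENT_SECRET", // placeholder
        RedirectURL:  "https://example.org/callback",
        Scopes:       []string{"openid"},
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://provider.example.org/oauth2/auth",  // placeholder
            TokenURL: "https://provider.example.org/oauth2/token", // placeholder
        },
    }

    // 1. Send the user to the provider's consent page.
    fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

    // 2. Exchange the authorization code received on the redirect for a token,
    //    then build an HTTP client that refreshes the token automatically.
    var code string
    fmt.Scan(&code)
    tok, err := conf.Exchange(context.Background(), code)
    if err != nil {
        panic(err)
    }
    client := conf.Client(context.Background(), tok)
    _ = client // use client to call the provider's APIs
}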
14 vendor/golang.org/x/oauth2/internal/client_appengine.go (generated, vendored, normal file)
@@ -0,0 +1,14 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build appengine
// +build appengine

package internal

import "google.golang.org/appengine/urlfetch"

func init() {
    appengineClientHook = urlfetch.Client
}
6 vendor/golang.org/x/oauth2/internal/doc.go (generated, vendored, normal file)
@@ -0,0 +1,6 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal contains support packages for oauth2 package.
package internal
37 vendor/golang.org/x/oauth2/internal/oauth2.go (generated, vendored, normal file)
@@ -0,0 +1,37 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
    "crypto/rsa"
    "crypto/x509"
    "encoding/pem"
    "errors"
    "fmt"
)

// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
    block, _ := pem.Decode(key)
    if block != nil {
        key = block.Bytes
    }
    parsedKey, err := x509.ParsePKCS8PrivateKey(key)
    if err != nil {
        parsedKey, err = x509.ParsePKCS1PrivateKey(key)
        if err != nil {
            return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
        }
    }
    parsed, ok := parsedKey.(*rsa.PrivateKey)
    if !ok {
        return nil, errors.New("private key is invalid")
    }
    return parsed, nil
}
294 vendor/golang.org/x/oauth2/internal/token.go (generated, vendored, normal file)
@@ -0,0 +1,294 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
)
|
||||
|
||||
// Token represents the credentials used to authorize
|
||||
// the requests to access protected resources on the OAuth 2.0
|
||||
// provider's backend.
|
||||
//
|
||||
// This type is a mirror of oauth2.Token and exists to break
|
||||
// an otherwise-circular dependency. Other internal packages
|
||||
// should convert this Token into an oauth2.Token before use.
|
||||
type Token struct {
|
||||
// AccessToken is the token that authorizes and authenticates
|
||||
// the requests.
|
||||
AccessToken string
|
||||
|
||||
// TokenType is the type of token.
|
||||
// The Type method returns either this or "Bearer", the default.
|
||||
TokenType string
|
||||
|
||||
// RefreshToken is a token that's used by the application
|
||||
// (as opposed to the user) to refresh the access token
|
||||
// if it expires.
|
||||
RefreshToken string
|
||||
|
||||
// Expiry is the optional expiration time of the access token.
|
||||
//
|
||||
// If zero, TokenSource implementations will reuse the same
|
||||
// token forever and RefreshToken or equivalent
|
||||
// mechanisms for that TokenSource will not be used.
|
||||
Expiry time.Time
|
||||
|
||||
// Raw optionally contains extra metadata from the server
|
||||
// when updating a token.
|
||||
Raw interface{}
|
||||
}
|
||||
|
||||
// tokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type tokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
}
|
||||
|
||||
func (e *tokenJSON) expiry() (t time.Time) {
|
||||
if v := e.ExpiresIn; v != 0 {
|
||||
return time.Now().Add(time.Duration(v) * time.Second)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type expirationTime int32
|
||||
|
||||
func (e *expirationTime) UnmarshalJSON(b []byte) error {
|
||||
if len(b) == 0 || string(b) == "null" {
|
||||
return nil
|
||||
}
|
||||
var n json.Number
|
||||
err := json.Unmarshal(b, &n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i, err := n.Int64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if i > math.MaxInt32 {
|
||||
i = math.MaxInt32
|
||||
}
|
||||
*e = expirationTime(i)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
|
||||
//
|
||||
// Deprecated: this function no longer does anything. Caller code that
|
||||
// wants to avoid potential extra HTTP requests made during
|
||||
// auto-probing of the provider's auth style should set
|
||||
// Endpoint.AuthStyle.
|
||||
func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
|
||||
|
||||
// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
|
||||
type AuthStyle int
|
||||
|
||||
const (
|
||||
AuthStyleUnknown AuthStyle = 0
|
||||
AuthStyleInParams AuthStyle = 1
|
||||
AuthStyleInHeader AuthStyle = 2
|
||||
)
|
||||
|
||||
// authStyleCache is the set of tokenURLs we've successfully used via
|
||||
// RetrieveToken and which style auth we ended up using.
|
||||
// It's called a cache, but it doesn't (yet?) shrink. It's expected that
|
||||
// the set of OAuth2 servers a program contacts over time is fixed and
|
||||
// small.
|
||||
var authStyleCache struct {
|
||||
sync.Mutex
|
||||
m map[string]AuthStyle // keyed by tokenURL
|
||||
}
|
||||
|
||||
// ResetAuthCache resets the global authentication style cache used
|
||||
// for AuthStyleUnknown token requests.
|
||||
func ResetAuthCache() {
|
||||
authStyleCache.Lock()
|
||||
defer authStyleCache.Unlock()
|
||||
authStyleCache.m = nil
|
||||
}
|
||||
|
||||
// lookupAuthStyle reports which auth style we last used with tokenURL
|
||||
// when calling RetrieveToken and whether we have ever done so.
|
||||
func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
|
||||
authStyleCache.Lock()
|
||||
defer authStyleCache.Unlock()
|
||||
style, ok = authStyleCache.m[tokenURL]
|
||||
return
|
||||
}
|
||||
|
||||
// setAuthStyle adds an entry to authStyleCache, documented above.
|
||||
func setAuthStyle(tokenURL string, v AuthStyle) {
|
||||
authStyleCache.Lock()
|
||||
defer authStyleCache.Unlock()
|
||||
if authStyleCache.m == nil {
|
||||
authStyleCache.m = make(map[string]AuthStyle)
|
||||
}
|
||||
authStyleCache.m[tokenURL] = v
|
||||
}
|
||||
|
||||
// newTokenRequest returns a new *http.Request to retrieve a new token
|
||||
// from tokenURL using the provided clientID, clientSecret, and POST
|
||||
// body parameters.
|
||||
//
|
||||
// inParams is whether the clientID & clientSecret should be encoded
|
||||
// as the POST body. An 'inParams' value of true means to send it in
|
||||
// the POST body (along with any values in v); false means to send it
|
||||
// in the Authorization header.
|
||||
func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) {
|
||||
if authStyle == AuthStyleInParams {
|
||||
v = cloneURLValues(v)
|
||||
if clientID != "" {
|
||||
v.Set("client_id", clientID)
|
||||
}
|
||||
if clientSecret != "" {
|
||||
v.Set("client_secret", clientSecret)
|
||||
}
|
||||
}
|
||||
req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
if authStyle == AuthStyleInHeader {
|
||||
req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
|
||||
}
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func cloneURLValues(v url.Values) url.Values {
|
||||
v2 := make(url.Values, len(v))
|
||||
for k, vv := range v {
|
||||
v2[k] = append([]string(nil), vv...)
|
||||
}
|
||||
return v2
|
||||
}
|
||||
|
||||
func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) {
|
||||
needsAuthStyleProbe := authStyle == 0
|
||||
if needsAuthStyleProbe {
|
||||
if style, ok := lookupAuthStyle(tokenURL); ok {
|
||||
authStyle = style
|
||||
needsAuthStyleProbe = false
|
||||
} else {
|
||||
authStyle = AuthStyleInHeader // the first way we'll try
|
||||
}
|
||||
}
|
||||
req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
token, err := doTokenRoundTrip(ctx, req)
|
||||
if err != nil && needsAuthStyleProbe {
|
||||
// If we get an error, assume the server wants the
|
||||
// clientID & clientSecret in a different form.
|
||||
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
|
||||
// In summary:
|
||||
// - Reddit only accepts client secret in the Authorization header
|
||||
// - Dropbox accepts either it in URL param or Auth header, but not both.
|
||||
// - Google only accepts URL param (not spec compliant?), not Auth header
|
||||
// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
|
||||
//
|
||||
// We used to maintain a big table in this code of all the sites and which way
|
||||
// they went, but maintaining it didn't scale & got annoying.
|
||||
// So just try both ways.
|
||||
authStyle = AuthStyleInParams // the second way we'll try
|
||||
req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
|
||||
token, err = doTokenRoundTrip(ctx, req)
|
||||
}
|
||||
if needsAuthStyleProbe && err == nil {
|
||||
setAuthStyle(tokenURL, authStyle)
|
||||
}
|
||||
// Don't overwrite `RefreshToken` with an empty value
|
||||
// if this was a token refreshing request.
|
||||
if token != nil && token.RefreshToken == "" {
|
||||
token.RefreshToken = v.Get("refresh_token")
|
||||
}
|
||||
return token, err
|
||||
}
|
||||
|
||||
func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
|
||||
r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
|
||||
r.Body.Close()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
|
||||
}
|
||||
if code := r.StatusCode; code < 200 || code > 299 {
|
||||
return nil, &RetrieveError{
|
||||
Response: r,
|
||||
Body: body,
|
||||
}
|
||||
}
|
||||
|
||||
var token *Token
|
||||
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
|
||||
switch content {
|
||||
case "application/x-www-form-urlencoded", "text/plain":
|
||||
vals, err := url.ParseQuery(string(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
token = &Token{
|
||||
AccessToken: vals.Get("access_token"),
|
||||
TokenType: vals.Get("token_type"),
|
||||
RefreshToken: vals.Get("refresh_token"),
|
||||
Raw: vals,
|
||||
}
|
||||
e := vals.Get("expires_in")
|
||||
expires, _ := strconv.Atoi(e)
|
||||
if expires != 0 {
|
||||
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
|
||||
}
|
||||
default:
|
||||
var tj tokenJSON
|
||||
if err = json.Unmarshal(body, &tj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
token = &Token{
|
||||
AccessToken: tj.AccessToken,
|
||||
TokenType: tj.TokenType,
|
||||
RefreshToken: tj.RefreshToken,
|
||||
Expiry: tj.expiry(),
|
||||
Raw: make(map[string]interface{}),
|
||||
}
|
||||
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
|
||||
}
|
||||
if token.AccessToken == "" {
|
||||
return nil, errors.New("oauth2: server response missing access_token")
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
type RetrieveError struct {
|
||||
Response *http.Response
|
||||
Body []byte
|
||||
}
|
||||
|
||||
func (r *RetrieveError) Error() string {
|
||||
return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
|
||||
}
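The fallback above alternates between two ways of presenting the client credentials to the token endpoint. As a rough sketch only (these helpers are hypothetical illustrations, not the package's own newTokenRequest), the two request shapes look like this in plain net/http:

package tokenprobe

import (
	"net/http"
	"net/url"
	"strings"
)

// tokenRequestInHeader sends client_id/client_secret via HTTP Basic auth
// (AuthStyleInHeader), with the token parameters in the form body.
func tokenRequestInHeader(tokenURL, clientID, clientSecret string, v url.Values) (*http.Request, error) {
	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
	return req, nil
}

// tokenRequestInParams sends the credentials inside the POST body instead
// (AuthStyleInParams).
func tokenRequestInParams(tokenURL, clientID, clientSecret string, v url.Values) (*http.Request, error) {
	v = cloneValues(v)
	v.Set("client_id", clientID)
	v.Set("client_secret", clientSecret)
	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	return req, nil
}

// cloneValues copies v so the caller's url.Values is not mutated.
func cloneValues(v url.Values) url.Values {
	c := make(url.Values, len(v))
	for k, vals := range v {
		c[k] = append([]string(nil), vals...)
	}
	return c
}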
|
33
vendor/golang.org/x/oauth2/internal/transport.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
var HTTPClient ContextKey
|
||||
|
||||
// ContextKey is just an empty struct. It exists so HTTPClient can be
|
||||
// an immutable public variable with a unique type. It's immutable
|
||||
// because nobody else can create a ContextKey, being unexported.
|
||||
type ContextKey struct{}
|
||||
|
||||
var appengineClientHook func(context.Context) *http.Client
|
||||
|
||||
func ContextClient(ctx context.Context) *http.Client {
|
||||
if ctx != nil {
|
||||
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
|
||||
return hc
|
||||
}
|
||||
}
|
||||
if appengineClientHook != nil {
|
||||
return appengineClientHook(ctx)
|
||||
}
|
||||
return http.DefaultClient
|
||||
}
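ContextClient is what lets callers swap in their own HTTP client for token traffic. A minimal sketch, assuming the public oauth2.HTTPClient key that the top-level package (vendored further below) exposes for exactly this purpose:

package example

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// contextWithTimeoutClient stores a custom *http.Client under the
// oauth2.HTTPClient context key; ContextClient then returns it instead of
// http.DefaultClient for token exchanges. The timeout is an arbitrary example.
func contextWithTimeoutClient(parent context.Context) context.Context {
	hc := &http.Client{Timeout: 10 * time.Second}
	return context.WithValue(parent, oauth2.HTTPClient, hc)
}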
|
381
vendor/golang.org/x/oauth2/oauth2.go
generated
vendored
Normal file
@ -0,0 +1,381 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package oauth2 provides support for making
|
||||
// OAuth2 authorized and authenticated HTTP requests,
|
||||
// as specified in RFC 6749.
|
||||
// It can additionally grant authorization with Bearer JWT.
|
||||
package oauth2 // import "golang.org/x/oauth2"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// NoContext is the default context you should supply if not using
|
||||
// your own context.Context (see https://golang.org/x/net/context).
|
||||
//
|
||||
// Deprecated: Use context.Background() or context.TODO() instead.
|
||||
var NoContext = context.TODO()
|
||||
|
||||
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
|
||||
//
|
||||
// Deprecated: this function no longer does anything. Caller code that
|
||||
// wants to avoid potential extra HTTP requests made during
|
||||
// auto-probing of the provider's auth style should set
|
||||
// Endpoint.AuthStyle.
|
||||
func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
|
||||
|
||||
// Config describes a typical 3-legged OAuth2 flow, with both the
|
||||
// client application information and the server's endpoint URLs.
|
||||
// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
|
||||
// package (https://golang.org/x/oauth2/clientcredentials).
|
||||
type Config struct {
|
||||
// ClientID is the application's ID.
|
||||
ClientID string
|
||||
|
||||
// ClientSecret is the application's secret.
|
||||
ClientSecret string
|
||||
|
||||
// Endpoint contains the resource server's token endpoint
|
||||
// URLs. These are constants specific to each server and are
|
||||
// often available via site-specific packages, such as
|
||||
// google.Endpoint or github.Endpoint.
|
||||
Endpoint Endpoint
|
||||
|
||||
// RedirectURL is the URL to redirect users going through
|
||||
// the OAuth flow, after the resource owner's URLs.
|
||||
RedirectURL string
|
||||
|
||||
// Scope specifies optional requested permissions.
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// A TokenSource is anything that can return a token.
|
||||
type TokenSource interface {
|
||||
// Token returns a token or an error.
|
||||
// Token must be safe for concurrent use by multiple goroutines.
|
||||
// The returned Token must not be modified.
|
||||
Token() (*Token, error)
|
||||
}
|
||||
|
||||
// Endpoint represents an OAuth 2.0 provider's authorization and token
|
||||
// endpoint URLs.
|
||||
type Endpoint struct {
|
||||
AuthURL string
|
||||
TokenURL string
|
||||
|
||||
// AuthStyle optionally specifies how the endpoint wants the
|
||||
// client ID & client secret sent. The zero value means to
|
||||
// auto-detect.
|
||||
AuthStyle AuthStyle
|
||||
}
|
||||
|
||||
// AuthStyle represents how requests for tokens are authenticated
|
||||
// to the server.
|
||||
type AuthStyle int
|
||||
|
||||
const (
|
||||
// AuthStyleAutoDetect means to auto-detect which authentication
|
||||
// style the provider wants by trying both ways and caching
|
||||
// the successful way for the future.
|
||||
AuthStyleAutoDetect AuthStyle = 0
|
||||
|
||||
// AuthStyleInParams sends the "client_id" and "client_secret"
|
||||
// in the POST body as application/x-www-form-urlencoded parameters.
|
||||
AuthStyleInParams AuthStyle = 1
|
||||
|
||||
// AuthStyleInHeader sends the client_id and client_password
|
||||
// using HTTP Basic Authorization. This is an optional style
|
||||
// described in the OAuth2 RFC 6749 section 2.3.1.
|
||||
AuthStyleInHeader AuthStyle = 2
|
||||
)
|
||||
|
||||
var (
|
||||
// AccessTypeOnline and AccessTypeOffline are options passed
|
||||
// to the Options.AuthCodeURL method. They modify the
|
||||
// "access_type" field that gets sent in the URL returned by
|
||||
// AuthCodeURL.
|
||||
//
|
||||
// Online is the default if neither is specified. If your
|
||||
// application needs to refresh access tokens when the user
|
||||
// is not present at the browser, then use offline. This will
|
||||
// result in your application obtaining a refresh token the
|
||||
// first time your application exchanges an authorization
|
||||
// code for a user.
|
||||
AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
|
||||
AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
|
||||
|
||||
// ApprovalForce forces the users to view the consent dialog
|
||||
// and confirm the permissions request at the URL returned
|
||||
// from AuthCodeURL, even if they've already done so.
|
||||
ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent")
|
||||
)
|
||||
|
||||
// An AuthCodeOption is passed to Config.AuthCodeURL.
|
||||
type AuthCodeOption interface {
|
||||
setValue(url.Values)
|
||||
}
|
||||
|
||||
type setParam struct{ k, v string }
|
||||
|
||||
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
|
||||
|
||||
// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
|
||||
// to a provider's authorization endpoint.
|
||||
func SetAuthURLParam(key, value string) AuthCodeOption {
|
||||
return setParam{key, value}
|
||||
}
|
||||
|
||||
// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
|
||||
// that asks for permissions for the required scopes explicitly.
|
||||
//
|
||||
// State is a token to protect the user from CSRF attacks. You must
|
||||
// always provide a non-empty string and validate that it matches the
|
||||
// state query parameter on your redirect callback.
|
||||
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
|
||||
//
|
||||
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
|
||||
// as ApprovalForce.
|
||||
// It can also be used to pass the PKCE challenge.
|
||||
// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
|
||||
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(c.Endpoint.AuthURL)
|
||||
v := url.Values{
|
||||
"response_type": {"code"},
|
||||
"client_id": {c.ClientID},
|
||||
}
|
||||
if c.RedirectURL != "" {
|
||||
v.Set("redirect_uri", c.RedirectURL)
|
||||
}
|
||||
if len(c.Scopes) > 0 {
|
||||
v.Set("scope", strings.Join(c.Scopes, " "))
|
||||
}
|
||||
if state != "" {
|
||||
// TODO(light): Docs say never to omit state; don't allow empty.
|
||||
v.Set("state", state)
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.setValue(v)
|
||||
}
|
||||
if strings.Contains(c.Endpoint.AuthURL, "?") {
|
||||
buf.WriteByte('&')
|
||||
} else {
|
||||
buf.WriteByte('?')
|
||||
}
|
||||
buf.WriteString(v.Encode())
|
||||
return buf.String()
|
||||
}
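A minimal sketch of driving AuthCodeURL from application code; every URL, client ID and scope below is a placeholder, not a real provider value:

package example

import (
	"golang.org/x/oauth2"
)

// consentURL builds the provider's consent-page URL for the given CSRF state.
func consentURL(state string) string {
	conf := &oauth2.Config{
		ClientID:     "my-client-id",
		ClientSecret: "my-client-secret",
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"openid", "profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/authorize",
			TokenURL: "https://provider.example.com/oauth2/token",
		},
	}
	// AccessTypeOffline asks for a refresh token as well.
	return conf.AuthCodeURL(state, oauth2.AccessTypeOffline)
}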
|
||||
|
||||
// PasswordCredentialsToken converts a resource owner username and password
|
||||
// pair into a token.
|
||||
//
|
||||
// Per the RFC, this grant type should only be used "when there is a high
|
||||
// degree of trust between the resource owner and the client (e.g., the client
|
||||
// is part of the device operating system or a highly privileged application),
|
||||
// and when other authorization grant types are not available."
|
||||
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
|
||||
//
|
||||
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
|
||||
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
|
||||
v := url.Values{
|
||||
"grant_type": {"password"},
|
||||
"username": {username},
|
||||
"password": {password},
|
||||
}
|
||||
if len(c.Scopes) > 0 {
|
||||
v.Set("scope", strings.Join(c.Scopes, " "))
|
||||
}
|
||||
return retrieveToken(ctx, c, v)
|
||||
}
|
||||
|
||||
// Exchange converts an authorization code into a token.
|
||||
//
|
||||
// It is used after a resource provider redirects the user back
|
||||
// to the Redirect URI (the URL obtained from AuthCodeURL).
|
||||
//
|
||||
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
|
||||
//
|
||||
// The code will be in the *http.Request.FormValue("code"). Before
|
||||
// calling Exchange, be sure to validate FormValue("state").
|
||||
//
|
||||
// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
|
||||
// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
|
||||
func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
|
||||
v := url.Values{
|
||||
"grant_type": {"authorization_code"},
|
||||
"code": {code},
|
||||
}
|
||||
if c.RedirectURL != "" {
|
||||
v.Set("redirect_uri", c.RedirectURL)
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.setValue(v)
|
||||
}
|
||||
return retrieveToken(ctx, c, v)
|
||||
}
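A sketch of the matching redirect handler: check the state value first, then call Exchange. conf and expectedState are assumed to be supplied by the application (e.g. from a session cookie):

package example

import (
	"net/http"

	"golang.org/x/oauth2"
)

// callbackHandler validates the state parameter and trades the authorization
// code for a token.
func callbackHandler(conf *oauth2.Config, expectedState string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.FormValue("state") != expectedState {
			http.Error(w, "invalid state parameter", http.StatusBadRequest)
			return
		}
		token, err := conf.Exchange(r.Context(), r.FormValue("code"))
		if err != nil {
			http.Error(w, "token exchange failed", http.StatusBadGateway)
			return
		}
		_ = token // persist the token or create a session here
		w.WriteHeader(http.StatusNoContent)
	}
}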
|
||||
|
||||
// Client returns an HTTP client using the provided token.
|
||||
// The token will auto-refresh as necessary. The underlying
|
||||
// HTTP transport will be obtained using the provided context.
|
||||
// The returned client and its Transport should not be modified.
|
||||
func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
|
||||
return NewClient(ctx, c.TokenSource(ctx, t))
|
||||
}
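A short usage sketch of Config.Client; the userinfo URL is a placeholder:

package example

import (
	"context"

	"golang.org/x/oauth2"
)

// fetchProfile shows the usual pattern: Client wraps the token in a transport
// that adds the Authorization header and refreshes the token as needed.
func fetchProfile(ctx context.Context, conf *oauth2.Config, tok *oauth2.Token) error {
	client := conf.Client(ctx, tok)
	resp, err := client.Get("https://provider.example.com/userinfo")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// ... decode the response body here ...
	return nil
}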
|
||||
|
||||
// TokenSource returns a TokenSource that returns t until t expires,
|
||||
// automatically refreshing it as necessary using the provided context.
|
||||
//
|
||||
// Most users will use Config.Client instead.
|
||||
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
|
||||
tkr := &tokenRefresher{
|
||||
ctx: ctx,
|
||||
conf: c,
|
||||
}
|
||||
if t != nil {
|
||||
tkr.refreshToken = t.RefreshToken
|
||||
}
|
||||
return &reuseTokenSource{
|
||||
t: t,
|
||||
new: tkr,
|
||||
}
|
||||
}
|
||||
|
||||
// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
|
||||
// HTTP requests to renew a token using a RefreshToken.
|
||||
type tokenRefresher struct {
|
||||
ctx context.Context // used to get HTTP requests
|
||||
conf *Config
|
||||
refreshToken string
|
||||
}
|
||||
|
||||
// WARNING: Token is not safe for concurrent access, as it
|
||||
// updates the tokenRefresher's refreshToken field.
|
||||
// Within this package, it is used by reuseTokenSource which
|
||||
// synchronizes calls to this method with its own mutex.
|
||||
func (tf *tokenRefresher) Token() (*Token, error) {
|
||||
if tf.refreshToken == "" {
|
||||
return nil, errors.New("oauth2: token expired and refresh token is not set")
|
||||
}
|
||||
|
||||
tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
|
||||
"grant_type": {"refresh_token"},
|
||||
"refresh_token": {tf.refreshToken},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tf.refreshToken != tk.RefreshToken {
|
||||
tf.refreshToken = tk.RefreshToken
|
||||
}
|
||||
return tk, err
|
||||
}
|
||||
|
||||
// reuseTokenSource is a TokenSource that holds a single token in memory
|
||||
// and validates its expiry before each call to retrieve it with
|
||||
// Token. If it's expired, it will be auto-refreshed using the
|
||||
// new TokenSource.
|
||||
type reuseTokenSource struct {
|
||||
new TokenSource // called when t is expired.
|
||||
|
||||
mu sync.Mutex // guards t
|
||||
t *Token
|
||||
}
|
||||
|
||||
// Token returns the current token if it's still valid, else will
|
||||
// refresh the current token (using r.Context for HTTP client
|
||||
// information) and return the new one.
|
||||
func (s *reuseTokenSource) Token() (*Token, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.t.Valid() {
|
||||
return s.t, nil
|
||||
}
|
||||
t, err := s.new.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.t = t
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// StaticTokenSource returns a TokenSource that always returns the same token.
|
||||
// Because the provided token t is never refreshed, StaticTokenSource is only
|
||||
// useful for tokens that never expire.
|
||||
func StaticTokenSource(t *Token) TokenSource {
|
||||
return staticTokenSource{t}
|
||||
}
|
||||
|
||||
// staticTokenSource is a TokenSource that always returns the same Token.
|
||||
type staticTokenSource struct {
|
||||
t *Token
|
||||
}
|
||||
|
||||
func (s staticTokenSource) Token() (*Token, error) {
|
||||
return s.t, nil
|
||||
}
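A sketch of the typical StaticTokenSource use, wrapping a long-lived token (for example an API token read from configuration) that never needs refreshing:

package example

import (
	"context"
	"net/http"

	"golang.org/x/oauth2"
)

// clientFromStaticToken builds an *http.Client that always sends the same
// bearer token on every request.
func clientFromStaticToken(ctx context.Context, accessToken string) *http.Client {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: accessToken})
	return oauth2.NewClient(ctx, src)
}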
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
var HTTPClient internal.ContextKey
|
||||
|
||||
// NewClient creates an *http.Client from a Context and TokenSource.
|
||||
// The returned client is not valid beyond the lifetime of the context.
|
||||
//
|
||||
// Note that if a custom *http.Client is provided via the Context it
|
||||
// is used only for token acquisition and is not used to configure the
|
||||
// *http.Client returned from NewClient.
|
||||
//
|
||||
// As a special case, if src is nil, a non-OAuth2 client is returned
|
||||
// using the provided context. This exists to support related OAuth2
|
||||
// packages.
|
||||
func NewClient(ctx context.Context, src TokenSource) *http.Client {
|
||||
if src == nil {
|
||||
return internal.ContextClient(ctx)
|
||||
}
|
||||
return &http.Client{
|
||||
Transport: &Transport{
|
||||
Base: internal.ContextClient(ctx).Transport,
|
||||
Source: ReuseTokenSource(nil, src),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ReuseTokenSource returns a TokenSource which repeatedly returns the
|
||||
// same token as long as it's valid, starting with t.
|
||||
// When its cached token is invalid, a new token is obtained from src.
|
||||
//
|
||||
// ReuseTokenSource is typically used to reuse tokens from a cache
|
||||
// (such as a file on disk) between runs of a program, rather than
|
||||
// obtaining new tokens unnecessarily.
|
||||
//
|
||||
// The initial token t may be nil, in which case the TokenSource is
|
||||
// wrapped in a caching version if it isn't one already. This also
|
||||
// means it's always safe to wrap ReuseTokenSource around any other
|
||||
// TokenSource without adverse effects.
|
||||
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
|
||||
// Don't wrap a reuseTokenSource in itself. That would work,
|
||||
// but cause an unnecessary number of mutex operations.
|
||||
// Just build the equivalent one.
|
||||
if rt, ok := src.(*reuseTokenSource); ok {
|
||||
if t == nil {
|
||||
// Just use it directly.
|
||||
return rt
|
||||
}
|
||||
src = rt.new
|
||||
}
|
||||
return &reuseTokenSource{
|
||||
t: t,
|
||||
new: src,
|
||||
}
|
||||
}
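A sketch of the cache-between-runs pattern the comment above describes; the file path and JSON layout are assumptions, and src is whatever TokenSource can mint a fresh token once the cached one expires:

package example

import (
	"encoding/json"
	"os"

	"golang.org/x/oauth2"
)

// newCachedSource seeds ReuseTokenSource with a token previously saved to
// disk, so the program reuses it until it actually expires.
func newCachedSource(path string, src oauth2.TokenSource) (oauth2.TokenSource, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var tok oauth2.Token
	if err := json.Unmarshal(data, &tok); err != nil {
		return nil, err
	}
	return oauth2.ReuseTokenSource(&tok, src), nil
}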
|
178
vendor/golang.org/x/oauth2/token.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// expiryDelta determines how much earlier a token should be considered
|
||||
// expired than its actual expiration time. It is used to avoid late
|
||||
// expirations due to client-server time mismatches.
|
||||
const expiryDelta = 10 * time.Second
|
||||
|
||||
// Token represents the credentials used to authorize
|
||||
// the requests to access protected resources on the OAuth 2.0
|
||||
// provider's backend.
|
||||
//
|
||||
// Most users of this package should not access fields of Token
|
||||
// directly. They're exported mostly for use by related packages
|
||||
// implementing derivative OAuth2 flows.
|
||||
type Token struct {
|
||||
// AccessToken is the token that authorizes and authenticates
|
||||
// the requests.
|
||||
AccessToken string `json:"access_token"`
|
||||
|
||||
// TokenType is the type of token.
|
||||
// The Type method returns either this or "Bearer", the default.
|
||||
TokenType string `json:"token_type,omitempty"`
|
||||
|
||||
// RefreshToken is a token that's used by the application
|
||||
// (as opposed to the user) to refresh the access token
|
||||
// if it expires.
|
||||
RefreshToken string `json:"refresh_token,omitempty"`
|
||||
|
||||
// Expiry is the optional expiration time of the access token.
|
||||
//
|
||||
// If zero, TokenSource implementations will reuse the same
|
||||
// token forever and RefreshToken or equivalent
|
||||
// mechanisms for that TokenSource will not be used.
|
||||
Expiry time.Time `json:"expiry,omitempty"`
|
||||
|
||||
// raw optionally contains extra metadata from the server
|
||||
// when updating a token.
|
||||
raw interface{}
|
||||
}
|
||||
|
||||
// Type returns t.TokenType if non-empty, else "Bearer".
|
||||
func (t *Token) Type() string {
|
||||
if strings.EqualFold(t.TokenType, "bearer") {
|
||||
return "Bearer"
|
||||
}
|
||||
if strings.EqualFold(t.TokenType, "mac") {
|
||||
return "MAC"
|
||||
}
|
||||
if strings.EqualFold(t.TokenType, "basic") {
|
||||
return "Basic"
|
||||
}
|
||||
if t.TokenType != "" {
|
||||
return t.TokenType
|
||||
}
|
||||
return "Bearer"
|
||||
}
|
||||
|
||||
// SetAuthHeader sets the Authorization header to r using the access
|
||||
// token in t.
|
||||
//
|
||||
// This method is unnecessary when using Transport or an HTTP Client
|
||||
// returned by this package.
|
||||
func (t *Token) SetAuthHeader(r *http.Request) {
|
||||
r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
|
||||
}
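A sketch of using SetAuthHeader directly on a hand-built request; normally Transport or Config.Client does this automatically:

package example

import (
	"context"
	"net/http"

	"golang.org/x/oauth2"
)

// authedRequest builds a one-off request and stamps the Authorization header
// itself, e.g. "Authorization: Bearer <access token>".
func authedRequest(ctx context.Context, tok *oauth2.Token, url string) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	tok.SetAuthHeader(req)
	return req, nil
}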
|
||||
|
||||
// WithExtra returns a new Token that's a clone of t, but using the
|
||||
// provided raw extra map. This is only intended for use by packages
|
||||
// implementing derivative OAuth2 flows.
|
||||
func (t *Token) WithExtra(extra interface{}) *Token {
|
||||
t2 := new(Token)
|
||||
*t2 = *t
|
||||
t2.raw = extra
|
||||
return t2
|
||||
}
|
||||
|
||||
// Extra returns an extra field.
|
||||
// Extra fields are key-value pairs returned by the server as a
|
||||
// part of the token retrieval response.
|
||||
func (t *Token) Extra(key string) interface{} {
|
||||
if raw, ok := t.raw.(map[string]interface{}); ok {
|
||||
return raw[key]
|
||||
}
|
||||
|
||||
vals, ok := t.raw.(url.Values)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
v := vals.Get(key)
|
||||
switch s := strings.TrimSpace(v); strings.Count(s, ".") {
|
||||
case 0: // Contains no "."; try to parse as int
|
||||
if i, err := strconv.ParseInt(s, 10, 64); err == nil {
|
||||
return i
|
||||
}
|
||||
case 1: // Contains a single "."; try to parse as float
|
||||
if f, err := strconv.ParseFloat(s, 64); err == nil {
|
||||
return f
|
||||
}
|
||||
}
|
||||
|
||||
return v
|
||||
}
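Relevant to the OpenID Connect work this commit is for: when a token is requested with the "openid" scope, the provider's ID token arrives as an extra field of the token response. A sketch of pulling it out via Extra; whether "id_token" is present depends on the provider and the requested scopes:

package example

import (
	"errors"

	"golang.org/x/oauth2"
)

// idTokenFromToken extracts the raw OpenID Connect ID token (a JWT string)
// from the extra fields of the token response.
func idTokenFromToken(tok *oauth2.Token) (string, error) {
	raw, ok := tok.Extra("id_token").(string)
	if !ok || raw == "" {
		return "", errors.New("no id_token in token response")
	}
	return raw, nil
}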
|
||||
|
||||
// timeNow is time.Now but pulled out as a variable for tests.
|
||||
var timeNow = time.Now
|
||||
|
||||
// expired reports whether the token is expired.
|
||||
// t must be non-nil.
|
||||
func (t *Token) expired() bool {
|
||||
if t.Expiry.IsZero() {
|
||||
return false
|
||||
}
|
||||
return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
|
||||
}
|
||||
|
||||
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
|
||||
func (t *Token) Valid() bool {
|
||||
return t != nil && t.AccessToken != "" && !t.expired()
|
||||
}
|
||||
|
||||
// tokenFromInternal maps an *internal.Token struct into
|
||||
// a *Token struct.
|
||||
func tokenFromInternal(t *internal.Token) *Token {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Token{
|
||||
AccessToken: t.AccessToken,
|
||||
TokenType: t.TokenType,
|
||||
RefreshToken: t.RefreshToken,
|
||||
Expiry: t.Expiry,
|
||||
raw: t.Raw,
|
||||
}
|
||||
}
|
||||
|
||||
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
|
||||
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
|
||||
// with an error.
|
||||
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
|
||||
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
|
||||
if err != nil {
|
||||
if rErr, ok := err.(*internal.RetrieveError); ok {
|
||||
return nil, (*RetrieveError)(rErr)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return tokenFromInternal(tk), nil
|
||||
}
|
||||
|
||||
// RetrieveError is the error returned when the token endpoint returns a
|
||||
// non-2XX HTTP status code.
|
||||
type RetrieveError struct {
|
||||
Response *http.Response
|
||||
// Body is the body that was consumed by reading Response.Body.
|
||||
// It may be truncated.
|
||||
Body []byte
|
||||
}
|
||||
|
||||
func (r *RetrieveError) Error() string {
|
||||
return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
|
||||
}
|
89
vendor/golang.org/x/oauth2/transport.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
|
||||
// wrapping a base RoundTripper and adding an Authorization header
|
||||
// with a token from the supplied Sources.
|
||||
//
|
||||
// Transport is a low-level mechanism. Most code will use the
|
||||
// higher-level Config.Client method instead.
|
||||
type Transport struct {
|
||||
// Source supplies the token to add to outgoing requests'
|
||||
// Authorization headers.
|
||||
Source TokenSource
|
||||
|
||||
// Base is the base RoundTripper used to make HTTP requests.
|
||||
// If nil, http.DefaultTransport is used.
|
||||
Base http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip authorizes and authenticates the request with an
|
||||
// access token from Transport's Source.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
reqBodyClosed := false
|
||||
if req.Body != nil {
|
||||
defer func() {
|
||||
if !reqBodyClosed {
|
||||
req.Body.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if t.Source == nil {
|
||||
return nil, errors.New("oauth2: Transport's Source is nil")
|
||||
}
|
||||
token, err := t.Source.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req2 := cloneRequest(req) // per RoundTripper contract
|
||||
token.SetAuthHeader(req2)
|
||||
|
||||
// req.Body is assumed to be closed by the base RoundTripper.
|
||||
reqBodyClosed = true
|
||||
return t.base().RoundTrip(req2)
|
||||
}
|
||||
|
||||
var cancelOnce sync.Once
|
||||
|
||||
// CancelRequest does nothing. It used to be a legacy cancellation mechanism
|
||||
// but now it only logs on first use to warn that it's deprecated.
|
||||
//
|
||||
// Deprecated: use contexts for cancellation instead.
|
||||
func (t *Transport) CancelRequest(req *http.Request) {
|
||||
cancelOnce.Do(func() {
|
||||
log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts")
|
||||
})
|
||||
}
|
||||
|
||||
func (t *Transport) base() http.RoundTripper {
|
||||
if t.Base != nil {
|
||||
return t.Base
|
||||
}
|
||||
return http.DefaultTransport
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header, len(r.Header))
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = append([]string(nil), s...)
|
||||
}
|
||||
return r2
|
||||
}
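A sketch of wiring Transport manually around an existing base RoundTripper, for applications that already have a customized transport they want to keep; the timeout is an arbitrary example value:

package example

import (
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// newAuthedClient builds an *http.Client whose requests carry tokens from src
// while still going through the caller's base RoundTripper.
func newAuthedClient(src oauth2.TokenSource, base http.RoundTripper) *http.Client {
	return &http.Client{
		Timeout: 30 * time.Second,
		Transport: &oauth2.Transport{
			Source: src,
			Base:   base, // nil falls back to http.DefaultTransport
		},
	}
}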
|
202
vendor/google.golang.org/appengine/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
678
vendor/google.golang.org/appengine/internal/api.go
generated
vendored
Normal file
@ -0,0 +1,678 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
|
||||
basepb "google.golang.org/appengine/internal/base"
|
||||
logpb "google.golang.org/appengine/internal/log"
|
||||
remotepb "google.golang.org/appengine/internal/remote_api"
|
||||
)
|
||||
|
||||
const (
|
||||
apiPath = "/rpc_http"
|
||||
defaultTicketSuffix = "/default.20150612t184001.0"
|
||||
)
|
||||
|
||||
var (
|
||||
// Incoming headers.
|
||||
ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
|
||||
dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
|
||||
traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
|
||||
curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
|
||||
userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
|
||||
remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
|
||||
devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
|
||||
|
||||
// Outgoing headers.
|
||||
apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
|
||||
apiEndpointHeaderValue = []string{"app-engine-apis"}
|
||||
apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
|
||||
apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
|
||||
apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
|
||||
apiContentType = http.CanonicalHeaderKey("Content-Type")
|
||||
apiContentTypeValue = []string{"application/octet-stream"}
|
||||
logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
|
||||
|
||||
apiHTTPClient = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: limitDial,
|
||||
MaxIdleConns: 1000,
|
||||
MaxIdleConnsPerHost: 10000,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
defaultTicketOnce sync.Once
|
||||
defaultTicket string
|
||||
backgroundContextOnce sync.Once
|
||||
backgroundContext netcontext.Context
|
||||
)
|
||||
|
||||
func apiURL() *url.URL {
|
||||
host, port := "appengine.googleapis.internal", "10001"
|
||||
if h := os.Getenv("API_HOST"); h != "" {
|
||||
host = h
|
||||
}
|
||||
if p := os.Getenv("API_PORT"); p != "" {
|
||||
port = p
|
||||
}
|
||||
return &url.URL{
|
||||
Scheme: "http",
|
||||
Host: host + ":" + port,
|
||||
Path: apiPath,
|
||||
}
|
||||
}
|
||||
|
||||
func handleHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
c := &context{
|
||||
req: r,
|
||||
outHeader: w.Header(),
|
||||
apiURL: apiURL(),
|
||||
}
|
||||
r = r.WithContext(withContext(r.Context(), c))
|
||||
c.req = r
|
||||
|
||||
stopFlushing := make(chan int)
|
||||
|
||||
// Patch up RemoteAddr so it looks reasonable.
|
||||
if addr := r.Header.Get(userIPHeader); addr != "" {
|
||||
r.RemoteAddr = addr
|
||||
} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
|
||||
r.RemoteAddr = addr
|
||||
} else {
|
||||
// Should not normally reach here, but pick a sensible default anyway.
|
||||
r.RemoteAddr = "127.0.0.1"
|
||||
}
|
||||
// The address in the headers will most likely be of these forms:
|
||||
// 123.123.123.123
|
||||
// 2001:db8::1
|
||||
// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
|
||||
if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
|
||||
// Assume the remote address is only a host; add a default port.
|
||||
r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
|
||||
}
|
||||
|
||||
// Start goroutine responsible for flushing app logs.
|
||||
// This is done after adding c to ctx.m (and stopped before removing it)
|
||||
// because flushing logs requires making an API call.
|
||||
go c.logFlusher(stopFlushing)
|
||||
|
||||
executeRequestSafely(c, r)
|
||||
c.outHeader = nil // make sure header changes aren't respected any more
|
||||
|
||||
stopFlushing <- 1 // any logging beyond this point will be dropped
|
||||
|
||||
// Flush any pending logs asynchronously.
|
||||
c.pendingLogs.Lock()
|
||||
flushes := c.pendingLogs.flushes
|
||||
if len(c.pendingLogs.lines) > 0 {
|
||||
flushes++
|
||||
}
|
||||
c.pendingLogs.Unlock()
|
||||
flushed := make(chan struct{})
|
||||
go func() {
|
||||
defer close(flushed)
|
||||
// Force a log flush, because with very short requests we
|
||||
// may not ever flush logs.
|
||||
c.flushLog(true)
|
||||
}()
|
||||
w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
|
||||
|
||||
// Avoid nil Write call if c.Write is never called.
|
||||
if c.outCode != 0 {
|
||||
w.WriteHeader(c.outCode)
|
||||
}
|
||||
if c.outBody != nil {
|
||||
w.Write(c.outBody)
|
||||
}
|
||||
// Wait for the last flush to complete before returning,
|
||||
// otherwise the security ticket will not be valid.
|
||||
<-flushed
|
||||
}
|
||||
|
||||
func executeRequestSafely(c *context, r *http.Request) {
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
logf(c, 4, "%s", renderPanic(x)) // 4 == critical
|
||||
c.outCode = 500
|
||||
}
|
||||
}()
|
||||
|
||||
http.DefaultServeMux.ServeHTTP(c, r)
|
||||
}
|
||||
|
||||
func renderPanic(x interface{}) string {
|
||||
buf := make([]byte, 16<<10) // 16 KB should be plenty
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
|
||||
// Remove the first few stack frames:
|
||||
// this func
|
||||
// the recover closure in the caller
|
||||
// That will root the stack trace at the site of the panic.
|
||||
const (
|
||||
skipStart = "internal.renderPanic"
|
||||
skipFrames = 2
|
||||
)
|
||||
start := bytes.Index(buf, []byte(skipStart))
|
||||
p := start
|
||||
for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
|
||||
p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
|
||||
if p < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p >= 0 {
|
||||
// buf[start:p+1] is the block to remove.
|
||||
// Copy buf[p+1:] over buf[start:] and shrink buf.
|
||||
copy(buf[start:], buf[p+1:])
|
||||
buf = buf[:len(buf)-(p+1-start)]
|
||||
}
|
||||
|
||||
// Add panic heading.
|
||||
head := fmt.Sprintf("panic: %v\n\n", x)
|
||||
if len(head) > len(buf) {
|
||||
// Extremely unlikely to happen.
|
||||
return head
|
||||
}
|
||||
copy(buf[len(head):], buf)
|
||||
copy(buf, head)
|
||||
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
// context represents the context of an in-flight HTTP request.
|
||||
// It implements the appengine.Context and http.ResponseWriter interfaces.
|
||||
type context struct {
|
||||
req *http.Request
|
||||
|
||||
outCode int
|
||||
outHeader http.Header
|
||||
outBody []byte
|
||||
|
||||
pendingLogs struct {
|
||||
sync.Mutex
|
||||
lines []*logpb.UserAppLogLine
|
||||
flushes int
|
||||
}
|
||||
|
||||
apiURL *url.URL
|
||||
}
|
||||
|
||||
var contextKey = "holds a *context"
|
||||
|
||||
// jointContext joins two contexts in a superficial way.
|
||||
// It takes values and timeouts from a base context, and only values from another context.
|
||||
type jointContext struct {
|
||||
base netcontext.Context
|
||||
valuesOnly netcontext.Context
|
||||
}
|
||||
|
||||
func (c jointContext) Deadline() (time.Time, bool) {
|
||||
return c.base.Deadline()
|
||||
}
|
||||
|
||||
func (c jointContext) Done() <-chan struct{} {
|
||||
return c.base.Done()
|
||||
}
|
||||
|
||||
func (c jointContext) Err() error {
|
||||
return c.base.Err()
|
||||
}
|
||||
|
||||
func (c jointContext) Value(key interface{}) interface{} {
|
||||
if val := c.base.Value(key); val != nil {
|
||||
return val
|
||||
}
|
||||
return c.valuesOnly.Value(key)
|
||||
}
|
||||
|
||||
// fromContext returns the App Engine context or nil if ctx is not
|
||||
// derived from an App Engine context.
|
||||
func fromContext(ctx netcontext.Context) *context {
|
||||
c, _ := ctx.Value(&contextKey).(*context)
|
||||
return c
|
||||
}
|
||||
|
||||
func withContext(parent netcontext.Context, c *context) netcontext.Context {
|
||||
ctx := netcontext.WithValue(parent, &contextKey, c)
|
||||
if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
|
||||
ctx = withNamespace(ctx, ns)
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
func toContext(c *context) netcontext.Context {
|
||||
return withContext(netcontext.Background(), c)
|
||||
}
|
||||
|
||||
func IncomingHeaders(ctx netcontext.Context) http.Header {
|
||||
if c := fromContext(ctx); c != nil {
|
||||
return c.req.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReqContext(req *http.Request) netcontext.Context {
|
||||
return req.Context()
|
||||
}
|
||||
|
||||
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
|
||||
return jointContext{
|
||||
base: parent,
|
||||
valuesOnly: req.Context(),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultTicket returns a ticket used for background context or dev_appserver.
|
||||
func DefaultTicket() string {
|
||||
defaultTicketOnce.Do(func() {
|
||||
if IsDevAppServer() {
|
||||
defaultTicket = "testapp" + defaultTicketSuffix
|
||||
return
|
||||
}
|
||||
appID := partitionlessAppID()
|
||||
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
|
||||
majVersion := VersionID(nil)
|
||||
if i := strings.Index(majVersion, "."); i > 0 {
|
||||
majVersion = majVersion[:i]
|
||||
}
|
||||
defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
|
||||
})
|
||||
return defaultTicket
|
||||
}
|
||||
|
||||
func BackgroundContext() netcontext.Context {
|
||||
backgroundContextOnce.Do(func() {
|
||||
// Compute background security ticket.
|
||||
ticket := DefaultTicket()
|
||||
|
||||
c := &context{
|
||||
req: &http.Request{
|
||||
Header: http.Header{
|
||||
ticketHeader: []string{ticket},
|
||||
},
|
||||
},
|
||||
apiURL: apiURL(),
|
||||
}
|
||||
backgroundContext = toContext(c)
|
||||
|
||||
// TODO(dsymonds): Wire up the shutdown handler to do a final flush.
|
||||
go c.logFlusher(make(chan int))
|
||||
})
|
||||
|
||||
return backgroundContext
|
||||
}
|
||||
|
||||
// RegisterTestRequest registers the HTTP request req for testing, such that
|
||||
// any API calls are sent to the provided URL. It returns a closure to delete
|
||||
// the registration.
|
||||
// It should only be used by aetest package.
|
||||
func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
|
||||
c := &context{
|
||||
req: req,
|
||||
apiURL: apiURL,
|
||||
}
|
||||
ctx := withContext(decorate(req.Context()), c)
|
||||
req = req.WithContext(ctx)
|
||||
c.req = req
|
||||
return req, func() {}
|
||||
}
|
||||
|
||||
var errTimeout = &CallError{
|
||||
Detail: "Deadline exceeded",
|
||||
Code: int32(remotepb.RpcError_CANCELLED),
|
||||
Timeout: true,
|
||||
}
|
||||
|
||||
func (c *context) Header() http.Header { return c.outHeader }
|
||||
|
||||
// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
|
||||
// codes do not permit a response body (nor response entity headers such as
|
||||
// Content-Length, Content-Type, etc).
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
return false
|
||||
case status == 204:
|
||||
return false
|
||||
case status == 304:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *context) Write(b []byte) (int, error) {
|
||||
if c.outCode == 0 {
|
||||
c.WriteHeader(http.StatusOK)
|
||||
}
|
||||
if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
|
||||
return 0, http.ErrBodyNotAllowed
|
||||
}
|
||||
c.outBody = append(c.outBody, b...)
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *context) WriteHeader(code int) {
|
||||
if c.outCode != 0 {
|
||||
logf(c, 3, "WriteHeader called multiple times on request.") // error level
|
||||
return
|
||||
}
|
||||
c.outCode = code
|
||||
}
|
||||
|
||||
func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
|
||||
hreq := &http.Request{
|
||||
Method: "POST",
|
||||
URL: c.apiURL,
|
||||
Header: http.Header{
|
||||
apiEndpointHeader: apiEndpointHeaderValue,
|
||||
apiMethodHeader: apiMethodHeaderValue,
|
||||
apiContentType: apiContentTypeValue,
|
||||
apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
|
||||
},
|
||||
Body: ioutil.NopCloser(bytes.NewReader(body)),
|
||||
ContentLength: int64(len(body)),
|
||||
Host: c.apiURL.Host,
|
||||
}
|
||||
if info := c.req.Header.Get(dapperHeader); info != "" {
|
||||
hreq.Header.Set(dapperHeader, info)
|
||||
}
|
||||
if info := c.req.Header.Get(traceHeader); info != "" {
|
||||
hreq.Header.Set(traceHeader, info)
|
||||
}
|
||||
|
||||
tr := apiHTTPClient.Transport.(*http.Transport)
|
||||
|
||||
var timedOut int32 // atomic; set to 1 if timed out
|
||||
t := time.AfterFunc(timeout, func() {
|
||||
atomic.StoreInt32(&timedOut, 1)
|
||||
tr.CancelRequest(hreq)
|
||||
})
|
||||
defer t.Stop()
|
||||
defer func() {
|
||||
// Check if timeout was exceeded.
|
||||
if atomic.LoadInt32(&timedOut) != 0 {
|
||||
err = errTimeout
|
||||
}
|
||||
}()
|
||||
|
||||
hresp, err := apiHTTPClient.Do(hreq)
|
||||
if err != nil {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
defer hresp.Body.Close()
|
||||
hrespBody, err := ioutil.ReadAll(hresp.Body)
|
||||
if hresp.StatusCode != 200 {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge response bad: %v", err),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
return hrespBody, nil
|
||||
}
|
||||
|
||||
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
|
||||
if ns := NamespaceFromContext(ctx); ns != "" {
|
||||
if fn, ok := NamespaceMods[service]; ok {
|
||||
fn(in, ns)
|
||||
}
|
||||
}
|
||||
|
||||
if f, ctx, ok := callOverrideFromContext(ctx); ok {
|
||||
return f(ctx, service, method, in, out)
|
||||
}
|
||||
|
||||
// Handle already-done contexts quickly.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
c := fromContext(ctx)
|
||||
if c == nil {
|
||||
// Give a good error message rather than a panic lower down.
|
||||
return errNotAppEngineContext
|
||||
}
|
||||
|
||||
// Apply transaction modifications if we're in a transaction.
|
||||
if t := transactionFromContext(ctx); t != nil {
|
||||
if t.finished {
|
||||
return errors.New("transaction context has expired")
}
applyTransaction(in, &t.transaction)
}

// Default RPC timeout is 60s.
timeout := 60 * time.Second
if deadline, ok := ctx.Deadline(); ok {
timeout = deadline.Sub(time.Now())
}

data, err := proto.Marshal(in)
if err != nil {
return err
}

ticket := c.req.Header.Get(ticketHeader)
// Use a test ticket under test environment.
if ticket == "" {
if appid := ctx.Value(&appIDOverrideKey); appid != nil {
ticket = appid.(string) + defaultTicketSuffix
}
}
// Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
if ticket == "" {
ticket = DefaultTicket()
}
if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
ticket = dri
}
req := &remotepb.Request{
ServiceName: &service,
Method: &method,
Request: data,
RequestId: &ticket,
}
hreqBody, err := proto.Marshal(req)
if err != nil {
return err
}

hrespBody, err := c.post(hreqBody, timeout)
if err != nil {
return err
}

res := &remotepb.Response{}
if err := proto.Unmarshal(hrespBody, res); err != nil {
return err
}
if res.RpcError != nil {
ce := &CallError{
Detail: res.RpcError.GetDetail(),
Code: *res.RpcError.Code,
}
switch remotepb.RpcError_ErrorCode(ce.Code) {
case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
ce.Timeout = true
}
return ce
}
if res.ApplicationError != nil {
return &APIError{
Service: *req.ServiceName,
Detail: res.ApplicationError.GetDetail(),
Code: *res.ApplicationError.Code,
}
}
if res.Exception != nil || res.JavaException != nil {
// This shouldn't happen, but let's be defensive.
return &CallError{
Detail: "service bridge returned exception",
Code: int32(remotepb.RpcError_UNKNOWN),
}
}
return proto.Unmarshal(res.Response, out)
}

func (c *context) Request() *http.Request {
return c.req
}

func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
// Truncate long log lines.
// TODO(dsymonds): Check if this is still necessary.
const lim = 8 << 10
if len(*ll.Message) > lim {
suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
}

c.pendingLogs.Lock()
c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
c.pendingLogs.Unlock()
}

var logLevelName = map[int64]string{
0: "DEBUG",
1: "INFO",
2: "WARNING",
3: "ERROR",
4: "CRITICAL",
}

func logf(c *context, level int64, format string, args ...interface{}) {
if c == nil {
panic("not an App Engine context")
}
s := fmt.Sprintf(format, args...)
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
c.addLogLine(&logpb.UserAppLogLine{
TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
Level: &level,
Message: &s,
})
// Only duplicate log to stderr if not running on App Engine second generation
if !IsSecondGen() {
log.Print(logLevelName[level] + ": " + s)
}
}

// flushLog attempts to flush any pending logs to the appserver.
// It should not be called concurrently.
func (c *context) flushLog(force bool) (flushed bool) {
c.pendingLogs.Lock()
// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
n, rem := 0, 30<<20
for ; n < len(c.pendingLogs.lines); n++ {
ll := c.pendingLogs.lines[n]
// Each log line will require about 3 bytes of overhead.
nb := proto.Size(ll) + 3
if nb > rem {
break
}
rem -= nb
}
lines := c.pendingLogs.lines[:n]
c.pendingLogs.lines = c.pendingLogs.lines[n:]
c.pendingLogs.Unlock()

if len(lines) == 0 && !force {
// Nothing to flush.
return false
}

rescueLogs := false
defer func() {
if rescueLogs {
c.pendingLogs.Lock()
c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
c.pendingLogs.Unlock()
}
}()

buf, err := proto.Marshal(&logpb.UserAppLogGroup{
LogLine: lines,
})
if err != nil {
log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
rescueLogs = true
return false
}

req := &logpb.FlushRequest{
Logs: buf,
}
res := &basepb.VoidProto{}
c.pendingLogs.Lock()
c.pendingLogs.flushes++
c.pendingLogs.Unlock()
if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
log.Printf("internal.flushLog: Flush RPC: %v", err)
rescueLogs = true
return false
}
return true
}

const (
// Log flushing parameters.
flushInterval = 1 * time.Second
forceFlushInterval = 60 * time.Second
)

func (c *context) logFlusher(stop <-chan int) {
lastFlush := time.Now()
tick := time.NewTicker(flushInterval)
for {
select {
case <-stop:
// Request finished.
tick.Stop()
return
case <-tick.C:
force := time.Now().Sub(lastFlush) > forceFlushInterval
if c.flushLog(force) {
lastFlush = time.Now()
}
}
}
}

func ContextForTesting(req *http.Request) netcontext.Context {
return toContext(&context{req: req})
}
169 vendor/google.golang.org/appengine/internal/api_classic.go generated vendored Normal file
@ -0,0 +1,169 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appengine

package internal

import (
"errors"
"fmt"
"net/http"
"time"

"appengine"
"appengine_internal"
basepb "appengine_internal/base"

"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
)

var contextKey = "holds an appengine.Context"

// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
func fromContext(ctx netcontext.Context) appengine.Context {
c, _ := ctx.Value(&contextKey).(appengine.Context)
return c
}

// This is only for classic App Engine adapters.
func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
c := fromContext(ctx)
if c == nil {
return nil, errNotAppEngineContext
}
return c, nil
}

func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
ctx := netcontext.WithValue(parent, &contextKey, c)

s := &basepb.StringProto{}
c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
if ns := s.GetValue(); ns != "" {
ctx = NamespacedContext(ctx, ns)
}

return ctx
}

func IncomingHeaders(ctx netcontext.Context) http.Header {
if c := fromContext(ctx); c != nil {
if req, ok := c.Request().(*http.Request); ok {
return req.Header
}
}
return nil
}

func ReqContext(req *http.Request) netcontext.Context {
return WithContext(netcontext.Background(), req)
}

func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
c := appengine.NewContext(req)
return withContext(parent, c)
}

type testingContext struct {
appengine.Context

req *http.Request
}

func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
if service == "__go__" && method == "GetNamespace" {
return nil
}
return fmt.Errorf("testingContext: unsupported Call")
}
func (t *testingContext) Request() interface{} { return t.req }

func ContextForTesting(req *http.Request) netcontext.Context {
return withContext(netcontext.Background(), &testingContext{req: req})
}

func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
}
}

if f, ctx, ok := callOverrideFromContext(ctx); ok {
return f(ctx, service, method, in, out)
}

// Handle already-done contexts quickly.
select {
case <-ctx.Done():
return ctx.Err()
default:
}

c := fromContext(ctx)
if c == nil {
// Give a good error message rather than a panic lower down.
return errNotAppEngineContext
}

// Apply transaction modifications if we're in a transaction.
if t := transactionFromContext(ctx); t != nil {
if t.finished {
return errors.New("transaction context has expired")
}
applyTransaction(in, &t.transaction)
}

var opts *appengine_internal.CallOptions
if d, ok := ctx.Deadline(); ok {
opts = &appengine_internal.CallOptions{
Timeout: d.Sub(time.Now()),
}
}

err := c.Call(service, method, in, out, opts)
switch v := err.(type) {
case *appengine_internal.APIError:
return &APIError{
Service: v.Service,
Detail: v.Detail,
Code: v.Code,
}
case *appengine_internal.CallError:
return &CallError{
Detail: v.Detail,
Code: v.Code,
Timeout: v.Timeout,
}
}
return err
}

func handleHTTP(w http.ResponseWriter, r *http.Request) {
panic("handleHTTP called; this should be impossible")
}

func logf(c appengine.Context, level int64, format string, args ...interface{}) {
var fn func(format string, args ...interface{})
switch level {
case 0:
fn = c.Debugf
case 1:
fn = c.Infof
case 2:
fn = c.Warningf
case 3:
fn = c.Errorf
case 4:
fn = c.Criticalf
default:
// This shouldn't happen.
fn = c.Criticalf
}
fn(format, args...)
}
123 vendor/google.golang.org/appengine/internal/api_common.go generated vendored Normal file
@ -0,0 +1,123 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

import (
"errors"
"os"

"github.com/golang/protobuf/proto"
netcontext "golang.org/x/net/context"
)

var errNotAppEngineContext = errors.New("not an App Engine context")

type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error

var callOverrideKey = "holds []CallOverrideFunc"

func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
// We avoid appending to any existing call override
// so we don't risk overwriting a popped stack below.
var cofs []CallOverrideFunc
if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
cofs = append(cofs, uf...)
}
cofs = append(cofs, f)
return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}

func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
if len(cofs) == 0 {
return nil, nil, false
}
// We found a list of overrides; grab the last, and reconstitute a
// context that will hide it.
f := cofs[len(cofs)-1]
ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
return f, ctx, true
}

type logOverrideFunc func(level int64, format string, args ...interface{})

var logOverrideKey = "holds a logOverrideFunc"

func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
return netcontext.WithValue(ctx, &logOverrideKey, f)
}

var appIDOverrideKey = "holds a string, being the full app ID"

func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
}

var namespaceKey = "holds the namespace string"

func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
return netcontext.WithValue(ctx, &namespaceKey, ns)
}

func NamespaceFromContext(ctx netcontext.Context) string {
// If there's no namespace, return the empty string.
ns, _ := ctx.Value(&namespaceKey).(string)
return ns
}

// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
func FullyQualifiedAppID(ctx netcontext.Context) string {
if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
return id
}
return fullyQualifiedAppID(ctx)
}

func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
f(level, format, args...)
return
}
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
logf(c, level, format, args...)
}

// NamespacedContext wraps a Context to support namespaces.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
return withNamespace(ctx, namespace)
}

// SetTestEnv sets the env variables for testing background ticket in Flex.
func SetTestEnv() func() {
var environ = []struct {
key, value string
}{
{"GAE_LONG_APP_ID", "my-app-id"},
{"GAE_MINOR_VERSION", "067924799508853122"},
{"GAE_MODULE_INSTANCE", "0"},
{"GAE_MODULE_NAME", "default"},
{"GAE_MODULE_VERSION", "20150612t184001"},
}

for _, v := range environ {
old := os.Getenv(v.key)
os.Setenv(v.key, v.value)
v.value = old
}
return func() { // Restore old environment after the test completes.
for _, v := range environ {
if v.value == "" {
os.Unsetenv(v.key)
continue
}
os.Setenv(v.key, v.value)
}
}
}
28 vendor/google.golang.org/appengine/internal/app_id.go generated vendored Normal file
@ -0,0 +1,28 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

import (
"strings"
)

func parseFullAppID(appid string) (partition, domain, displayID string) {
if i := strings.Index(appid, "~"); i != -1 {
partition, appid = appid[:i], appid[i+1:]
}
if i := strings.Index(appid, ":"); i != -1 {
domain, appid = appid[:i], appid[i+1:]
}
return partition, domain, appid
}

// appID returns "appid" or "domain.com:appid".
func appID(fullAppID string) string {
_, dom, dis := parseFullAppID(fullAppID)
if dom != "" {
return dom + ":" + dis
}
return dis
}
308 vendor/google.golang.org/appengine/internal/base/api_base.pb.go generated vendored Normal file
File diff suppressed because it is too large
Load Diff
33 vendor/google.golang.org/appengine/internal/base/api_base.proto generated vendored Normal file
@ -0,0 +1,33 @@
// Built-in base types for API calls. Primarily useful as return types.

syntax = "proto2";
option go_package = "base";

package appengine.base;

message StringProto {
required string value = 1;
}

message Integer32Proto {
required int32 value = 1;
}

message Integer64Proto {
required int64 value = 1;
}

message BoolProto {
required bool value = 1;
}

message DoubleProto {
required double value = 1;
}

message BytesProto {
required bytes value = 1 [ctype=CORD];
}

message VoidProto {
}
4367 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go generated vendored Normal file
File diff suppressed because it is too large
Load Diff
551 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto generated vendored Normal file
File diff suppressed because it is too large
Load Diff
55 vendor/google.golang.org/appengine/internal/identity.go generated vendored Normal file
@ -0,0 +1,55 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

import (
"os"

netcontext "golang.org/x/net/context"
)

var (
// This is set to true in identity_classic.go, which is behind the appengine build tag.
// The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
// the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
// first-gen runtime. See IsStandard below for the second-gen check.
appengineStandard bool

// This is set to true in identity_flex.go, which is behind the appenginevm build tag.
appengineFlex bool
)

// AppID is the implementation of the wrapper function of the same name in
// ../identity.go. See that file for commentary.
func AppID(c netcontext.Context) string {
return appID(FullyQualifiedAppID(c))
}

// IsStandard is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsStandard() bool {
// appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
// second-gen (>= Go 1.11).
return appengineStandard || IsSecondGen()
}

// IsStandard is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsSecondGen() bool {
// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
return os.Getenv("GAE_ENV") == "standard"
}

// IsFlex is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsFlex() bool {
return appengineFlex
}

// IsAppEngine is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsAppEngine() bool {
return IsStandard() || IsFlex()
}
61 vendor/google.golang.org/appengine/internal/identity_classic.go generated vendored Normal file
@ -0,0 +1,61 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appengine

package internal

import (
"appengine"

netcontext "golang.org/x/net/context"
)

func init() {
appengineStandard = true
}

func DefaultVersionHostname(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.DefaultVersionHostname(c)
}

func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
func ServerSoftware() string { return appengine.ServerSoftware() }
func InstanceID() string { return appengine.InstanceID() }
func IsDevAppServer() bool { return appengine.IsDevAppServer() }

func RequestID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.RequestID(c)
}

func ModuleName(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.ModuleName(c)
}
func VersionID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.VersionID(c)
}

func fullyQualifiedAppID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return c.FullyQualifiedAppID()
}
11 vendor/google.golang.org/appengine/internal/identity_flex.go generated vendored Normal file
@ -0,0 +1,11 @@
// Copyright 2018 Google LLC. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appenginevm

package internal

func init() {
appengineFlex = true
}
134 vendor/google.golang.org/appengine/internal/identity_vm.go generated vendored Normal file
@ -0,0 +1,134 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build !appengine

package internal

import (
"log"
"net/http"
"os"
"strings"

netcontext "golang.org/x/net/context"
)

// These functions are implementations of the wrapper functions
// in ../appengine/identity.go. See that file for commentary.

const (
hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
hRequestLogId = "X-AppEngine-Request-Log-Id"
hDatacenter = "X-AppEngine-Datacenter"
)

func ctxHeaders(ctx netcontext.Context) http.Header {
c := fromContext(ctx)
if c == nil {
return nil
}
return c.Request().Header
}

func DefaultVersionHostname(ctx netcontext.Context) string {
return ctxHeaders(ctx).Get(hDefaultVersionHostname)
}

func RequestID(ctx netcontext.Context) string {
return ctxHeaders(ctx).Get(hRequestLogId)
}

func Datacenter(ctx netcontext.Context) string {
if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
return dc
}
// If the header isn't set, read zone from the metadata service.
// It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
zone, err := getMetadata("instance/zone")
if err != nil {
log.Printf("Datacenter: %v", err)
return ""
}
parts := strings.Split(string(zone), "/")
if len(parts) == 0 {
return ""
}
return parts[len(parts)-1]
}

func ServerSoftware() string {
// TODO(dsymonds): Remove fallback when we've verified this.
if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
return s
}
if s := os.Getenv("GAE_ENV"); s != "" {
return s
}
return "Google App Engine/1.x.x"
}

// TODO(dsymonds): Remove the metadata fetches.

func ModuleName(_ netcontext.Context) string {
if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
return s
}
if s := os.Getenv("GAE_SERVICE"); s != "" {
return s
}
return string(mustGetMetadata("instance/attributes/gae_backend_name"))
}

func VersionID(_ netcontext.Context) string {
if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
return s1 + "." + s2
}
if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
return s1 + "." + s2
}
return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
}

func InstanceID() string {
if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
return s
}
if s := os.Getenv("GAE_INSTANCE"); s != "" {
return s
}
return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
}

func partitionlessAppID() string {
// gae_project has everything except the partition prefix.
if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
return appID
}
if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
return project
}
return string(mustGetMetadata("instance/attributes/gae_project"))
}

func fullyQualifiedAppID(_ netcontext.Context) string {
if s := os.Getenv("GAE_APPLICATION"); s != "" {
return s
}
appID := partitionlessAppID()

part := os.Getenv("GAE_PARTITION")
if part == "" {
part = string(mustGetMetadata("instance/attributes/gae_partition"))
}

if part != "" {
appID = part + "~" + appID
}
return appID
}

func IsDevAppServer() bool {
return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
}
110 vendor/google.golang.org/appengine/internal/internal.go generated vendored Normal file
@ -0,0 +1,110 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// Package internal provides support for package appengine.
//
// Programs should not use this package directly. Its API is not stable.
// Use packages appengine and appengine/* instead.
package internal

import (
"fmt"

"github.com/golang/protobuf/proto"

remotepb "google.golang.org/appengine/internal/remote_api"
)

// errorCodeMaps is a map of service name to the error code map for the service.
var errorCodeMaps = make(map[string]map[int32]string)

// RegisterErrorCodeMap is called from API implementations to register their
// error code map. This should only be called from init functions.
func RegisterErrorCodeMap(service string, m map[int32]string) {
errorCodeMaps[service] = m
}

type timeoutCodeKey struct {
service string
code int32
}

// timeoutCodes is the set of service+code pairs that represent timeouts.
var timeoutCodes = make(map[timeoutCodeKey]bool)

func RegisterTimeoutErrorCode(service string, code int32) {
timeoutCodes[timeoutCodeKey{service, code}] = true
}

// APIError is the type returned by appengine.Context's Call method
// when an API call fails in an API-specific way. This may be, for instance,
// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
type APIError struct {
Service string
Detail string
Code int32 // API-specific error code
}

func (e *APIError) Error() string {
if e.Code == 0 {
if e.Detail == "" {
return "APIError <empty>"
}
return e.Detail
}
s := fmt.Sprintf("API error %d", e.Code)
if m, ok := errorCodeMaps[e.Service]; ok {
s += " (" + e.Service + ": " + m[e.Code] + ")"
} else {
// Shouldn't happen, but provide a bit more detail if it does.
s = e.Service + " " + s
}
if e.Detail != "" {
s += ": " + e.Detail
}
return s
}

func (e *APIError) IsTimeout() bool {
return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
}

// CallError is the type returned by appengine.Context's Call method when an
// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
type CallError struct {
Detail string
Code int32
// TODO: Remove this if we get a distinguishable error code.
Timeout bool
}

func (e *CallError) Error() string {
var msg string
switch remotepb.RpcError_ErrorCode(e.Code) {
case remotepb.RpcError_UNKNOWN:
return e.Detail
case remotepb.RpcError_OVER_QUOTA:
msg = "Over quota"
case remotepb.RpcError_CAPABILITY_DISABLED:
msg = "Capability disabled"
case remotepb.RpcError_CANCELLED:
msg = "Canceled"
default:
msg = fmt.Sprintf("Call error %d", e.Code)
}
s := msg + ": " + e.Detail
if e.Timeout {
s += " (timeout)"
}
return s
}

func (e *CallError) IsTimeout() bool {
return e.Timeout
}

// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
// The function should be prepared to be called on the same message more than once; it should only modify the
// RPC request the first time.
var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
1313 vendor/google.golang.org/appengine/internal/log/log_service.pb.go generated vendored Normal file
File diff suppressed because it is too large
Load Diff
150 vendor/google.golang.org/appengine/internal/log/log_service.proto generated vendored Normal file
@ -0,0 +1,150 @@
syntax = "proto2";
option go_package = "log";

package appengine;

message LogServiceError {
enum ErrorCode {
OK = 0;
INVALID_REQUEST = 1;
STORAGE_ERROR = 2;
}
}

message UserAppLogLine {
required int64 timestamp_usec = 1;
required int64 level = 2;
required string message = 3;
}

message UserAppLogGroup {
repeated UserAppLogLine log_line = 2;
}

message FlushRequest {
optional bytes logs = 1;
}

message SetStatusRequest {
required string status = 1;
}


message LogOffset {
optional bytes request_id = 1;
}

message LogLine {
required int64 time = 1;
required int32 level = 2;
required string log_message = 3;
}

message RequestLog {
required string app_id = 1;
optional string module_id = 37 [default="default"];
required string version_id = 2;
required bytes request_id = 3;
optional LogOffset offset = 35;
required string ip = 4;
optional string nickname = 5;
required int64 start_time = 6;
required int64 end_time = 7;
required int64 latency = 8;
required int64 mcycles = 9;
required string method = 10;
required string resource = 11;
required string http_version = 12;
required int32 status = 13;
required int64 response_size = 14;
optional string referrer = 15;
optional string user_agent = 16;
required string url_map_entry = 17;
required string combined = 18;
optional int64 api_mcycles = 19;
optional string host = 20;
optional double cost = 21;

optional string task_queue_name = 22;
optional string task_name = 23;

optional bool was_loading_request = 24;
optional int64 pending_time = 25;
optional int32 replica_index = 26 [default = -1];
optional bool finished = 27 [default = true];
optional bytes clone_key = 28;

repeated LogLine line = 29;

optional bool lines_incomplete = 36;
optional bytes app_engine_release = 38;

optional int32 exit_reason = 30;
optional bool was_throttled_for_time = 31;
optional bool was_throttled_for_requests = 32;
optional int64 throttled_time = 33;

optional bytes server_name = 34;
}

message LogModuleVersion {
optional string module_id = 1 [default="default"];
optional string version_id = 2;
}

message LogReadRequest {
required string app_id = 1;
repeated string version_id = 2;
repeated LogModuleVersion module_version = 19;

optional int64 start_time = 3;
optional int64 end_time = 4;
optional LogOffset offset = 5;
repeated bytes request_id = 6;

optional int32 minimum_log_level = 7;
optional bool include_incomplete = 8;
optional int64 count = 9;

optional string combined_log_regex = 14;
optional string host_regex = 15;
optional int32 replica_index = 16;

optional bool include_app_logs = 10;
optional int32 app_logs_per_request = 17;
optional bool include_host = 11;
optional bool include_all = 12;
optional bool cache_iterator = 13;
optional int32 num_shards = 18;
}

message LogReadResponse {
repeated RequestLog log = 1;
optional LogOffset offset = 2;
optional int64 last_end_time = 3;
}

message LogUsageRecord {
optional string version_id = 1;
optional int32 start_time = 2;
optional int32 end_time = 3;
optional int64 count = 4;
optional int64 total_size = 5;
optional int32 records = 6;
}

message LogUsageRequest {
required string app_id = 1;
repeated string version_id = 2;
optional int32 start_time = 3;
optional int32 end_time = 4;
optional uint32 resolution_hours = 5 [default = 1];
optional bool combine_versions = 6;
optional int32 usage_version = 7;
optional bool versions_only = 8;
}

message LogUsageResponse {
repeated LogUsageRecord usage = 1;
optional LogUsageRecord summary = 2;
}
16
vendor/google.golang.org/appengine/internal/main.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build appengine

package internal

import (
    "appengine_internal"
)

func Main() {
    MainPath = ""
    appengine_internal.Main()
}
7
vendor/google.golang.org/appengine/internal/main_common.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
package internal

// MainPath stores the file path of the main package. On App Engine Standard
// using Go version 1.9 and below, this will be unset. On App Engine Flex and
// App Engine Standard second-gen (Go 1.11 and above), this will be the
// filepath to package main.
var MainPath string
69
vendor/google.golang.org/appengine/internal/main_vm.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

// +build !appengine

package internal

import (
    "io"
    "log"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "runtime"
)

func Main() {
    MainPath = filepath.Dir(findMainPath())
    installHealthChecker(http.DefaultServeMux)

    port := "8080"
    if s := os.Getenv("PORT"); s != "" {
        port = s
    }

    host := ""
    if IsDevAppServer() {
        host = "127.0.0.1"
    }
    if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
        log.Fatalf("http.ListenAndServe: %v", err)
    }
}

// Find the path to package main by looking at the root Caller.
func findMainPath() string {
    pc := make([]uintptr, 100)
    n := runtime.Callers(2, pc)
    frames := runtime.CallersFrames(pc[:n])
    for {
        frame, more := frames.Next()
        // Tests won't have package main, instead they have testing.tRunner
        if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
            return frame.File
        }
        if !more {
            break
        }
    }
    return ""
}

func installHealthChecker(mux *http.ServeMux) {
    // If no health check handler has been installed by this point, add a trivial one.
    const healthPath = "/_ah/health"
    hreq := &http.Request{
        Method: "GET",
        URL: &url.URL{
            Path: healthPath,
        },
    }
    if _, pat := mux.Handler(hreq); pat != healthPath {
        mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
            io.WriteString(w, "ok")
        })
    }
}
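installHealthChecker only adds its trivial handler when nothing else is registered at /_ah/health, so an application can own that endpoint by registering a handler before Main runs. A minimal sketch, assuming the app serves on http.DefaultServeMux and uses the public appengine.Main entry point:

// Minimal sketch: a custom health handler registered before appengine.Main(),
// which installHealthChecker will then leave untouched.
package main

import (
    "io"
    "net/http"

    "google.golang.org/appengine"
)

func main() {
    http.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
        // Replace with a real readiness check (database ping, cache warm-up, ...).
        io.WriteString(w, "ok")
    })
    appengine.Main()
}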
60
vendor/google.golang.org/appengine/internal/metadata.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

// This file has code for accessing metadata.
//
// References:
//   https://cloud.google.com/compute/docs/metadata

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
)

const (
    metadataHost = "metadata"
    metadataPath = "/computeMetadata/v1/"
)

var (
    metadataRequestHeaders = http.Header{
        "Metadata-Flavor": []string{"Google"},
    }
)

// TODO(dsymonds): Do we need to support default values, like Python?
func mustGetMetadata(key string) []byte {
    b, err := getMetadata(key)
    if err != nil {
        panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
    }
    return b
}

func getMetadata(key string) ([]byte, error) {
    // TODO(dsymonds): May need to use url.Parse to support keys with query args.
    req := &http.Request{
        Method: "GET",
        URL: &url.URL{
            Scheme: "http",
            Host:   metadataHost,
            Path:   metadataPath + key,
        },
        Header: metadataRequestHeaders,
        Host:   metadataHost,
    }
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != 200 {
        return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
    }
    return ioutil.ReadAll(resp.Body)
}
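getMetadata and mustGetMetadata are unexported, so their callers live in this same package. A small illustrative helper, not part of this commit; "instance/zone" is a standard Compute Engine metadata key used purely as an example:

package internal

// exampleZone is an illustrative helper (not part of this commit) showing how
// code in this package could consume getMetadata. "instance/zone" is a
// standard GCE metadata key, used here purely as an example.
func exampleZone() string {
    b, err := getMetadata("instance/zone")
    if err != nil {
        // mustGetMetadata would panic instead; prefer the error form when the
        // caller can degrade gracefully.
        return "unknown"
    }
    return string(b)
}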
56
vendor/google.golang.org/appengine/internal/net.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

// This file implements a network dialer that limits the number of concurrent connections.
// It is only used for API calls.

import (
    "log"
    "net"
    "runtime"
    "sync"
    "time"
)

var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.

func limitRelease() {
    // non-blocking
    select {
    case <-limitSem:
    default:
        // This should not normally happen.
        log.Print("appengine: unbalanced limitSem release!")
    }
}

func limitDial(network, addr string) (net.Conn, error) {
    limitSem <- 1

    // Dial with a timeout in case the API host is MIA.
    // The connection should normally be very fast.
    conn, err := net.DialTimeout(network, addr, 10*time.Second)
    if err != nil {
        limitRelease()
        return nil, err
    }
    lc := &limitConn{Conn: conn}
    runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
    return lc, nil
}

type limitConn struct {
    close sync.Once
    net.Conn
}

func (lc *limitConn) Close() error {
    defer lc.close.Do(func() {
        limitRelease()
        runtime.SetFinalizer(lc, nil)
    })
    return lc.Conn.Close()
}
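limitDial deliberately matches the func(network, addr string) (net.Conn, error) shape expected by http.Transport.Dial, which is how the concurrency-capped dialer can end up behind an HTTP client used for API calls. A one-line wiring sketch, assuming it sits in this same package:

package internal

import "net/http"

// exampleTransport is an illustrative sketch (not part of this commit): the
// concurrency-capped dialer plugged into an http.Transport via its Dial field.
var exampleTransport = &http.Transport{
    Dial: limitDial, // at most 100 concurrent connections, bounded by limitSem above
}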
40
vendor/google.golang.org/appengine/internal/regen.sh
generated
vendored
Normal file
@ -0,0 +1,40 @@
#!/bin/bash -e
#
# This script rebuilds the generated code for the protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.

PKG=google.golang.org/appengine

function die() {
    echo 1>&2 $*
    exit 1
}

# Sanity check that the right tools are accessible.
for tool in go protoc protoc-gen-go; do
    q=$(which $tool) || die "didn't find $tool"
    echo 1>&2 "$tool: $q"
done

echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base

# Run protoc once per package.
for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
    echo 1>&2 "* $dir"
    protoc --go_out=. $dir/*.proto
done

for f in $(find $PKG/internal -name '*.pb.go'); do
    # Remove proto.RegisterEnum calls.
    # These cause duplicate registration panics when these packages
    # are used on classic App Engine. proto.RegisterEnum only affects
    # parsing the text format; we don't care about that.
    # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
    sed -i '/proto.RegisterEnum/d' $f
done
361
vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
generated
vendored
Normal file
@ -0,0 +1,361 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
|
||||
|
||||
package remote_api
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type RpcError_ErrorCode int32
|
||||
|
||||
const (
|
||||
RpcError_UNKNOWN RpcError_ErrorCode = 0
|
||||
RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
|
||||
RpcError_PARSE_ERROR RpcError_ErrorCode = 2
|
||||
RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
|
||||
RpcError_OVER_QUOTA RpcError_ErrorCode = 4
|
||||
RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
|
||||
RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
|
||||
RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
|
||||
RpcError_BAD_REQUEST RpcError_ErrorCode = 8
|
||||
RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
|
||||
RpcError_CANCELLED RpcError_ErrorCode = 10
|
||||
RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
|
||||
RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
|
||||
)
|
||||
|
||||
var RpcError_ErrorCode_name = map[int32]string{
|
||||
0: "UNKNOWN",
|
||||
1: "CALL_NOT_FOUND",
|
||||
2: "PARSE_ERROR",
|
||||
3: "SECURITY_VIOLATION",
|
||||
4: "OVER_QUOTA",
|
||||
5: "REQUEST_TOO_LARGE",
|
||||
6: "CAPABILITY_DISABLED",
|
||||
7: "FEATURE_DISABLED",
|
||||
8: "BAD_REQUEST",
|
||||
9: "RESPONSE_TOO_LARGE",
|
||||
10: "CANCELLED",
|
||||
11: "REPLAY_ERROR",
|
||||
12: "DEADLINE_EXCEEDED",
|
||||
}
|
||||
var RpcError_ErrorCode_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"CALL_NOT_FOUND": 1,
|
||||
"PARSE_ERROR": 2,
|
||||
"SECURITY_VIOLATION": 3,
|
||||
"OVER_QUOTA": 4,
|
||||
"REQUEST_TOO_LARGE": 5,
|
||||
"CAPABILITY_DISABLED": 6,
|
||||
"FEATURE_DISABLED": 7,
|
||||
"BAD_REQUEST": 8,
|
||||
"RESPONSE_TOO_LARGE": 9,
|
||||
"CANCELLED": 10,
|
||||
"REPLAY_ERROR": 11,
|
||||
"DEADLINE_EXCEEDED": 12,
|
||||
}
|
||||
|
||||
func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
|
||||
p := new(RpcError_ErrorCode)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x RpcError_ErrorCode) String() string {
|
||||
return proto.EnumName(RpcError_ErrorCode_name, int32(x))
|
||||
}
|
||||
func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = RpcError_ErrorCode(value)
|
||||
return nil
|
||||
}
|
||||
func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
|
||||
}
|
||||
|
||||
type Request struct {
|
||||
ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
|
||||
Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
|
||||
Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
|
||||
RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Request) Reset() { *m = Request{} }
|
||||
func (m *Request) String() string { return proto.CompactTextString(m) }
|
||||
func (*Request) ProtoMessage() {}
|
||||
func (*Request) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
|
||||
}
|
||||
func (m *Request) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Request.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Request.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Request) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Request.Merge(dst, src)
|
||||
}
|
||||
func (m *Request) XXX_Size() int {
|
||||
return xxx_messageInfo_Request.Size(m)
|
||||
}
|
||||
func (m *Request) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Request.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Request proto.InternalMessageInfo
|
||||
|
||||
func (m *Request) GetServiceName() string {
|
||||
if m != nil && m.ServiceName != nil {
|
||||
return *m.ServiceName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Request) GetMethod() string {
|
||||
if m != nil && m.Method != nil {
|
||||
return *m.Method
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Request) GetRequest() []byte {
|
||||
if m != nil {
|
||||
return m.Request
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Request) GetRequestId() string {
|
||||
if m != nil && m.RequestId != nil {
|
||||
return *m.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ApplicationError struct {
|
||||
Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
|
||||
Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplicationError) Reset() { *m = ApplicationError{} }
|
||||
func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
|
||||
func (*ApplicationError) ProtoMessage() {}
|
||||
func (*ApplicationError) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
|
||||
}
|
||||
func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *ApplicationError) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ApplicationError.Merge(dst, src)
|
||||
}
|
||||
func (m *ApplicationError) XXX_Size() int {
|
||||
return xxx_messageInfo_ApplicationError.Size(m)
|
||||
}
|
||||
func (m *ApplicationError) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ApplicationError.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
|
||||
|
||||
func (m *ApplicationError) GetCode() int32 {
|
||||
if m != nil && m.Code != nil {
|
||||
return *m.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ApplicationError) GetDetail() string {
|
||||
if m != nil && m.Detail != nil {
|
||||
return *m.Detail
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RpcError struct {
|
||||
Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
|
||||
Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RpcError) Reset() { *m = RpcError{} }
|
||||
func (m *RpcError) String() string { return proto.CompactTextString(m) }
|
||||
func (*RpcError) ProtoMessage() {}
|
||||
func (*RpcError) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
|
||||
}
|
||||
func (m *RpcError) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RpcError.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *RpcError) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RpcError.Merge(dst, src)
|
||||
}
|
||||
func (m *RpcError) XXX_Size() int {
|
||||
return xxx_messageInfo_RpcError.Size(m)
|
||||
}
|
||||
func (m *RpcError) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RpcError.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RpcError proto.InternalMessageInfo
|
||||
|
||||
func (m *RpcError) GetCode() int32 {
|
||||
if m != nil && m.Code != nil {
|
||||
return *m.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RpcError) GetDetail() string {
|
||||
if m != nil && m.Detail != nil {
|
||||
return *m.Detail
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Response struct {
|
||||
Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
|
||||
Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
|
||||
ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
|
||||
JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
|
||||
RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Response) Reset() { *m = Response{} }
|
||||
func (m *Response) String() string { return proto.CompactTextString(m) }
|
||||
func (*Response) ProtoMessage() {}
|
||||
func (*Response) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
|
||||
}
|
||||
func (m *Response) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Response.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Response) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Response.Merge(dst, src)
|
||||
}
|
||||
func (m *Response) XXX_Size() int {
|
||||
return xxx_messageInfo_Response.Size(m)
|
||||
}
|
||||
func (m *Response) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Response.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Response proto.InternalMessageInfo
|
||||
|
||||
func (m *Response) GetResponse() []byte {
|
||||
if m != nil {
|
||||
return m.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetException() []byte {
|
||||
if m != nil {
|
||||
return m.Exception
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetApplicationError() *ApplicationError {
|
||||
if m != nil {
|
||||
return m.ApplicationError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetJavaException() []byte {
|
||||
if m != nil {
|
||||
return m.JavaException
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetRpcError() *RpcError {
|
||||
if m != nil {
|
||||
return m.RpcError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Request)(nil), "remote_api.Request")
|
||||
proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
|
||||
proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
|
||||
proto.RegisterType((*Response)(nil), "remote_api.Response")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
|
||||
}
|
||||
|
||||
var fileDescriptor_remote_api_1978114ec33a273d = []byte{
|
||||
// 531 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
|
||||
0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
|
||||
0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
|
||||
0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
|
||||
0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
|
||||
0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
|
||||
0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
|
||||
0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
|
||||
0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
|
||||
0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
|
||||
0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
|
||||
0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
|
||||
0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
|
||||
0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
|
||||
0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
|
||||
0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
|
||||
0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
|
||||
0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
|
||||
0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
|
||||
0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
|
||||
0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
|
||||
0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
|
||||
0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
|
||||
0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
|
||||
0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
|
||||
0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
|
||||
0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
|
||||
0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
|
||||
0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
|
||||
0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
|
||||
0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
|
||||
0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
|
||||
0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
|
||||
0x03, 0x00, 0x00,
|
||||
}
|
44
vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
generated
vendored
Normal file
@ -0,0 +1,44 @@
syntax = "proto2";
option go_package = "remote_api";

package remote_api;

message Request {
  required string service_name = 2;
  required string method = 3;
  required bytes request = 4;
  optional string request_id = 5;
}

message ApplicationError {
  required int32 code = 1;
  required string detail = 2;
}

message RpcError {
  enum ErrorCode {
    UNKNOWN = 0;
    CALL_NOT_FOUND = 1;
    PARSE_ERROR = 2;
    SECURITY_VIOLATION = 3;
    OVER_QUOTA = 4;
    REQUEST_TOO_LARGE = 5;
    CAPABILITY_DISABLED = 6;
    FEATURE_DISABLED = 7;
    BAD_REQUEST = 8;
    RESPONSE_TOO_LARGE = 9;
    CANCELLED = 10;
    REPLAY_ERROR = 11;
    DEADLINE_EXCEEDED = 12;
  }
  required int32 code = 1;
  optional string detail = 2;
}

message Response {
  optional bytes response = 1;
  optional bytes exception = 2;
  optional ApplicationError application_error = 3;
  optional bytes java_exception = 4;
  optional RpcError rpc_error = 5;
}
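Every App Engine API call travels inside one of these Request envelopes. A hedged sketch of filling one in with the generated Go types from remote_api.pb.go above; the service name, method, payload and request id are placeholders, and the helper is assumed to sit inside the appengine tree so the internal import is allowed:

// Illustrative sketch only: wrapping an already-marshalled, service-specific
// payload in a remote_api.Request envelope. All values are placeholders.
package internal

import (
    "github.com/golang/protobuf/proto"

    remotepb "google.golang.org/appengine/internal/remote_api"
)

func exampleEnvelope(payload []byte) ([]byte, error) {
    req := &remotepb.Request{
        ServiceName: proto.String("datastore_v3"),
        Method:      proto.String("BeginTransaction"),
        Request:     payload,
        RequestId:   proto.String("example-ticket"),
    }
    return proto.Marshal(req)
}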
115
vendor/google.golang.org/appengine/internal/transaction.go
generated
vendored
Normal file
@ -0,0 +1,115 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

// This file implements hooks for applying datastore transactions.

import (
    "errors"
    "reflect"

    "github.com/golang/protobuf/proto"
    netcontext "golang.org/x/net/context"

    basepb "google.golang.org/appengine/internal/base"
    pb "google.golang.org/appengine/internal/datastore"
)

var transactionSetters = make(map[reflect.Type]reflect.Value)

// RegisterTransactionSetter registers a function that sets transaction information
// in a protocol buffer message. f should be a function with two arguments,
// the first being a protocol buffer type, and the second being *datastore.Transaction.
func RegisterTransactionSetter(f interface{}) {
    v := reflect.ValueOf(f)
    transactionSetters[v.Type().In(0)] = v
}

// applyTransaction applies the transaction t to message pb
// by using the relevant setter passed to RegisterTransactionSetter.
func applyTransaction(pb proto.Message, t *pb.Transaction) {
    v := reflect.ValueOf(pb)
    if f, ok := transactionSetters[v.Type()]; ok {
        f.Call([]reflect.Value{v, reflect.ValueOf(t)})
    }
}

var transactionKey = "used for *Transaction"

func transactionFromContext(ctx netcontext.Context) *transaction {
    t, _ := ctx.Value(&transactionKey).(*transaction)
    return t
}

func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
    return netcontext.WithValue(ctx, &transactionKey, t)
}

type transaction struct {
    transaction pb.Transaction
    finished    bool
}

var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")

func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
    if transactionFromContext(c) != nil {
        return nil, errors.New("nested transactions are not supported")
    }

    // Begin the transaction.
    t := &transaction{}
    req := &pb.BeginTransactionRequest{
        App: proto.String(FullyQualifiedAppID(c)),
    }
    if xg {
        req.AllowMultipleEg = proto.Bool(true)
    }
    if previousTransaction != nil {
        req.PreviousTransaction = previousTransaction
    }
    if readOnly {
        req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
    } else {
        req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
    }
    if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
        return nil, err
    }

    // Call f, rolling back the transaction if f returns a non-nil error, or panics.
    // The panic is not recovered.
    defer func() {
        if t.finished {
            return
        }
        t.finished = true
        // Ignore the error return value, since we are already returning a non-nil
        // error (or we're panicking).
        Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
    }()
    if err := f(withTransaction(c, t)); err != nil {
        return &t.transaction, err
    }
    t.finished = true

    // Commit the transaction.
    res := &pb.CommitResponse{}
    err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
    if ae, ok := err.(*APIError); ok {
        /* TODO: restore this conditional
        if appengine.IsDevAppServer() {
        */
        // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
        // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
        if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
            return &t.transaction, ErrConcurrentTransaction
        }
        if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
            return &t.transaction, ErrConcurrentTransaction
        }
    }
    return &t.transaction, err
}
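RegisterTransactionSetter is the hook through which the higher-level datastore package teaches applyTransaction how to stamp a transaction onto each request type. A hedged sketch of such a registration; the GetRequest message and its Transaction field follow the datastore proto but are assumptions here, not code from this commit:

// Hedged sketch: a service package (inside the appengine tree, so the internal
// import is permitted) registering a transaction setter for one request type.
// The GetRequest/Transaction names are assumptions based on the datastore proto.
package datastore

import (
    "google.golang.org/appengine/internal"
    pb "google.golang.org/appengine/internal/datastore"
)

func init() {
    internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
        x.Transaction = t
    })
}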
527
vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
generated
vendored
Normal file
@ -0,0 +1,527 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
|
||||
|
||||
package urlfetch
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type URLFetchServiceError_ErrorCode int32
|
||||
|
||||
const (
|
||||
URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
|
||||
URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
|
||||
URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
|
||||
URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
|
||||
URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
|
||||
URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
|
||||
URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
|
||||
URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
|
||||
URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
|
||||
URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
|
||||
URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
|
||||
URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
|
||||
URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
|
||||
)
|
||||
|
||||
var URLFetchServiceError_ErrorCode_name = map[int32]string{
|
||||
0: "OK",
|
||||
1: "INVALID_URL",
|
||||
2: "FETCH_ERROR",
|
||||
3: "UNSPECIFIED_ERROR",
|
||||
4: "RESPONSE_TOO_LARGE",
|
||||
5: "DEADLINE_EXCEEDED",
|
||||
6: "SSL_CERTIFICATE_ERROR",
|
||||
7: "DNS_ERROR",
|
||||
8: "CLOSED",
|
||||
9: "INTERNAL_TRANSIENT_ERROR",
|
||||
10: "TOO_MANY_REDIRECTS",
|
||||
11: "MALFORMED_REPLY",
|
||||
12: "CONNECTION_ERROR",
|
||||
}
|
||||
var URLFetchServiceError_ErrorCode_value = map[string]int32{
|
||||
"OK": 0,
|
||||
"INVALID_URL": 1,
|
||||
"FETCH_ERROR": 2,
|
||||
"UNSPECIFIED_ERROR": 3,
|
||||
"RESPONSE_TOO_LARGE": 4,
|
||||
"DEADLINE_EXCEEDED": 5,
|
||||
"SSL_CERTIFICATE_ERROR": 6,
|
||||
"DNS_ERROR": 7,
|
||||
"CLOSED": 8,
|
||||
"INTERNAL_TRANSIENT_ERROR": 9,
|
||||
"TOO_MANY_REDIRECTS": 10,
|
||||
"MALFORMED_REPLY": 11,
|
||||
"CONNECTION_ERROR": 12,
|
||||
}
|
||||
|
||||
func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
|
||||
p := new(URLFetchServiceError_ErrorCode)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x URLFetchServiceError_ErrorCode) String() string {
|
||||
return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
|
||||
}
|
||||
func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = URLFetchServiceError_ErrorCode(value)
|
||||
return nil
|
||||
}
|
||||
func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
|
||||
}
|
||||
|
||||
type URLFetchRequest_RequestMethod int32
|
||||
|
||||
const (
|
||||
URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
|
||||
URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
|
||||
URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
|
||||
URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
|
||||
URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
|
||||
URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
|
||||
)
|
||||
|
||||
var URLFetchRequest_RequestMethod_name = map[int32]string{
|
||||
1: "GET",
|
||||
2: "POST",
|
||||
3: "HEAD",
|
||||
4: "PUT",
|
||||
5: "DELETE",
|
||||
6: "PATCH",
|
||||
}
|
||||
var URLFetchRequest_RequestMethod_value = map[string]int32{
|
||||
"GET": 1,
|
||||
"POST": 2,
|
||||
"HEAD": 3,
|
||||
"PUT": 4,
|
||||
"DELETE": 5,
|
||||
"PATCH": 6,
|
||||
}
|
||||
|
||||
func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
|
||||
p := new(URLFetchRequest_RequestMethod)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x URLFetchRequest_RequestMethod) String() string {
|
||||
return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
|
||||
}
|
||||
func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = URLFetchRequest_RequestMethod(value)
|
||||
return nil
|
||||
}
|
||||
func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
|
||||
}
|
||||
|
||||
type URLFetchServiceError struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
|
||||
func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
|
||||
func (*URLFetchServiceError) ProtoMessage() {}
|
||||
func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
|
||||
}
|
||||
func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
|
||||
}
|
||||
func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
|
||||
}
|
||||
func (m *URLFetchServiceError) XXX_Size() int {
|
||||
return xxx_messageInfo_URLFetchServiceError.Size(m)
|
||||
}
|
||||
func (m *URLFetchServiceError) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
|
||||
|
||||
type URLFetchRequest struct {
|
||||
Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
|
||||
Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
|
||||
Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
|
||||
Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
|
||||
FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
|
||||
Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
|
||||
MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
|
||||
func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*URLFetchRequest) ProtoMessage() {}
|
||||
func (*URLFetchRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
|
||||
}
|
||||
func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_URLFetchRequest.Merge(dst, src)
|
||||
}
|
||||
func (m *URLFetchRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_URLFetchRequest.Size(m)
|
||||
}
|
||||
func (m *URLFetchRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
|
||||
|
||||
const Default_URLFetchRequest_FollowRedirects bool = true
|
||||
const Default_URLFetchRequest_MustValidateServerCertificate bool = true
|
||||
|
||||
func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
|
||||
if m != nil && m.Method != nil {
|
||||
return *m.Method
|
||||
}
|
||||
return URLFetchRequest_GET
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest) GetUrl() string {
|
||||
if m != nil && m.Url != nil {
|
||||
return *m.Url
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest) GetPayload() []byte {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest) GetFollowRedirects() bool {
|
||||
if m != nil && m.FollowRedirects != nil {
|
||||
return *m.FollowRedirects
|
||||
}
|
||||
return Default_URLFetchRequest_FollowRedirects
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest) GetDeadline() float64 {
|
||||
if m != nil && m.Deadline != nil {
|
||||
return *m.Deadline
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
|
||||
if m != nil && m.MustValidateServerCertificate != nil {
|
||||
return *m.MustValidateServerCertificate
|
||||
}
|
||||
return Default_URLFetchRequest_MustValidateServerCertificate
|
||||
}
|
||||
|
||||
type URLFetchRequest_Header struct {
|
||||
Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
|
||||
Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
|
||||
func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
|
||||
func (*URLFetchRequest_Header) ProtoMessage() {}
|
||||
func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
|
||||
}
|
||||
func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
|
||||
}
|
||||
func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
|
||||
}
|
||||
func (m *URLFetchRequest_Header) XXX_Size() int {
|
||||
return xxx_messageInfo_URLFetchRequest_Header.Size(m)
|
||||
}
|
||||
func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
|
||||
|
||||
func (m *URLFetchRequest_Header) GetKey() string {
|
||||
if m != nil && m.Key != nil {
|
||||
return *m.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *URLFetchRequest_Header) GetValue() string {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type URLFetchResponse struct {
|
||||
Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
|
||||
StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
|
||||
Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
|
||||
ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
|
||||
ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
|
||||
ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
|
||||
FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
|
||||
ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
|
||||
ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
|
||||
ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
|
||||
func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*URLFetchResponse) ProtoMessage() {}
|
||||
func (*URLFetchResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
|
||||
}
|
||||
func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_URLFetchResponse.Merge(dst, src)
|
||||
}
|
||||
func (m *URLFetchResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_URLFetchResponse.Size(m)
|
||||
}
|
||||
func (m *URLFetchResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
|
||||
|
||||
const Default_URLFetchResponse_ContentWasTruncated bool = false
|
||||
const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
|
||||
const Default_URLFetchResponse_ApiBytesSent int64 = 0
|
||||
const Default_URLFetchResponse_ApiBytesReceived int64 = 0
|
||||
|
||||
func (m *URLFetchResponse) GetContent() []byte {
|
||||
if m != nil {
|
||||
return m.Content
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetStatusCode() int32 {
|
||||
if m != nil && m.StatusCode != nil {
|
||||
return *m.StatusCode
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetContentWasTruncated() bool {
|
||||
if m != nil && m.ContentWasTruncated != nil {
|
||||
return *m.ContentWasTruncated
|
||||
}
|
||||
return Default_URLFetchResponse_ContentWasTruncated
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetExternalBytesSent() int64 {
|
||||
if m != nil && m.ExternalBytesSent != nil {
|
||||
return *m.ExternalBytesSent
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
|
||||
if m != nil && m.ExternalBytesReceived != nil {
|
||||
return *m.ExternalBytesReceived
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetFinalUrl() string {
|
||||
if m != nil && m.FinalUrl != nil {
|
||||
return *m.FinalUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
|
||||
if m != nil && m.ApiCpuMilliseconds != nil {
|
||||
return *m.ApiCpuMilliseconds
|
||||
}
|
||||
return Default_URLFetchResponse_ApiCpuMilliseconds
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetApiBytesSent() int64 {
|
||||
if m != nil && m.ApiBytesSent != nil {
|
||||
return *m.ApiBytesSent
|
||||
}
|
||||
return Default_URLFetchResponse_ApiBytesSent
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse) GetApiBytesReceived() int64 {
|
||||
if m != nil && m.ApiBytesReceived != nil {
|
||||
return *m.ApiBytesReceived
|
||||
}
|
||||
return Default_URLFetchResponse_ApiBytesReceived
|
||||
}
|
||||
|
||||
type URLFetchResponse_Header struct {
|
||||
Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
|
||||
Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
|
||||
func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
|
||||
func (*URLFetchResponse_Header) ProtoMessage() {}
|
||||
func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
|
||||
}
|
||||
func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
|
||||
}
|
||||
func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
|
||||
}
|
||||
func (m *URLFetchResponse_Header) XXX_Size() int {
|
||||
return xxx_messageInfo_URLFetchResponse_Header.Size(m)
|
||||
}
|
||||
func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
|
||||
|
||||
func (m *URLFetchResponse_Header) GetKey() string {
|
||||
if m != nil && m.Key != nil {
|
||||
return *m.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *URLFetchResponse_Header) GetValue() string {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
|
||||
proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
|
||||
proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
|
||||
proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
|
||||
proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
|
||||
}
|
||||
|
||||
var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
|
||||
// 770 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
|
||||
0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
|
||||
0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
|
||||
0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
|
||||
0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
|
||||
0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
|
||||
0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
|
||||
0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
|
||||
0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
|
||||
0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
|
||||
0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
|
||||
0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
|
||||
0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
|
||||
0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
|
||||
0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
|
||||
0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
|
||||
0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
|
||||
0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
|
||||
0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
|
||||
0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
|
||||
0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
|
||||
0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
|
||||
0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
|
||||
0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
|
||||
0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
|
||||
0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
|
||||
0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
|
||||
0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
|
||||
0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
|
||||
0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
|
||||
0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
|
||||
0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
|
||||
0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
|
||||
0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
|
||||
0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
|
||||
0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
|
||||
0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
|
||||
0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
|
||||
0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
|
||||
0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
|
||||
0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
|
||||
0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
|
||||
0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
|
||||
0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
|
||||
0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
|
||||
0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
|
||||
0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
|
||||
0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
|
||||
0x00, 0x00,
|
||||
}
|
64 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto generated vendored Normal file
@ -0,0 +1,64 @@
|
||||
syntax = "proto2";
|
||||
option go_package = "urlfetch";
|
||||
|
||||
package appengine;
|
||||
|
||||
message URLFetchServiceError {
|
||||
enum ErrorCode {
|
||||
OK = 0;
|
||||
INVALID_URL = 1;
|
||||
FETCH_ERROR = 2;
|
||||
UNSPECIFIED_ERROR = 3;
|
||||
RESPONSE_TOO_LARGE = 4;
|
||||
DEADLINE_EXCEEDED = 5;
|
||||
SSL_CERTIFICATE_ERROR = 6;
|
||||
DNS_ERROR = 7;
|
||||
CLOSED = 8;
|
||||
INTERNAL_TRANSIENT_ERROR = 9;
|
||||
TOO_MANY_REDIRECTS = 10;
|
||||
MALFORMED_REPLY = 11;
|
||||
CONNECTION_ERROR = 12;
|
||||
}
|
||||
}
|
||||
|
||||
message URLFetchRequest {
|
||||
enum RequestMethod {
|
||||
GET = 1;
|
||||
POST = 2;
|
||||
HEAD = 3;
|
||||
PUT = 4;
|
||||
DELETE = 5;
|
||||
PATCH = 6;
|
||||
}
|
||||
required RequestMethod Method = 1;
|
||||
required string Url = 2;
|
||||
repeated group Header = 3 {
|
||||
required string Key = 4;
|
||||
required string Value = 5;
|
||||
}
|
||||
optional bytes Payload = 6 [ctype=CORD];
|
||||
|
||||
optional bool FollowRedirects = 7 [default=true];
|
||||
|
||||
optional double Deadline = 8;
|
||||
|
||||
optional bool MustValidateServerCertificate = 9 [default=true];
|
||||
}
|
||||
|
||||
message URLFetchResponse {
|
||||
optional bytes Content = 1;
|
||||
required int32 StatusCode = 2;
|
||||
repeated group Header = 3 {
|
||||
required string Key = 4;
|
||||
required string Value = 5;
|
||||
}
|
||||
optional bool ContentWasTruncated = 6 [default=false];
|
||||
optional int64 ExternalBytesSent = 7;
|
||||
optional int64 ExternalBytesReceived = 8;
|
||||
|
||||
optional string FinalUrl = 9;
|
||||
|
||||
optional int64 ApiCpuMilliseconds = 10 [default=0];
|
||||
optional int64 ApiBytesSent = 11 [default=0];
|
||||
optional int64 ApiBytesReceived = 12 [default=0];
|
||||
}
|
210 vendor/google.golang.org/appengine/urlfetch/urlfetch.go generated vendored Normal file
@ -0,0 +1,210 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package urlfetch provides an http.RoundTripper implementation
|
||||
// for fetching URLs via App Engine's urlfetch service.
|
||||
package urlfetch // import "google.golang.org/appengine/urlfetch"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"google.golang.org/appengine/internal"
|
||||
pb "google.golang.org/appengine/internal/urlfetch"
|
||||
)
|
||||
|
||||
// Transport is an implementation of http.RoundTripper for
|
||||
// App Engine. Users should generally create an http.Client using
|
||||
// this transport and use the Client rather than using this transport
|
||||
// directly.
|
||||
type Transport struct {
|
||||
Context context.Context
|
||||
|
||||
// Controls whether the application checks the validity of SSL certificates
|
||||
// over HTTPS connections. A value of false (the default) instructs the
|
||||
// application to send a request to the server only if the certificate is
|
||||
// valid and signed by a trusted certificate authority (CA), and also
|
||||
// includes a hostname that matches the certificate. A value of true
|
||||
// instructs the application to perform no certificate validation.
|
||||
AllowInvalidServerCertificate bool
|
||||
}
|
||||
|
||||
// Verify statically that *Transport implements http.RoundTripper.
|
||||
var _ http.RoundTripper = (*Transport)(nil)
|
||||
|
||||
// Client returns an *http.Client using a default urlfetch Transport. This
|
||||
// client will have the default deadline of 5 seconds, and will check the
|
||||
// validity of SSL certificates.
|
||||
//
|
||||
// Any deadline of the provided context will be used for requests through this client;
|
||||
// if the client does not have a deadline then a 5 second default is used.
|
||||
func Client(ctx context.Context) *http.Client {
|
||||
return &http.Client{
|
||||
Transport: &Transport{
|
||||
Context: ctx,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type bodyReader struct {
|
||||
content []byte
|
||||
truncated bool
|
||||
closed bool
|
||||
}
|
||||
|
||||
// ErrTruncatedBody is the error returned after the final Read() from a
|
||||
// response's Body if the body has been truncated by App Engine's proxy.
|
||||
var ErrTruncatedBody = errors.New("urlfetch: truncated body")
|
||||
|
||||
func statusCodeToText(code int) string {
|
||||
if t := http.StatusText(code); t != "" {
|
||||
return t
|
||||
}
|
||||
return strconv.Itoa(code)
|
||||
}
|
||||
|
||||
func (br *bodyReader) Read(p []byte) (n int, err error) {
|
||||
if br.closed {
|
||||
if br.truncated {
|
||||
return 0, ErrTruncatedBody
|
||||
}
|
||||
return 0, io.EOF
|
||||
}
|
||||
n = copy(p, br.content)
|
||||
if n > 0 {
|
||||
br.content = br.content[n:]
|
||||
return
|
||||
}
|
||||
if br.truncated {
|
||||
br.closed = true
|
||||
return 0, ErrTruncatedBody
|
||||
}
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
func (br *bodyReader) Close() error {
|
||||
br.closed = true
|
||||
br.content = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// A map of the URL Fetch-accepted methods that take a request body.
|
||||
var methodAcceptsRequestBody = map[string]bool{
|
||||
"POST": true,
|
||||
"PUT": true,
|
||||
"PATCH": true,
|
||||
}
|
||||
|
||||
// urlString returns a valid string given a URL. This function is necessary because
|
||||
// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
|
||||
// See http://code.google.com/p/go/issues/detail?id=4860.
|
||||
func urlString(u *url.URL) string {
|
||||
if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
|
||||
return u.String()
|
||||
}
|
||||
aux := *u
|
||||
aux.Opaque = "//" + aux.Host + aux.Opaque
|
||||
return aux.String()
|
||||
}
|
||||
|
||||
// RoundTrip issues a single HTTP request and returns its response. Per the
|
||||
// http.RoundTripper interface, RoundTrip only returns an error if there
|
||||
// was an unsupported request or the URL Fetch proxy fails.
|
||||
// Note that HTTP response codes such as 5xx, 403, 404, etc are not
|
||||
// errors as far as the transport is concerned and will be returned
|
||||
// with err set to nil.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
|
||||
methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
|
||||
}
|
||||
|
||||
method := pb.URLFetchRequest_RequestMethod(methNum)
|
||||
|
||||
freq := &pb.URLFetchRequest{
|
||||
Method: &method,
|
||||
Url: proto.String(urlString(req.URL)),
|
||||
FollowRedirects: proto.Bool(false), // http.Client's responsibility
|
||||
MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
|
||||
}
|
||||
if deadline, ok := t.Context.Deadline(); ok {
|
||||
freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
|
||||
}
|
||||
|
||||
for k, vals := range req.Header {
|
||||
for _, val := range vals {
|
||||
freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
|
||||
Key: proto.String(k),
|
||||
Value: proto.String(val),
|
||||
})
|
||||
}
|
||||
}
|
||||
if methodAcceptsRequestBody[req.Method] && req.Body != nil {
|
||||
// Avoid a []byte copy if req.Body has a Bytes method.
|
||||
switch b := req.Body.(type) {
|
||||
case interface {
|
||||
Bytes() []byte
|
||||
}:
|
||||
freq.Payload = b.Bytes()
|
||||
default:
|
||||
freq.Payload, err = ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fres := &pb.URLFetchResponse{}
|
||||
if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res = &http.Response{}
|
||||
res.StatusCode = int(*fres.StatusCode)
|
||||
res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
|
||||
res.Header = make(http.Header)
|
||||
res.Request = req
|
||||
|
||||
// Faked:
|
||||
res.ProtoMajor = 1
|
||||
res.ProtoMinor = 1
|
||||
res.Proto = "HTTP/1.1"
|
||||
res.Close = true
|
||||
|
||||
for _, h := range fres.Header {
|
||||
hkey := http.CanonicalHeaderKey(*h.Key)
|
||||
hval := *h.Value
|
||||
if hkey == "Content-Length" {
|
||||
// Will get filled in below for all but HEAD requests.
|
||||
if req.Method == "HEAD" {
|
||||
res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
|
||||
}
|
||||
continue
|
||||
}
|
||||
res.Header.Add(hkey, hval)
|
||||
}
|
||||
|
||||
if req.Method != "HEAD" {
|
||||
res.ContentLength = int64(len(fres.Content))
|
||||
}
|
||||
|
||||
truncated := fres.GetContentWasTruncated()
|
||||
res.Body = &bodyReader{content: fres.Content, truncated: truncated}
|
||||
return
|
||||
}
|
||||
|
||||
func init() {
|
||||
internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
|
||||
internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
|
||||
}
|
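The Transport and Client defined above expose App Engine's urlfetch RPC through the standard http.RoundTripper and http.Client interfaces. A minimal usage sketch, not part of this commit, assuming the code runs inside an App Engine standard handler; the /proxy route and the target URL are illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/urlfetch"
)

// proxyHandler fetches an external page through the urlfetch service.
func proxyHandler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r) // per-request App Engine context
	client := urlfetch.Client(ctx) // *http.Client backed by urlfetch.Transport

	resp, err := client.Get("https://example.com/") // illustrative target
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	fmt.Fprintf(w, "fetched %d bytes with status %s\n", len(body), resp.Status)
}

func main() {
	http.HandleFunc("/proxy", proxyHandler)
	appengine.Main() // hand control to the App Engine runtime
}

Because RoundTrip forwards the context deadline to the Fetch RPC, a per-request timeout can be set with context.WithTimeout on the context passed to Client.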
1 vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc generated vendored Normal file
@ -0,0 +1 @@
|
||||
(encrypted binary content of .gitcookies.sh.enc; not representable as text)
|
8 vendor/gopkg.in/square/go-jose.v2/.gitignore generated vendored Normal file
@ -0,0 +1,8 @@
|
||||
*~
|
||||
.*.swp
|
||||
*.out
|
||||
*.test
|
||||
*.pem
|
||||
*.cov
|
||||
jose-util/jose-util
|
||||
jose-util.t.err
|
45 vendor/gopkg.in/square/go-jose.v2/.travis.yml generated vendored Normal file
@ -0,0 +1,45 @@
|
||||
language: go
|
||||
|
||||
sudo: false
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
go:
|
||||
- '1.11.x'
|
||||
- '1.12.x'
|
||||
- tip
|
||||
|
||||
go_import_path: gopkg.in/square/go-jose.v2
|
||||
|
||||
before_script:
|
||||
- export PATH=$HOME/.local/bin:$PATH
|
||||
|
||||
before_install:
|
||||
# Install encrypted gitcookies to get around bandwidth-limits
|
||||
# that are causing Travis-CI builds to fail. For more info, see
|
||||
# https://github.com/golang/go/issues/12933
|
||||
- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true
|
||||
- bash .gitcookies.sh || true
|
||||
- go get github.com/wadey/gocovmerge
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get github.com/stretchr/testify/require
|
||||
- go get github.com/google/go-cmp/cmp
|
||||
- go get golang.org/x/tools/cmd/cover || true
|
||||
- go get code.google.com/p/go.tools/cmd/cover || true
|
||||
- pip install cram --user
|
||||
|
||||
script:
|
||||
- go test . -v -covermode=count -coverprofile=profile.cov
|
||||
- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov
|
||||
- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov
|
||||
- go test ./json -v # no coverage for forked encoding/json package
|
||||
- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
|
||||
- cd ..
|
||||
|
||||
after_success:
|
||||
- gocovmerge *.cov */*.cov > merged.coverprofile
|
||||
- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci
|
10 vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md generated vendored Normal file
@ -0,0 +1,10 @@
|
||||
Serious about security
|
||||
======================
|
||||
|
||||
Square recognizes the important contributions the security research community
|
||||
can make. We therefore encourage reporting security issues with the code
|
||||
contained in this repository.
|
||||
|
||||
If you believe you have discovered a security vulnerability, please follow the
|
||||
guidelines at <https://bugcrowd.com/squareopensource>.
|
||||
|
14 vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md generated vendored Normal file
@ -0,0 +1,14 @@
|
||||
# Contributing
|
||||
|
||||
If you would like to contribute code to go-jose you can do so through GitHub by
|
||||
forking the repository and sending a pull request.
|
||||
|
||||
When submitting code, please make every effort to follow existing conventions
|
||||
and style in order to keep the code as readable as possible. Please also make
|
||||
sure all tests pass by running `go test`, and format your code with `go fmt`.
|
||||
We also recommend using `golint` and `errcheck`.
|
||||
|
||||
Before your code can be accepted into the project you must also sign the
|
||||
[Individual Contributor License Agreement][1].
|
||||
|
||||
[1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
|
202 vendor/gopkg.in/square/go-jose.v2/LICENSE generated vendored Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
118 vendor/gopkg.in/square/go-jose.v2/README.md generated vendored Normal file
@ -0,0 +1,118 @@
|
||||
# Go JOSE
|
||||
|
||||
[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1)
|
||||
[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2)
|
||||
[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE)
|
||||
[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose)
|
||||
[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose)
|
||||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. This includes support for JSON Web Encryption,
|
||||
JSON Web Signature, and JSON Web Token standards.
|
||||
|
||||
**Disclaimer**: This library contains encryption software that is subject to
|
||||
the U.S. Export Administration Regulations. You may not export, re-export,
|
||||
transfer or download this code or any part of it in violation of any United
|
||||
States law, directive or regulation. In particular this software may not be
|
||||
exported or re-exported in any form or on any media to Iran, North Sudan,
|
||||
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
|
||||
US maintained blocked list.
|
||||
|
||||
## Overview
|
||||
|
||||
The implementation follows the
|
||||
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
|
||||
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
|
||||
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519).
|
||||
Tables of supported algorithms are shown below. The library supports both
|
||||
the compact and full serialization formats, and has optional support for
|
||||
multiple recipients. It also comes with a small command-line utility
|
||||
([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util))
|
||||
for dealing with JOSE messages in a shell.
|
||||
|
||||
**Note**: We use a forked version of the `encoding/json` package from the Go
|
||||
standard library which uses case-sensitive matching for member names (instead
|
||||
of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
|
||||
This is to avoid differences in interpretation of messages between go-jose and
|
||||
libraries in other languages.
|
||||
|
||||
### Versions
|
||||
|
||||
We use [gopkg.in](https://gopkg.in) for versioning.
|
||||
|
||||
[Version 2](https://gopkg.in/square/go-jose.v2)
|
||||
([branch](https://github.com/square/go-jose/tree/v2),
|
||||
[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version:
|
||||
|
||||
import "gopkg.in/square/go-jose.v2"
|
||||
|
||||
The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will
|
||||
still receive backported bug fixes and security fixes, but otherwise
|
||||
development is frozen. All new feature development takes place on the `v2`
|
||||
branch. Version 2 also contains additional sub-packages such as the
|
||||
[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation
|
||||
contributed by [@shaxbee](https://github.com/shaxbee).
|
||||
|
||||
### Supported algorithms
|
||||
|
||||
See below for a table of supported algorithms. Algorithm identifiers match
|
||||
the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
|
||||
standard where possible. The Godoc reference has a list of constants.
|
||||
|
||||
Key encryption | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
RSA-PKCS#1v1.5 | RSA1_5
|
||||
RSA-OAEP | RSA-OAEP, RSA-OAEP-256
|
||||
AES key wrap | A128KW, A192KW, A256KW
|
||||
AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
|
||||
ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
|
||||
ECDH-ES (direct) | ECDH-ES<sup>1</sup>
|
||||
Direct encryption | dir<sup>1</sup>
|
||||
|
||||
<sup>1. Not supported in multi-recipient mode</sup>
|
||||
|
||||
Signing / MAC | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
|
||||
RSASSA-PSS | PS256, PS384, PS512
|
||||
HMAC | HS256, HS384, HS512
|
||||
ECDSA | ES256, ES384, ES512
|
||||
Ed25519 | EdDSA<sup>2</sup>
|
||||
|
||||
<sup>2. Only available in version 2 of the package</sup>
|
||||
|
||||
Content encryption | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
|
||||
AES-GCM | A128GCM, A192GCM, A256GCM
|
||||
|
||||
Compression | Algorithm identifier(s)
|
||||
:------------------------- | -------------------------------
|
||||
DEFLATE (RFC 1951) | DEF
|
||||
|
||||
### Supported key types
|
||||
|
||||
See below for a table of supported key types. These are understood by the
|
||||
library, and can be passed to corresponding functions such as `NewEncrypter` or
|
||||
`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
|
||||
allows attaching a key id.
|
||||
|
||||
Algorithm(s) | Corresponding types
|
||||
:------------------------- | -------------------------------
|
||||
RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
|
||||
ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
|
||||
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey)
|
||||
AES, HMAC | []byte
|
||||
|
||||
<sup>1. Only available in version 2 of the package</sup>
|
||||
|
||||
## Examples
|
||||
|
||||
[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1)
|
||||
[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2)
|
||||
|
||||
Examples can be found in the Godoc
|
||||
reference for this package. The
|
||||
[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)
|
||||
subdirectory also contains a small command-line utility which might be useful
|
||||
as an example.
|
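The README above defers to the Godoc for examples; for orientation, here is a minimal sign-and-verify sketch against the vendored gopkg.in/square/go-jose.v2 API. It is illustrative only: the throwaway RSA key, the PS256 choice, and the payload are assumptions, not something this commit exercises directly.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// Throwaway key; a real deployment would load a persisted key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// PS256 is one of the RSASSA-PSS algorithms listed in the table above.
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.PS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	// Sign a payload and emit the compact serialization.
	object, err := signer.Sign([]byte("illustrative payload"))
	if err != nil {
		panic(err)
	}
	compact, err := object.CompactSerialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(compact)

	// Parse and verify with the public key to round-trip the payload.
	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}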
592 vendor/gopkg.in/square/go-jose.v2/asymmetric.go generated vendored Normal file
@ -0,0 +1,592 @@
|
||||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/aes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
josecipher "gopkg.in/square/go-jose.v2/cipher"
|
||||
"gopkg.in/square/go-jose.v2/json"
|
||||
)
|
||||
|
||||
// A generic RSA-based encrypter/verifier
|
||||
type rsaEncrypterVerifier struct {
|
||||
publicKey *rsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic RSA-based decrypter/signer
|
||||
type rsaDecrypterSigner struct {
|
||||
privateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
// A generic EC-based encrypter/verifier
|
||||
type ecEncrypterVerifier struct {
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
type edEncrypterVerifier struct {
|
||||
publicKey ed25519.PublicKey
|
||||
}
|
||||
|
||||
// A key generator for ECDH-ES
|
||||
type ecKeyGenerator struct {
|
||||
size int
|
||||
algID string
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic EC-based decrypter/signer
|
||||
type ecDecrypterSigner struct {
|
||||
privateKey *ecdsa.PrivateKey
|
||||
}
|
||||
|
||||
type edDecrypterSigner struct {
|
||||
privateKey ed25519.PrivateKey
|
||||
}
|
||||
|
||||
// newRSARecipient creates recipientKeyInfo based on the given key.
|
||||
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case RSA1_5, RSA_OAEP, RSA_OAEP_256:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if publicKey == nil {
|
||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &rsaEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newRSASigner creates a recipientSigInfo based on the given key.
|
||||
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case RS256, RS384, RS512, PS256, PS384, PS512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: staticPublicKey(&JSONWebKey{
|
||||
Key: privateKey.Public(),
|
||||
}),
|
||||
signer: &rsaDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
|
||||
if sigAlg != EdDSA {
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: staticPublicKey(&JSONWebKey{
|
||||
Key: privateKey.Public(),
|
||||
}),
|
||||
signer: &edDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDHRecipient creates recipientKeyInfo based on the given key.
|
||||
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &ecEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDSASigner creates a recipientSigInfo based on the given key.
|
||||
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case ES256, ES384, ES512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: staticPublicKey(&JSONWebKey{
|
||||
Key: privateKey.Public(),
|
||||
}),
|
||||
signer: &ecDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
encryptedKey, err := ctx.encrypt(cek, alg)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: encryptedKey,
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
|
||||
case RSA_OAEP:
|
||||
return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
|
||||
}
|
||||
|
||||
// Decrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
|
||||
// Note: The random reader on decrypt operations is only used for blinding,
|
||||
// so stubbing is meaningless (hence the direct use of rand.Reader).
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
defer func() {
|
||||
// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
|
||||
// because of an index out of bounds error, which we want to ignore.
|
||||
// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
|
||||
// only exists for preventing crashes with unpatched versions.
|
||||
// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
|
||||
// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
|
||||
_ = recover()
|
||||
}()
|
||||
|
||||
// Perform some input validation.
|
||||
keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
|
||||
if keyBytes != len(jek) {
|
||||
// Input size is incorrect, the encrypted payload should always match
|
||||
// the size of the public modulus (e.g. using a 2048 bit key will
|
||||
// produce 256 bytes of output). Reject this since it's invalid input.
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
cek, _, err := generator.genKey()
|
||||
if err != nil {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
|
||||
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
|
||||
// the Million Message Attack on Cryptographic Message Syntax". We are
|
||||
// therefore deliberately ignoring errors here.
|
||||
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
|
||||
|
||||
return cek, nil
|
||||
case RSA_OAEP:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return Signature{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
var out []byte
|
||||
var err error
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
|
||||
case PS256, PS384, PS512:
|
||||
out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthEqualsHash,
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
|
||||
case PS256, PS384, PS512:
|
||||
return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
|
||||
}
|
||||
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
switch alg {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
|
||||
return recipientInfo{
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
generator := ecKeyGenerator{
|
||||
algID: string(alg),
|
||||
publicKey: ctx.publicKey,
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case ECDH_ES_A128KW:
|
||||
generator.size = 16
|
||||
case ECDH_ES_A192KW:
|
||||
generator.size = 24
|
||||
case ECDH_ES_A256KW:
|
||||
generator.size = 32
|
||||
}
|
||||
|
||||
kek, header, err := generator.genKey()
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(kek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
jek, err := josecipher.KeyWrap(block, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: jek,
|
||||
header: &header,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Get key size for EC key generator
|
||||
func (ctx ecKeyGenerator) keySize() int {
|
||||
return ctx.size
|
||||
}
|
||||
|
||||
// Get a content encryption key for ECDH-ES
|
||||
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
|
||||
if err != nil {
|
||||
return nil, rawHeader{}, err
|
||||
}
|
||||
|
||||
out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
|
||||
|
||||
b, err := json.Marshal(&JSONWebKey{
|
||||
Key: &priv.PublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
headers := rawHeader{
|
||||
headerEPK: makeRawMessage(b),
|
||||
}
|
||||
|
||||
return out, headers, nil
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
epk, err := headers.getEPK()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid epk header")
|
||||
}
|
||||
if epk == nil {
|
||||
return nil, errors.New("square/go-jose: missing epk header")
|
||||
}
|
||||
|
||||
publicKey, ok := epk.Key.(*ecdsa.PublicKey)
|
||||
if publicKey == nil || !ok {
|
||||
return nil, errors.New("square/go-jose: invalid epk header")
|
||||
}
|
||||
|
||||
if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
||||
return nil, errors.New("square/go-jose: invalid public key in epk header")
|
||||
}
|
||||
|
||||
apuData, err := headers.getAPU()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid apu header")
|
||||
}
|
||||
apvData, err := headers.getAPV()
|
||||
if err != nil {
|
||||
return nil, errors.New("square/go-jose: invalid apv header")
|
||||
}
|
||||
|
||||
deriveKey := func(algID string, size int) []byte {
|
||||
return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
|
||||
}
|
||||
|
||||
var keySize int
|
||||
|
||||
algorithm := headers.getAlgorithm()
|
||||
switch algorithm {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES uses direct key agreement, no key unwrapping necessary.
|
||||
return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
|
||||
case ECDH_ES_A128KW:
|
||||
keySize = 16
|
||||
case ECDH_ES_A192KW:
|
||||
keySize = 24
|
||||
case ECDH_ES_A256KW:
|
||||
keySize = 32
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
key := deriveKey(string(algorithm), keySize)
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
||||
}
|
||||
|
||||
func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
if alg != EdDSA {
|
||||
return Signature{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: sig,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
if alg != EdDSA {
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
ok := ed25519.Verify(ctx.publicKey, payload, signature)
|
||||
if !ok {
|
||||
return errors.New("square/go-jose: ed25519 signature failed to verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var expectedBitSize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
expectedBitSize = 256
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
expectedBitSize = 384
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
expectedBitSize = 521
|
||||
hash = crypto.SHA512
|
||||
}
|
||||
|
||||
curveBits := ctx.privateKey.Curve.Params().BitSize
|
||||
if expectedBitSize != curveBits {
|
||||
return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
keyBytes := curveBits / 8
|
||||
if curveBits%8 > 0 {
|
||||
keyBytes++
|
||||
}
|
||||
|
||||
// We serialize the outputs (r and s) into big-endian byte arrays and pad
|
||||
// them with zeros on the left to make sure the sizes work out. Both arrays
|
||||
// must be keyBytes long, and the output must be 2*keyBytes long.
|
||||
rBytes := r.Bytes()
|
||||
rBytesPadded := make([]byte, keyBytes)
|
||||
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
|
||||
|
||||
sBytes := s.Bytes()
|
||||
sBytesPadded := make([]byte, keyBytes)
|
||||
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
|
||||
|
||||
out := append(rBytesPadded, sBytesPadded...)
|
||||
|
||||
return Signature{
|
||||
Signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var keySize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
keySize = 32
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
keySize = 48
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
keySize = 66
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if len(signature) != 2*keySize {
|
||||
return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r := big.NewInt(0).SetBytes(signature[:keySize])
|
||||
s := big.NewInt(0).SetBytes(signature[keySize:])
|
||||
|
||||
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
|
||||
if !match {
|
||||
return errors.New("square/go-jose: ecdsa signature failed to verify")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
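The ECDSA signPayload and verifyPayload above serialize a signature as r and s left-padded to a fixed width (keyBytes each) and concatenated, then split the concatenation back apart for verification. A standalone sketch of that round trip using only the standard library; P-256 and the payload are illustrative:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	hashed := sha256.Sum256([]byte("illustrative payload"))

	r, s, err := ecdsa.Sign(rand.Reader, key, hashed[:])
	if err != nil {
		panic(err)
	}

	// Fixed-width big-endian encoding: 32 bytes for r and 32 for s on P-256,
	// mirroring the rBytesPadded/sBytesPadded logic above.
	const keyBytes = 32
	sig := make([]byte, 2*keyBytes)
	copy(sig[keyBytes-len(r.Bytes()):keyBytes], r.Bytes())
	copy(sig[2*keyBytes-len(s.Bytes()):], s.Bytes())

	// Verification splits the 64-byte signature back into r and s.
	r2 := new(big.Int).SetBytes(sig[:keyBytes])
	s2 := new(big.Int).SetBytes(sig[keyBytes:])
	fmt.Println("valid:", ecdsa.Verify(&key.PublicKey, hashed[:], r2, s2))
}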
196 vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go generated vendored Normal file
@ -0,0 +1,196 @@
|
||||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher

import (
	"bytes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/sha256"
	"crypto/sha512"
	"crypto/subtle"
	"encoding/binary"
	"errors"
	"hash"
)

const (
	nonceBytes = 16
)

// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
	keySize := len(key) / 2
	integrityKey := key[:keySize]
	encryptionKey := key[keySize:]

	blockCipher, err := newBlockCipher(encryptionKey)
	if err != nil {
		return nil, err
	}

	var hash func() hash.Hash
	switch keySize {
	case 16:
		hash = sha256.New
	case 24:
		hash = sha512.New384
	case 32:
		hash = sha512.New
	}

	return &cbcAEAD{
		hash:         hash,
		blockCipher:  blockCipher,
		authtagBytes: keySize,
		integrityKey: integrityKey,
	}, nil
}

// An AEAD based on CBC+HMAC
type cbcAEAD struct {
	hash         func() hash.Hash
	authtagBytes int
	integrityKey []byte
	blockCipher  cipher.Block
}

func (ctx *cbcAEAD) NonceSize() int {
	return nonceBytes
}

func (ctx *cbcAEAD) Overhead() int {
	// Maximum overhead is block size (for padding) plus auth tag length, where
	// the length of the auth tag is equivalent to the key size.
	return ctx.blockCipher.BlockSize() + ctx.authtagBytes
}

// Seal encrypts and authenticates the plaintext.
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
	// Output buffer -- must take care not to mangle plaintext input.
	ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
	copy(ciphertext, plaintext)
	ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())

	cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)

	cbc.CryptBlocks(ciphertext, ciphertext)
	authtag := ctx.computeAuthTag(data, nonce, ciphertext)

	ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
	copy(out, ciphertext)
	copy(out[len(ciphertext):], authtag)

	return ret
}

// Open decrypts and authenticates the ciphertext.
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
	if len(ciphertext) < ctx.authtagBytes {
		return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
	}

	offset := len(ciphertext) - ctx.authtagBytes
	expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
	match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
	if match != 1 {
		return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
	}

	cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)

	// Make copy of ciphertext buffer, don't want to modify in place
	buffer := append([]byte{}, []byte(ciphertext[:offset])...)

	if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
		return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
	}

	cbc.CryptBlocks(buffer, buffer)

	// Remove padding
	plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
	if err != nil {
		return nil, err
	}

	ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
	copy(out, plaintext)

	return ret, nil
}

// Compute an authentication tag
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
	buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
	n := 0
	n += copy(buffer, aad)
	n += copy(buffer[n:], nonce)
	n += copy(buffer[n:], ciphertext)
	binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)

	// According to documentation, Write() on hash.Hash never fails.
	hmac := hmac.New(ctx.hash, ctx.integrityKey)
	_, _ = hmac.Write(buffer)

	return hmac.Sum(nil)[:ctx.authtagBytes]
}

// resize ensures that the given slice has a capacity of at least n bytes.
// If the capacity of the slice is less than n, a new slice is allocated
// and the existing data will be copied.
func resize(in []byte, n uint64) (head, tail []byte) {
	if uint64(cap(in)) >= n {
		head = in[:n]
	} else {
		head = make([]byte, n)
		copy(head, in)
	}

	tail = head[len(in):]
	return
}

// Apply padding
func padBuffer(buffer []byte, blockSize int) []byte {
	missing := blockSize - (len(buffer) % blockSize)
	ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
	padding := bytes.Repeat([]byte{byte(missing)}, missing)
	copy(out, padding)
	return ret
}

// Remove padding
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
	if len(buffer)%blockSize != 0 {
		return nil, errors.New("square/go-jose: invalid padding")
	}

	last := buffer[len(buffer)-1]
	count := int(last)

	if count == 0 || count > blockSize || count > len(buffer) {
		return nil, errors.New("square/go-jose: invalid padding")
	}

	padding := bytes.Repeat([]byte{last}, count)
	if !bytes.HasSuffix(buffer, padding) {
		return nil, errors.New("square/go-jose: invalid padding")
	}

	return buffer[:len(buffer)-count], nil
}
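For orientation: the cbcAEAD above is the composite AES-CBC + HMAC-SHA2 construction used by JWE (the combined key is split in half, the first half keying the HMAC and the second half keying AES-CBC, with the tag truncated to the key-half length). A minimal usage sketch, not part of this diff, with an invented 32-byte key (i.e. the A128CBC-HS256 variant):

package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "gopkg.in/square/go-jose.v2/cipher"
)

func main() {
	// 32-byte combined key => 16-byte HMAC-SHA-256 key + 16-byte AES-128 key.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
	if err != nil {
		panic(err)
	}

	// Nonce doubles as the CBC IV, so it is one AES block (16 bytes) long.
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	aad := []byte("protected header") // illustrative additional authenticated data
	ciphertext := aead.Seal(nil, nonce, []byte("hello"), aad)

	plaintext, err := aead.Open(nil, nonce, ciphertext, aad)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}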
75 vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go generated vendored Normal file
@ -0,0 +1,75 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package josecipher

import (
	"crypto"
	"encoding/binary"
	"hash"
	"io"
)

type concatKDF struct {
	z, info []byte
	i       uint32
	cache   []byte
	hasher  hash.Hash
}

// NewConcatKDF builds a KDF reader based on the given inputs.
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
	buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
	n := 0
	n += copy(buffer, algID)
	n += copy(buffer[n:], ptyUInfo)
	n += copy(buffer[n:], ptyVInfo)
	n += copy(buffer[n:], supPubInfo)
	copy(buffer[n:], supPrivInfo)

	hasher := hash.New()

	return &concatKDF{
		z:      z,
		info:   buffer,
		hasher: hasher,
		cache:  []byte{},
		i:      1,
	}
}

func (ctx *concatKDF) Read(out []byte) (int, error) {
	copied := copy(out, ctx.cache)
	ctx.cache = ctx.cache[copied:]

	for copied < len(out) {
		ctx.hasher.Reset()

		// Write on a hash.Hash never fails
		_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
		_, _ = ctx.hasher.Write(ctx.z)
		_, _ = ctx.hasher.Write(ctx.info)

		hash := ctx.hasher.Sum(nil)
		chunkCopied := copy(out[copied:], hash)
		copied += chunkCopied
		ctx.cache = hash[chunkCopied:]

		ctx.i++
	}

	return copied, nil
}
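The reader above is the Concat KDF (counter || Z || OtherInfo hashed per round), consumed by DeriveECDHES in the next file. A rough sketch of driving it directly, assuming the same 32-bit length-prefixing convention DeriveECDHES applies to its inputs; the secret and party strings are placeholders:

package main

import (
	"crypto"
	_ "crypto/sha256" // register SHA-256 so crypto.SHA256.New() is available
	"encoding/binary"
	"fmt"

	josecipher "gopkg.in/square/go-jose.v2/cipher"
)

// lengthPrefix mimics the 32-bit big-endian length prefix used for the KDF inputs.
func lengthPrefix(data []byte) []byte {
	out := make([]byte, 4+len(data))
	binary.BigEndian.PutUint32(out, uint32(len(data)))
	copy(out[4:], data)
	return out
}

func main() {
	z := []byte("shared-secret-from-ecdh") // placeholder shared secret
	algID := lengthPrefix([]byte("A128GCM"))
	apu := lengthPrefix([]byte("Alice"))
	apv := lengthPrefix([]byte("Bob"))

	// suppPubInfo encodes the desired key length in bits.
	supPubInfo := make([]byte, 4)
	binary.BigEndian.PutUint32(supPubInfo, 16*8)

	kdf := josecipher.NewConcatKDF(crypto.SHA256, z, algID, apu, apv, supPubInfo, nil)

	key := make([]byte, 16)
	_, _ = kdf.Read(key) // Read never returns an error
	fmt.Printf("%x\n", key)
}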
86 vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go generated vendored Normal file
@ -0,0 +1,86 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package josecipher

import (
	"bytes"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"encoding/binary"
)

// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
// It is an error to call this function with a private/public key that are not on the same
// curve. Callers must ensure that the keys are valid before calling this function. Output
// size may be at most 1<<16 bytes (64 KiB).
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
	if size > 1<<16 {
		panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
	}

	// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
	algID := lengthPrefixed([]byte(alg))
	ptyUInfo := lengthPrefixed(apuData)
	ptyVInfo := lengthPrefixed(apvData)

	// suppPubInfo is the encoded length of the output size in bits
	supPubInfo := make([]byte, 4)
	binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)

	if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
		panic("public key not on same curve as private key")
	}

	z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
	zBytes := z.Bytes()

	// Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
	// the returned byte array. This can lead to a problem where zBytes will be
	// shorter than expected which breaks the key derivation. Therefore we must pad
	// to the full length of the expected coordinate here before calling the KDF.
	octSize := dSize(priv.Curve)
	if len(zBytes) != octSize {
		zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
	}

	reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
	key := make([]byte, size)

	// Read on the KDF will never fail
	_, _ = reader.Read(key)

	return key
}

// dSize returns the size in octets for a coordinate on an elliptic curve.
func dSize(curve elliptic.Curve) int {
	order := curve.Params().P
	bitLen := order.BitLen()
	size := bitLen / 8
	if bitLen%8 != 0 {
		size++
	}
	return size
}

func lengthPrefixed(data []byte) []byte {
	out := make([]byte, len(data)+4)
	binary.BigEndian.PutUint32(out, uint32(len(data)))
	copy(out[4:], data)
	return out
}
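DeriveECDHES is what the JWE layer calls for the ECDH-ES family of key agreement algorithms. A toy round-trip sketch, assuming two freshly generated P-256 keys and invented party identifiers, showing that both sides derive the same content encryption key:

package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	josecipher "gopkg.in/square/go-jose.v2/cipher"
)

func main() {
	// Error handling elided for brevity in this sketch.
	alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	apu, apv := []byte("Alice"), []byte("Bob") // illustrative PartyUInfo/PartyVInfo

	// Both sides derive the same 16-byte key, e.g. for direct A128GCM encryption.
	k1 := josecipher.DeriveECDHES("A128GCM", apu, apv, alice, &bob.PublicKey, 16)
	k2 := josecipher.DeriveECDHES("A128GCM", apu, apv, bob, &alice.PublicKey, 16)

	fmt.Println(bytes.Equal(k1, k2)) // true
}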
109 vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go generated vendored Normal file
@ -0,0 +1,109 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package josecipher

import (
	"crypto/cipher"
	"crypto/subtle"
	"encoding/binary"
	"errors"
)

var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}

// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
	if len(cek)%8 != 0 {
		return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
	}

	n := len(cek) / 8
	r := make([][]byte, n)

	for i := range r {
		r[i] = make([]byte, 8)
		copy(r[i], cek[i*8:])
	}

	buffer := make([]byte, 16)
	tBytes := make([]byte, 8)
	copy(buffer, defaultIV)

	for t := 0; t < 6*n; t++ {
		copy(buffer[8:], r[t%n])

		block.Encrypt(buffer, buffer)

		binary.BigEndian.PutUint64(tBytes, uint64(t+1))

		for i := 0; i < 8; i++ {
			buffer[i] = buffer[i] ^ tBytes[i]
		}
		copy(r[t%n], buffer[8:])
	}

	out := make([]byte, (n+1)*8)
	copy(out, buffer[:8])
	for i := range r {
		copy(out[(i+1)*8:], r[i])
	}

	return out, nil
}

// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
	if len(ciphertext)%8 != 0 {
		return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
	}

	n := (len(ciphertext) / 8) - 1
	r := make([][]byte, n)

	for i := range r {
		r[i] = make([]byte, 8)
		copy(r[i], ciphertext[(i+1)*8:])
	}

	buffer := make([]byte, 16)
	tBytes := make([]byte, 8)
	copy(buffer[:8], ciphertext[:8])

	for t := 6*n - 1; t >= 0; t-- {
		binary.BigEndian.PutUint64(tBytes, uint64(t+1))

		for i := 0; i < 8; i++ {
			buffer[i] = buffer[i] ^ tBytes[i]
		}
		copy(buffer[8:], r[t%n])

		block.Decrypt(buffer, buffer)

		copy(r[t%n], buffer[8:])
	}

	if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
		return nil, errors.New("square/go-jose: failed to unwrap key")
	}

	out := make([]byte, n*8)
	for i := range r {
		copy(out[i*8:], r[i])
	}

	return out, nil
}
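KeyWrap/KeyUnwrap implement the AES Key Wrap scheme (RFC 3394) with the default IV A6A6A6A6A6A6A6A6, which backs the A128KW/A192KW/A256KW JWE key management algorithms. A small sketch, with randomly generated placeholder keys, of wrapping and unwrapping a 16-byte CEK under an AES-128 key-encryption key:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "gopkg.in/square/go-jose.v2/cipher"
)

func main() {
	kek := make([]byte, 16) // key-encryption key (AES-128), placeholder value
	cek := make([]byte, 16) // content-encryption key to be wrapped, placeholder value
	_, _ = rand.Read(kek)
	_, _ = rand.Read(cek)

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}

	// Wrapped output is 8 bytes longer than the input (integrity check block + payload).
	wrapped, err := josecipher.KeyWrap(block, cek)
	if err != nil {
		panic(err)
	}

	unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(cek, unwrapped)) // true
}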
Some files were not shown because too many files have changed in this diff.