diff --git a/go.mod b/go.mod
index c765a2e5c..0bfaa606a 100644
--- a/go.mod
+++ b/go.mod
@@ -35,6 +35,7 @@ require (
github.com/gin-gonic/gin v1.10.0
github.com/go-playground/form/v4 v4.2.1
github.com/go-swagger/go-swagger v0.31.0
+ github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/gorilla/feeds v1.1.2
github.com/gorilla/websocket v1.5.2
diff --git a/internal/gtsmodel/workertask.go b/internal/gtsmodel/workertask.go
new file mode 100644
index 000000000..cc8433199
--- /dev/null
+++ b/internal/gtsmodel/workertask.go
@@ -0,0 +1,41 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package gtsmodel
+
+import "time"
+
+type WorkerType uint8
+
+const (
+ DeliveryWorker WorkerType = 1
+ FederatorWorker WorkerType = 2
+ ClientWorker WorkerType = 3
+)
+
+// WorkerTask represents a queued worker task
+// that was persisted to the database on shutdown.
+// This is only ever used on startup to pick up
+// where we left off, and on shutdown to prevent
+// queued tasks from being lost. It is simply a
+// means to store a blob of serialized task data.
+type WorkerTask struct {
+ ID uint `bun:""`
+ WorkerType uint8 `bun:""`
+ TaskData []byte `bun:""`
+ CreatedAt time.Time `bun:""`
+}
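
A rough sketch of how this model and the WorkerType constants are intended to be used on startup: each persisted row is dispatched on its WorkerType, and its TaskData blob is deserialized back into a queueable message. The restoreTask helper below is hypothetical and not part of this diff; the Deserialize methods it calls are added further down.

package example // hypothetical sketch, not part of this diff

import (
	"fmt"

	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/messages"
	"github.com/superseriousbusiness/gotosocial/internal/transport/delivery"
)

// restoreTask turns a persisted WorkerTask row back into the in-memory
// message type appropriate for its worker, ready to be re-queued.
func restoreTask(task *gtsmodel.WorkerTask) (any, error) {
	switch gtsmodel.WorkerType(task.WorkerType) {
	case gtsmodel.DeliveryWorker:
		var dlv delivery.Delivery
		if err := dlv.Deserialize(task.TaskData); err != nil {
			return nil, err
		}
		return &dlv, nil

	case gtsmodel.FederatorWorker:
		var msg messages.FromFediAPI
		if err := msg.Deserialize(task.TaskData); err != nil {
			return nil, err
		}
		return &msg, nil

	case gtsmodel.ClientWorker:
		var msg messages.FromClientAPI
		if err := msg.Deserialize(task.TaskData); err != nil {
			return nil, err
		}
		return &msg, nil

	default:
		return nil, fmt.Errorf("unrecognized worker type: %d", task.WorkerType)
	}
}
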
diff --git a/internal/httpclient/sign.go b/internal/httpclient/transport.go
similarity index 89%
rename from internal/httpclient/sign.go
rename to internal/httpclient/transport.go
index eff20be49..350d24fab 100644
--- a/internal/httpclient/sign.go
+++ b/internal/httpclient/transport.go
@@ -21,7 +21,6 @@
"net/http"
"time"
- "codeberg.org/gruf/go-byteutil"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
)
@@ -45,12 +44,6 @@ func (t *signingtransport) RoundTrip(r *http.Request) (*http.Response, error) {
r.Header.Del("Signature")
r.Header.Del("Digest")
- // Rewind body reader and content-length if set.
- if rc, ok := r.Body.(*byteutil.ReadNopCloser); ok {
- rc.Rewind() // set len AFTER rewind
- r.ContentLength = int64(rc.Len())
- }
-
// Sign the outgoing request.
if err := sign(r); err != nil {
return nil, err
diff --git a/internal/messages/messages.go b/internal/messages/messages.go
index c5488d586..7779633ba 100644
--- a/internal/messages/messages.go
+++ b/internal/messages/messages.go
@@ -18,9 +18,15 @@
package messages
import (
+ "context"
+ "encoding/json"
"net/url"
+ "reflect"
"codeberg.org/gruf/go-structr"
+ "github.com/superseriousbusiness/activity/streams"
+ "github.com/superseriousbusiness/activity/streams/vocab"
+ "github.com/superseriousbusiness/gotosocial/internal/gtserror"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
)
@@ -50,6 +56,105 @@ type FromClientAPI struct {
Target *gtsmodel.Account
}
+// fromClientAPI is an internal type
+// for FromClientAPI that provides a
+// json serialize / deserialize -able
+// shape that minimizes required data.
+type fromClientAPI struct {
+ APObjectType string `json:"ap_object_type,omitempty"`
+ APActivityType string `json:"ap_activity_type,omitempty"`
+ GTSModel json.RawMessage `json:"gts_model,omitempty"`
+ GTSModelType string `json:"gts_model_type,omitempty"`
+ TargetURI string `json:"target_uri,omitempty"`
+ OriginID string `json:"origin_id,omitempty"`
+ TargetID string `json:"target_id,omitempty"`
+}
+
+// Serialize will serialize the worker data as a data blob for storage.
+// Note that this will flatten some of the data, e.g. to only account IDs.
+func (msg *FromClientAPI) Serialize() ([]byte, error) {
+ var (
+ modelType string
+ originID string
+ targetID string
+ )
+
+ // Set database model type if any provided.
+ if t := reflect.TypeOf(msg.GTSModel); t != nil {
+ modelType = t.String()
+ }
+
+ // Set origin account ID.
+ if msg.Origin != nil {
+ originID = msg.Origin.ID
+ }
+
+ // Set target account ID.
+ if msg.Target != nil {
+ targetID = msg.Target.ID
+ }
+
+ // Marshal GTS model as raw JSON block.
+ modelJSON, err := json.Marshal(msg.GTSModel)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal as internal JSON type.
+ return json.Marshal(fromClientAPI{
+ APObjectType: msg.APObjectType,
+ APActivityType: msg.APActivityType,
+ GTSModel: modelJSON,
+ GTSModelType: modelType,
+ TargetURI: msg.TargetURI,
+ OriginID: originID,
+ TargetID: targetID,
+ })
+}
+
+// Deserialize will attempt to deserialize a blob of task data,
+// which will involve unflattening previously serialized data,
+// leaving some message structures as placeholders holding only IDs.
+func (msg *FromClientAPI) Deserialize(data []byte) error {
+ var imsg fromClientAPI
+
+ // Unmarshal as internal JSON type.
+ err := json.Unmarshal(data, &imsg)
+ if err != nil {
+ return err
+ }
+
+ // Copy over the simplest fields.
+ msg.APObjectType = imsg.APObjectType
+ msg.APActivityType = imsg.APActivityType
+ msg.TargetURI = imsg.TargetURI
+
+ // Resolve Go type from JSON data.
+ msg.GTSModel, err = resolveGTSModel(
+ imsg.GTSModelType,
+ imsg.GTSModel,
+ )
+ if err != nil {
+ return err
+ }
+
+ if imsg.OriginID != "" {
+ // Set origin account ID using a
+ // barebones model (later filled in).
+ msg.Origin = new(gtsmodel.Account)
+ msg.Origin.ID = imsg.OriginID
+ }
+
+ if imsg.TargetID != "" {
+ // Set target account ID using a
+ // barebones model (later filled in).
+ msg.Target = new(gtsmodel.Account)
+ msg.Target.ID = imsg.TargetID
+ }
+
+ return nil
+}
+
// ClientMsgIndices defines queue indices this
// message type should be accessible / stored under.
func ClientMsgIndices() []structr.IndexConfig {
@@ -91,6 +196,133 @@ type FromFediAPI struct {
Receiving *gtsmodel.Account
}
+// fromFediAPI is an internal type
+// for FromFediAPI that provides a
+// json serialize / deserialize -able
+// shape that minimizes required data.
+type fromFediAPI struct {
+ APObjectType string `json:"ap_object_type,omitempty"`
+ APActivityType string `json:"ap_activity_type,omitempty"`
+ APIRI string `json:"ap_iri,omitempty"`
+ APObject map[string]interface{} `json:"ap_object,omitempty"`
+ GTSModel json.RawMessage `json:"gts_model,omitempty"`
+ GTSModelType string `json:"gts_model_type,omitempty"`
+ TargetURI string `json:"target_uri,omitempty"`
+ RequestingID string `json:"requesting_id,omitempty"`
+ ReceivingID string `json:"receiving_id,omitempty"`
+}
+
+// Serialize will serialize the worker data as a data blob for storage.
+// Note that this will flatten some of the data, e.g. to only account IDs.
+func (msg *FromFediAPI) Serialize() ([]byte, error) {
+ var (
+ gtsModelType string
+ apIRI string
+ apObject map[string]interface{}
+ requestingID string
+ receivingID string
+ )
+
+ // Set AP IRI string.
+ if msg.APIRI != nil {
+ apIRI = msg.APIRI.String()
+ }
+
+ // Set serialized AP object data if set.
+ if t, ok := msg.APObject.(vocab.Type); ok {
+ obj, err := t.Serialize()
+ if err != nil {
+ return nil, err
+ }
+ apObject = obj
+ }
+
+ // Set database model type if any provided.
+ if t := reflect.TypeOf(msg.GTSModel); t != nil {
+ gtsModelType = t.String()
+ }
+
+ // Set requesting account ID.
+ if msg.Requesting != nil {
+ requestingID = msg.Requesting.ID
+ }
+
+ // Set receiving account ID.
+ if msg.Receiving != nil {
+ receivingID = msg.Receiving.ID
+ }
+
+ // Marshal GTS model as raw JSON block.
+ modelJSON, err := json.Marshal(msg.GTSModel)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal as internal JSON type.
+ return json.Marshal(fromFediAPI{
+ APObjectType: msg.APObjectType,
+ APActivityType: msg.APActivityType,
+ APIRI: apIRI,
+ APObject: apObject,
+ GTSModel: modelJSON,
+ GTSModelType: gtsModelType,
+ TargetURI: msg.TargetURI,
+ RequestingID: requestingID,
+ ReceivingID: receivingID,
+ })
+}
+
+// Deserialize will attempt to deserialize a blob of task data,
+// which will involve unflattening previously serialized data,
+// leaving some message structures as placeholders holding only IDs.
+func (msg *FromFediAPI) Deserialize(data []byte) error {
+ var imsg fromFediAPI
+
+ // Unmarshal as internal JSON type.
+ err := json.Unmarshal(data, &imsg)
+ if err != nil {
+ return err
+ }
+
+ // Copy over the simplest fields.
+ msg.APObjectType = imsg.APObjectType
+ msg.APActivityType = imsg.APActivityType
+ msg.TargetURI = imsg.TargetURI
+
+ // Resolve AP object from JSON data.
+ msg.APObject, err = resolveAPObject(
+ imsg.APObject,
+ )
+ if err != nil {
+ return err
+ }
+
+ // Resolve Go type from JSON data.
+ msg.GTSModel, err = resolveGTSModel(
+ imsg.GTSModelType,
+ imsg.GTSModel,
+ )
+ if err != nil {
+ return err
+ }
+
+ if imsg.RequestingID != "" {
+ // Set requesting account ID using a
+ // barebones model (later filled in).
+ msg.Requesting = new(gtsmodel.Account)
+ msg.Requesting.ID = imsg.RequestingID
+ }
+
+ if imsg.ReceivingID != "" {
+ // Set receiving account ID using a
+ // barebones model (later filled in).
+ msg.Receiving = new(gtsmodel.Account)
+ msg.Receiving.ID = imsg.ReceivingID
+ }
+
+ return nil
+}
+
// FederatorMsgIndices defines queue indices this
// message type should be accessible / stored under.
func FederatorMsgIndices() []structr.IndexConfig {
@@ -101,3 +333,61 @@ func FederatorMsgIndices() []structr.IndexConfig {
{Fields: "Receiving.ID", Multiple: true},
}
}
+
+// resolveAPObject resolves an ActivityPub object from its "serialized" JSON map
+// (yes the terminology here is weird, but that's how go-fed/activity is written).
+func resolveAPObject(data map[string]interface{}) (interface{}, error) {
+ if len(data) == 0 {
+ // No data given.
+ return nil, nil
+ }
+
+ // Resolve vocab.Type from "raw" input data map.
+ return streams.ToType(context.Background(), data)
+}
+
+// resolveGTSModel is unfortunately where things get messy... our data is stored as JSON
+// in the database, which serializes struct types as key-value pairs surrounded by curly
+// braces. Deserializing from that gives us back a data blob of key-value pairs, which
+// we then need to wrangle back into the original type. So we also store the type name
+// and use this to determine the appropriate Go structure type to unmarshal into.
+func resolveGTSModel(typ string, data []byte) (interface{}, error) {
+ if typ == "" && data == nil {
+ // No data given.
+ return nil, nil
+ }
+
+ var value interface{}
+
+ switch typ {
+ case reflect.TypeOf((*gtsmodel.Account)(nil)).String():
+ value = new(gtsmodel.Account)
+ case reflect.TypeOf((*gtsmodel.Block)(nil)).String():
+ value = new(gtsmodel.Block)
+ case reflect.TypeOf((*gtsmodel.Follow)(nil)).String():
+ value = new(gtsmodel.Follow)
+ case reflect.TypeOf((*gtsmodel.FollowRequest)(nil)).String():
+ value = new(gtsmodel.FollowRequest)
+ case reflect.TypeOf((*gtsmodel.Move)(nil)).String():
+ value = new(gtsmodel.Move)
+ case reflect.TypeOf((*gtsmodel.Poll)(nil)).String():
+ value = new(gtsmodel.Poll)
+ case reflect.TypeOf((*gtsmodel.PollVote)(nil)).String():
+ value = new(gtsmodel.PollVote)
+ case reflect.TypeOf((*gtsmodel.Report)(nil)).String():
+ value = new(gtsmodel.Report)
+ case reflect.TypeOf((*gtsmodel.Status)(nil)).String():
+ value = new(gtsmodel.Status)
+ case reflect.TypeOf((*gtsmodel.StatusFave)(nil)).String():
+ value = new(gtsmodel.StatusFave)
+ default:
+ return nil, gtserror.Newf("unknown type: %s", typ)
+ }
+
+ // Attempt to unmarshal value JSON into destination.
+ if err := json.Unmarshal(data, &value); err != nil {
+ return nil, gtserror.Newf("error unmarshaling %s value data: %w", typ, err)
+ }
+
+ return value, nil
+}
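
The Serialize / Deserialize pair above round-trips through the internal fromClientAPI shape, with resolveGTSModel recovering the concrete model type from its stored type name. A minimal usage sketch follows; the IDs and the roundTrip helper are illustrative only and not part of this diff.

package example // hypothetical usage sketch, not part of this diff

import (
	"fmt"

	"github.com/superseriousbusiness/gotosocial/internal/ap"
	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/messages"
)

func roundTrip() error {
	msg := messages.FromClientAPI{
		APObjectType:   ap.ObjectNote,
		APActivityType: ap.ActivityCreate,
		GTSModel:       &gtsmodel.Status{ID: "01EXAMPLESTATUS"},
		Origin:         &gtsmodel.Account{ID: "01EXAMPLEACCOUNT"},
	}

	// Serialize flattens the message: the GTS model is stored as raw JSON
	// alongside its type name, and accounts are reduced to bare IDs.
	blob, err := msg.Serialize()
	if err != nil {
		return err
	}

	// Deserialize restores a concrete *gtsmodel.Status via resolveGTSModel,
	// and leaves Origin as a placeholder account holding only its ID,
	// to be filled in from the database later.
	var out messages.FromClientAPI
	if err := out.Deserialize(blob); err != nil {
		return err
	}

	fmt.Println(out.Origin.ID) // "01EXAMPLEACCOUNT"
	return nil
}
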
diff --git a/internal/messages/messages_test.go b/internal/messages/messages_test.go
new file mode 100644
index 000000000..e5b2a2841
--- /dev/null
+++ b/internal/messages/messages_test.go
@@ -0,0 +1,292 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package messages_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/url"
+ "testing"
+
+ "github.com/superseriousbusiness/gotosocial/internal/ap"
+ "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+ "github.com/superseriousbusiness/gotosocial/internal/messages"
+ "github.com/superseriousbusiness/gotosocial/testrig"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+var testStatus = testrig.NewTestStatuses()["admin_account_status_1"]
+
+var testAccount = testrig.NewTestAccounts()["admin_account"]
+
+var fromClientAPICases = []struct {
+ msg messages.FromClientAPI
+ data []byte
+}{
+ {
+ msg: messages.FromClientAPI{
+ APObjectType: ap.ObjectNote,
+ APActivityType: ap.ActivityCreate,
+ GTSModel: testStatus,
+ TargetURI: "https://gotosocial.org",
+ Origin: &gtsmodel.Account{ID: "654321"},
+ Target: &gtsmodel.Account{ID: "123456"},
+ },
+ data: toJSON(map[string]any{
+ "ap_object_type": ap.ObjectNote,
+ "ap_activity_type": ap.ActivityCreate,
+ "gts_model": json.RawMessage(toJSON(testStatus)),
+ "gts_model_type": "*gtsmodel.Status",
+ "target_uri": "https://gotosocial.org",
+ "origin_id": "654321",
+ "target_id": "123456",
+ }),
+ },
+ {
+ msg: messages.FromClientAPI{
+ APObjectType: ap.ObjectProfile,
+ APActivityType: ap.ActivityUpdate,
+ GTSModel: testAccount,
+ TargetURI: "https://uk-queen-is-dead.org",
+ Origin: &gtsmodel.Account{ID: "123456"},
+ Target: &gtsmodel.Account{ID: "654321"},
+ },
+ data: toJSON(map[string]any{
+ "ap_object_type": ap.ObjectProfile,
+ "ap_activity_type": ap.ActivityUpdate,
+ "gts_model": json.RawMessage(toJSON(testAccount)),
+ "gts_model_type": "*gtsmodel.Account",
+ "target_uri": "https://uk-queen-is-dead.org",
+ "origin_id": "123456",
+ "target_id": "654321",
+ }),
+ },
+}
+
+var fromFediAPICases = []struct {
+ msg messages.FromFediAPI
+ data []byte
+}{
+ {
+ msg: messages.FromFediAPI{
+ APObjectType: ap.ObjectNote,
+ APActivityType: ap.ActivityCreate,
+ GTSModel: testStatus,
+ TargetURI: "https://gotosocial.org",
+ Requesting: &gtsmodel.Account{ID: "654321"},
+ Receiving: &gtsmodel.Account{ID: "123456"},
+ },
+ data: toJSON(map[string]any{
+ "ap_object_type": ap.ObjectNote,
+ "ap_activity_type": ap.ActivityCreate,
+ "gts_model": json.RawMessage(toJSON(testStatus)),
+ "gts_model_type": "*gtsmodel.Status",
+ "target_uri": "https://gotosocial.org",
+ "requesting_id": "654321",
+ "receiving_id": "123456",
+ }),
+ },
+ {
+ msg: messages.FromFediAPI{
+ APObjectType: ap.ObjectProfile,
+ APActivityType: ap.ActivityUpdate,
+ GTSModel: testAccount,
+ TargetURI: "https://uk-queen-is-dead.org",
+ Requesting: &gtsmodel.Account{ID: "123456"},
+ Receiving: &gtsmodel.Account{ID: "654321"},
+ },
+ data: toJSON(map[string]any{
+ "ap_object_type": ap.ObjectProfile,
+ "ap_activity_type": ap.ActivityUpdate,
+ "gts_model": json.RawMessage(toJSON(testAccount)),
+ "gts_model_type": "*gtsmodel.Account",
+ "target_uri": "https://uk-queen-is-dead.org",
+ "requesting_id": "123456",
+ "receiving_id": "654321",
+ }),
+ },
+}
+
+func TestSerializeFromClientAPI(t *testing.T) {
+ for _, test := range fromClientAPICases {
+ // Serialize test message to blob.
+ data, err := test.msg.Serialize()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check serialized JSON data as expected.
+ assertJSONEqual(t, test.data, data)
+ }
+}
+
+func TestDeserializeFromClientAPI(t *testing.T) {
+ for _, test := range fromClientAPICases {
+ var msg messages.FromClientAPI
+
+ // Deserialize test message blob.
+ err := msg.Deserialize(test.data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that msg is as expected.
+ assertEqual(t, test.msg.APActivityType, msg.APActivityType)
+ assertEqual(t, test.msg.APObjectType, msg.APObjectType)
+ assertEqual(t, test.msg.GTSModel, msg.GTSModel)
+ assertEqual(t, test.msg.TargetURI, msg.TargetURI)
+ assertEqual(t, accountID(test.msg.Origin), accountID(msg.Origin))
+ assertEqual(t, accountID(test.msg.Target), accountID(msg.Target))
+
+ // Perform final check to ensure
+ // account model keys deserialized.
+ assertEqualRSA(t, test.msg.GTSModel, msg.GTSModel)
+ }
+}
+
+func TestSerializeFromFediAPI(t *testing.T) {
+ for _, test := range fromFediAPICases {
+ // Serialize test message to blob.
+ data, err := test.msg.Serialize()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check serialized JSON data as expected.
+ assertJSONEqual(t, test.data, data)
+ }
+}
+
+func TestDeserializeFromFediAPI(t *testing.T) {
+ for _, test := range fromFediAPICases {
+ var msg messages.FromFediAPI
+
+ // Deserialize test message blob.
+ err := msg.Deserialize(test.data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Check that msg is as expected.
+ assertEqual(t, test.msg.APActivityType, msg.APActivityType)
+ assertEqual(t, test.msg.APObjectType, msg.APObjectType)
+ assertEqual(t, urlStr(test.msg.APIRI), urlStr(msg.APIRI))
+ assertEqual(t, test.msg.APObject, msg.APObject)
+ assertEqual(t, test.msg.GTSModel, msg.GTSModel)
+ assertEqual(t, test.msg.TargetURI, msg.TargetURI)
+ assertEqual(t, accountID(test.msg.Receiving), accountID(msg.Receiving))
+ assertEqual(t, accountID(test.msg.Requesting), accountID(msg.Requesting))
+
+ // Perform final check to ensure
+ // account model keys deserialized.
+ assertEqualRSA(t, test.msg.GTSModel, msg.GTSModel)
+ }
+}
+
+// assertEqualRSA asserts that test account model RSA keys are equal.
+func assertEqualRSA(t *testing.T, expect, receive any) bool {
+ t.Helper()
+
+ account1, ok1 := expect.(*gtsmodel.Account)
+
+ account2, ok2 := receive.(*gtsmodel.Account)
+
+ if ok1 != ok2 {
+ t.Errorf("different model types: expect=%T receive=%T", expect, receive)
+ return false
+ } else if !ok1 {
+ return true
+ }
+
+ if !account1.PublicKey.Equal(account2.PublicKey) {
+ t.Error("public keys do not match")
+ return false
+ }
+
+ t.Logf("publickey=%v", account1.PublicKey)
+
+ if !account1.PrivateKey.Equal(account2.PrivateKey) {
+ t.Error("private keys do not match")
+ return false
+ }
+
+ t.Logf("privatekey=%v", account1.PrivateKey)
+
+ return true
+}
+
+// assertEqual asserts that two values (of any type!) are equal.
+// Note that we use the 'cmp' library here as it's much more useful in
+// outputting debug information than testify, and handles more complex
+// types like RSA public / private key comparisons correctly.
+func assertEqual(t *testing.T, expect, receive any) bool {
+ t.Helper()
+ if diff := cmp.Diff(expect, receive); diff != "" {
+ t.Error(diff)
+ return false
+ }
+ return true
+}
+
+// assertJSONEqual asserts that two slices of JSON data are equal.
+func assertJSONEqual(t *testing.T, expect, receive []byte) bool {
+ t.Helper()
+ return assertEqual(t, fromJSON(expect), fromJSON(receive))
+}
+
+// urlStr returns url as string, or empty.
+func urlStr(url *url.URL) string {
+ if url == nil {
+ return ""
+ }
+ return url.String()
+}
+
+// accountID returns account's ID, or empty.
+func accountID(account *gtsmodel.Account) string {
+ if account == nil {
+ return ""
+ }
+ return account.ID
+}
+
+// fromJSON unmarshals input data as JSON.
+func fromJSON(b []byte) any {
+ r := bytes.NewReader(b)
+ d := json.NewDecoder(r)
+ d.UseNumber()
+ var a any
+ err := d.Decode(&a)
+ if err != nil {
+ panic(err)
+ }
+ if d.More() {
+ panic("multiple json values in b")
+ }
+ return a
+}
+
+// toJSON marshals input type as JSON data.
+func toJSON(a any) []byte {
+ b, err := json.Marshal(a)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
diff --git a/internal/transport/deliver.go b/internal/transport/deliver.go
index a7e73465d..30435b86f 100644
--- a/internal/transport/deliver.go
+++ b/internal/transport/deliver.go
@@ -18,12 +18,12 @@
package transport
import (
+ "bytes"
"context"
"encoding/json"
"net/http"
"net/url"
- "codeberg.org/gruf/go-byteutil"
apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtscontext"
@@ -130,25 +130,28 @@ func (t *transport) prepare(
*delivery.Delivery,
error,
) {
- url := to.String()
-
- // Use rewindable reader for body.
- var body byteutil.ReadNopCloser
- body.Reset(data)
-
// Prepare POST signer.
sign := t.signPOST(data)
+ // Use *bytes.Reader for the request body,
+ // as NewRequest() will automatically set
+ // .GetBody and content-length for it
+ // (this handles necessary rewinding).
+ body := bytes.NewReader(data)
+
// Update to-be-used request context with signing details.
ctx = gtscontext.SetOutgoingPublicKeyID(ctx, t.pubKeyID)
ctx = gtscontext.SetHTTPClientSignFunc(ctx, sign)
// Prepare a new request with data body directed at URL.
- r, err := http.NewRequestWithContext(ctx, "POST", url, &body)
+ r, err := http.NewRequestWithContext(ctx, "POST", to.String(), body)
if err != nil {
return nil, gtserror.Newf("error preparing request: %w", err)
}
+ // Set our predefined controller user-agent.
+ r.Header.Set("User-Agent", t.controller.userAgent)
+
// Set the standard ActivityPub content-type + charset headers.
r.Header.Add("Content-Type", string(apiutil.AppActivityLDJSON))
r.Header.Add("Accept-Charset", "utf-8")
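
For reference, the stdlib behaviour the new comment relies on can be demonstrated standalone: http.NewRequest recognises a *bytes.Reader body and populates ContentLength and GetBody itself, which is what makes the manual rewinding (removed from the signing transport above) unnecessary. This snippet is illustrative only and not part of this diff.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	data := []byte(`{"type":"Create"}`)

	// NewRequest sets ContentLength and a GetBody func for *bytes.Reader
	// (and *bytes.Buffer / *strings.Reader) bodies automatically.
	r, err := http.NewRequest("POST", "https://example.org/inbox", bytes.NewReader(data))
	if err != nil {
		panic(err)
	}

	fmt.Println(r.ContentLength) // 17

	// GetBody returns a fresh reader over the same bytes, so a retry
	// (or a signing round-trip) can re-read the body from the start.
	body, _ := r.GetBody()
	b, _ := io.ReadAll(body)
	fmt.Println(len(b) == len(data)) // true
}
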
diff --git a/internal/transport/delivery/delivery.go b/internal/transport/delivery/delivery.go
index 1e9126b2e..1e3ebb054 100644
--- a/internal/transport/delivery/delivery.go
+++ b/internal/transport/delivery/delivery.go
@@ -18,16 +18,13 @@
package delivery
import (
- "context"
- "slices"
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
"time"
- "codeberg.org/gruf/go-runners"
- "codeberg.org/gruf/go-structr"
- "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
"github.com/superseriousbusiness/gotosocial/internal/httpclient"
- "github.com/superseriousbusiness/gotosocial/internal/queue"
- "github.com/superseriousbusiness/gotosocial/internal/util"
)
// Delivery wraps an httpclient.Request{}
@@ -36,6 +33,9 @@
// be indexed (and so, dropped from queue)
// by any of these possible ID IRIs.
type Delivery struct {
+ // PubKeyID is the signing public key
+ // ID of the actor performing request.
+ PubKeyID string
// ActorID contains the ActivityPub
// actor ID IRI (if any) of the activity
@@ -61,273 +61,98 @@ type Delivery struct {
next time.Time
}
+// delivery is an internal type
+// for Delivery{} that provides
+// a json serialize / deserialize
+// able shape that minimizes data.
+type delivery struct {
+ PubKeyID string `json:"pub_key_id,omitempty"`
+ ActorID string `json:"actor_id,omitempty"`
+ ObjectID string `json:"object_id,omitempty"`
+ TargetID string `json:"target_id,omitempty"`
+ Method string `json:"method,omitempty"`
+ Header map[string][]string `json:"header,omitempty"`
+ URL string `json:"url,omitempty"`
+ Body []byte `json:"body,omitempty"`
+}
+
+// Serialize will serialize the delivery data as a data blob for storage.
+// Note that this will flatten some of the data, dropping signing funcs.
+func (dlv *Delivery) Serialize() ([]byte, error) {
+ var body []byte
+
+ if dlv.Request.GetBody != nil {
+ // Fetch a fresh copy of request body.
+ rbody, err := dlv.Request.GetBody()
+ if err != nil {
+ return nil, err
+ }
+
+ // Read request body into memory.
+ body, err = io.ReadAll(rbody)
+
+ // Done with body.
+ _ = rbody.Close()
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Marshal as internal JSON type.
+ return json.Marshal(delivery{
+ PubKeyID: dlv.PubKeyID,
+ ActorID: dlv.ActorID,
+ ObjectID: dlv.ObjectID,
+ TargetID: dlv.TargetID,
+ Method: dlv.Request.Method,
+ Header: dlv.Request.Header,
+ URL: dlv.Request.URL.String(),
+ Body: body,
+ })
+}
+
+// Deserialize will attempt to deserialize a blob of task data,
+// which will involve unflattening previously serialized data,
+// leaving the delivery incomplete and still requiring signing func setup.
+func (dlv *Delivery) Deserialize(data []byte) error {
+ var idlv delivery
+
+ // Unmarshal as internal JSON type.
+ err := json.Unmarshal(data, &idlv)
+ if err != nil {
+ return err
+ }
+
+ // Copy over simplest fields.
+ dlv.PubKeyID = idlv.PubKeyID
+ dlv.ActorID = idlv.ActorID
+ dlv.ObjectID = idlv.ObjectID
+ dlv.TargetID = idlv.TargetID
+
+ var body io.Reader
+
+ if idlv.Body != nil {
+ // Create new body reader from data.
+ body = bytes.NewReader(idlv.Body)
+ }
+
+ // Create a new request object from unmarshaled details.
+ r, err := http.NewRequest(idlv.Method, idlv.URL, body)
+ if err != nil {
+ return err
+ }
+
+ // Wrap request in httpclient type.
+ dlv.Request = httpclient.WrapRequest(r)
+
+ return nil
+}
+
+// backoff returns a valid (>= 0) backoff duration.
func (dlv *Delivery) backoff() time.Duration {
if dlv.next.IsZero() {
return 0
}
return time.Until(dlv.next)
}
-
-// WorkerPool wraps multiple Worker{}s in
-// a singular struct for easy multi start/stop.
-type WorkerPool struct {
-
- // Client defines httpclient.Client{}
- // passed to each of delivery pool Worker{}s.
- Client *httpclient.Client
-
- // Queue is the embedded queue.StructQueue{}
- // passed to each of delivery pool Worker{}s.
- Queue queue.StructQueue[*Delivery]
-
- // internal fields.
- workers []*Worker
-}
-
-// Init will initialize the Worker{} pool
-// with given http client, request queue to pull
-// from and number of delivery workers to spawn.
-func (p *WorkerPool) Init(client *httpclient.Client) {
- p.Client = client
- p.Queue.Init(structr.QueueConfig[*Delivery]{
- Indices: []structr.IndexConfig{
- {Fields: "ActorID", Multiple: true},
- {Fields: "ObjectID", Multiple: true},
- {Fields: "TargetID", Multiple: true},
- },
- })
-}
-
-// Start will attempt to start 'n' Worker{}s.
-func (p *WorkerPool) Start(n int) {
- // Check whether workers are
- // set (is already running).
- ok := (len(p.workers) > 0)
- if ok {
- return
- }
-
- // Allocate new workers slice.
- p.workers = make([]*Worker, n)
- for i := range p.workers {
-
- // Allocate new Worker{}.
- p.workers[i] = new(Worker)
- p.workers[i].Client = p.Client
- p.workers[i].Queue = &p.Queue
-
- // Attempt to start worker.
- // Return bool not useful
- // here, as true = started,
- // false = already running.
- _ = p.workers[i].Start()
- }
-}
-
-// Stop will attempt to stop contained Worker{}s.
-func (p *WorkerPool) Stop() {
- // Check whether workers are
- // set (is currently running).
- ok := (len(p.workers) == 0)
- if ok {
- return
- }
-
- // Stop all running workers.
- for i := range p.workers {
-
- // return bool not useful
- // here, as true = stopped,
- // false = never running.
- _ = p.workers[i].Stop()
- }
-
- // Unset workers slice.
- p.workers = p.workers[:0]
-}
-
-// Worker wraps an httpclient.Client{} to feed
-// from queue.StructQueue{} for ActivityPub reqs
-// to deliver. It does so while prioritizing new
-// queued requests over backlogged retries.
-type Worker struct {
-
- // Client is the httpclient.Client{} that
- // delivery worker will use for requests.
- Client *httpclient.Client
-
- // Queue is the Delivery{} message queue
- // that delivery worker will feed from.
- Queue *queue.StructQueue[*Delivery]
-
- // internal fields.
- backlog []*Delivery
- service runners.Service
-}
-
-// Start will attempt to start the Worker{}.
-func (w *Worker) Start() bool {
- return w.service.GoRun(w.run)
-}
-
-// Stop will attempt to stop the Worker{}.
-func (w *Worker) Stop() bool {
- return w.service.Stop()
-}
-
-// run wraps process to restart on any panic.
-func (w *Worker) run(ctx context.Context) {
- if w.Client == nil || w.Queue == nil {
- panic("not yet initialized")
- }
- util.Must(func() { w.process(ctx) })
-}
-
-// process is the main delivery worker processing routine.
-func (w *Worker) process(ctx context.Context) bool {
- if w.Client == nil || w.Queue == nil {
- // we perform this check here just
- // to ensure the compiler knows these
- // variables aren't nil in the loop,
- // even if already checked by caller.
- panic("not yet initialized")
- }
-
-loop:
- for {
- // Get next delivery.
- dlv, ok := w.next(ctx)
- if !ok {
- return true
- }
-
- // Check whether backoff required.
- const min = 100 * time.Millisecond
- if d := dlv.backoff(); d > min {
-
- // Start backoff sleep timer.
- backoff := time.NewTimer(d)
-
- select {
- case <-ctx.Done():
- // Main ctx
- // cancelled.
- backoff.Stop()
- return true
-
- case <-w.Queue.Wait():
- // A new message was
- // queued, re-add this
- // to backlog + retry.
- w.pushBacklog(dlv)
- backoff.Stop()
- continue loop
-
- case <-backoff.C:
- // success!
- }
- }
-
- // Attempt delivery of AP request.
- rsp, retry, err := w.Client.DoOnce(
- &dlv.Request,
- )
-
- if err == nil {
- // Ensure body closed.
- _ = rsp.Body.Close()
- continue loop
- }
-
- if !retry {
- // Drop deliveries when no
- // retry requested, or they
- // reached max (either).
- continue loop
- }
-
- // Determine next delivery attempt.
- backoff := dlv.Request.BackOff()
- dlv.next = time.Now().Add(backoff)
-
- // Push to backlog.
- w.pushBacklog(dlv)
- }
-}
-
-// next gets the next available delivery, blocking until available if necessary.
-func (w *Worker) next(ctx context.Context) (*Delivery, bool) {
-loop:
- for {
- // Try pop next queued.
- dlv, ok := w.Queue.Pop()
-
- if !ok {
- // Check the backlog.
- if len(w.backlog) > 0 {
-
- // Sort by 'next' time.
- sortDeliveries(w.backlog)
-
- // Pop next delivery.
- dlv := w.popBacklog()
-
- return dlv, true
- }
-
- select {
- // Backlog is empty, we MUST
- // block until next enqueued.
- case <-w.Queue.Wait():
- continue loop
-
- // Worker was stopped.
- case <-ctx.Done():
- return nil, false
- }
- }
-
- // Replace request context for worker state canceling.
- ctx := gtscontext.WithValues(ctx, dlv.Request.Context())
- dlv.Request.Request = dlv.Request.Request.WithContext(ctx)
-
- return dlv, true
- }
-}
-
-// popBacklog pops next available from the backlog.
-func (w *Worker) popBacklog() *Delivery {
- if len(w.backlog) == 0 {
- return nil
- }
-
- // Pop from backlog.
- dlv := w.backlog[0]
-
- // Shift backlog down by one.
- copy(w.backlog, w.backlog[1:])
- w.backlog = w.backlog[:len(w.backlog)-1]
-
- return dlv
-}
-
-// pushBacklog pushes the given delivery to backlog.
-func (w *Worker) pushBacklog(dlv *Delivery) {
- w.backlog = append(w.backlog, dlv)
-}
-
-// sortDeliveries sorts deliveries according
-// to when is the first requiring re-attempt.
-func sortDeliveries(d []*Delivery) {
- slices.SortFunc(d, func(a, b *Delivery) int {
- const k = +1
- switch {
- case a.next.Before(b.next):
- return +k
- case b.next.Before(a.next):
- return -k
- default:
- return 0
- }
- })
-}
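
On the shutdown side, the new Serialize method pairs naturally with the WorkerTask model added earlier: remaining deliveries can be popped off the queue and stored as blobs. The drainDeliveries helper and its storeTask callback below are hypothetical stand-ins for the real persistence code, which is not part of this diff.

package example // hypothetical shutdown sketch, not part of this diff

import (
	"time"

	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
	"github.com/superseriousbusiness/gotosocial/internal/queue"
	"github.com/superseriousbusiness/gotosocial/internal/transport/delivery"
)

// drainDeliveries pops any remaining deliveries off the queue and converts
// each one into a WorkerTask row via Serialize().
func drainDeliveries(q *queue.StructQueue[*delivery.Delivery], storeTask func(*gtsmodel.WorkerTask) error) error {
	for {
		dlv, ok := q.Pop()
		if !ok {
			// Queue is empty.
			return nil
		}

		data, err := dlv.Serialize()
		if err != nil {
			return err
		}

		if err := storeTask(&gtsmodel.WorkerTask{
			WorkerType: uint8(gtsmodel.DeliveryWorker),
			TaskData:   data,
			CreatedAt:  time.Now(),
		}); err != nil {
			return err
		}
	}
}
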
diff --git a/internal/transport/delivery/delivery_test.go b/internal/transport/delivery/delivery_test.go
index 48831f098..e9eaf8fd1 100644
--- a/internal/transport/delivery/delivery_test.go
+++ b/internal/transport/delivery/delivery_test.go
@@ -1,203 +1,134 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
package delivery_test
import (
- "fmt"
+ "bytes"
+ "encoding/json"
"io"
- "math/rand"
- "net"
"net/http"
- "strconv"
- "strings"
"testing"
- "codeberg.org/gruf/go-byteutil"
- "github.com/superseriousbusiness/gotosocial/internal/config"
+ "github.com/stretchr/testify/assert"
"github.com/superseriousbusiness/gotosocial/internal/httpclient"
- "github.com/superseriousbusiness/gotosocial/internal/queue"
"github.com/superseriousbusiness/gotosocial/internal/transport/delivery"
)
-func TestDeliveryWorkerPool(t *testing.T) {
- for _, i := range []int{1, 2, 4, 8, 16, 32} {
- t.Run("size="+strconv.Itoa(i), func(t *testing.T) {
- testDeliveryWorkerPool(t, i, generateInput(100*i))
- })
- }
-}
-
-func testDeliveryWorkerPool(t *testing.T, sz int, input []*testrequest) {
- wp := new(delivery.WorkerPool)
- wp.Init(httpclient.New(httpclient.Config{
- AllowRanges: config.MustParseIPPrefixes([]string{
- "127.0.0.0/8",
+var deliveryCases = []struct {
+ msg delivery.Delivery
+ data []byte
+}{
+ {
+ msg: delivery.Delivery{
+ PubKeyID: "https://google.com/users/bigboy#pubkey",
+ ActorID: "https://google.com/users/bigboy",
+ ObjectID: "https://google.com/users/bigboy/follow/1",
+ TargetID: "https://askjeeves.com/users/smallboy",
+ Request: toRequest("POST", "https://askjeeves.com/users/smallboy/inbox", []byte("data!")),
+ },
+ data: toJSON(map[string]any{
+ "pub_key_id": "https://google.com/users/bigboy#pubkey",
+ "actor_id": "https://google.com/users/bigboy",
+ "object_id": "https://google.com/users/bigboy/follow/1",
+ "target_id": "https://askjeeves.com/users/smallboy",
+ "method": "POST",
+ "url": "https://askjeeves.com/users/smallboy/inbox",
+ "body": []byte("data!"),
+ // "header": map[string][]string{},
}),
- }))
- wp.Start(sz)
- defer wp.Stop()
- test(t, &wp.Queue, input)
+ },
+ {
+ msg: delivery.Delivery{
+ Request: toRequest("GET", "https://google.com", []byte("uwu im just a wittle seawch engwin")),
+ },
+ data: toJSON(map[string]any{
+ "method": "GET",
+ "url": "https://google.com",
+ "body": []byte("uwu im just a wittle seawch engwin"),
+ // "header": map[string][]string{},
+ }),
+ },
}
-func test(
- t *testing.T,
- queue *queue.StructQueue[*delivery.Delivery],
- input []*testrequest,
-) {
- expect := make(chan *testrequest)
- errors := make(chan error)
-
- // Prepare an HTTP test handler that ensures expected delivery is received.
- handler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
- errors <- (<-expect).Equal(r)
- })
-
- // Start new HTTP test server listener.
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatal(err)
- }
- defer l.Close()
-
- // Start the HTTP server.
- //
- // specifically not using httptest.Server{} here as httptest
- // links that server with its own http.Client{}, whereas we're
- // using an httpclient.Client{} (well, delivery routine is).
- srv := new(http.Server)
- srv.Addr = "http://" + l.Addr().String()
- srv.Handler = handler
- go srv.Serve(l)
- defer srv.Close()
-
- // Range over test input.
- for _, test := range input {
-
- // Generate req for input.
- req := test.Generate(srv.Addr)
- r := httpclient.WrapRequest(req)
-
- // Wrap the request in delivery.
- dlv := new(delivery.Delivery)
- dlv.Request = r
-
- // Enqueue delivery!
- queue.Push(dlv)
- expect <- test
-
- // Wait for errors from handler.
- if err := <-errors; err != nil {
- t.Error(err)
+func TestSerializeDelivery(t *testing.T) {
+ for _, test := range deliveryCases {
+ // Serialize test message to blob.
+ data, err := test.msg.Serialize()
+ if err != nil {
+ t.Fatal(err)
}
+
+ // Check that serialized JSON data is as expected.
+ assert.JSONEq(t, string(test.data), string(data))
}
}
-type testrequest struct {
- method string
- uri string
- body []byte
-}
+func TestDeserializeDelivery(t *testing.T) {
+ for _, test := range deliveryCases {
+ var msg delivery.Delivery
-// generateInput generates 'n' many testrequest cases.
-func generateInput(n int) []*testrequest {
- tests := make([]*testrequest, n)
- for i := range tests {
- tests[i] = new(testrequest)
- tests[i].method = randomMethod()
- tests[i].uri = randomURI()
- tests[i].body = randomBody(tests[i].method)
- }
- return tests
-}
+ // Deserialize test message blob.
+ err := msg.Deserialize(test.data)
+ if err != nil {
+ t.Fatal(err)
+ }
-var methods = []string{
- http.MethodConnect,
- http.MethodDelete,
- http.MethodGet,
- http.MethodHead,
- http.MethodOptions,
- http.MethodPatch,
- http.MethodPost,
- http.MethodPut,
- http.MethodTrace,
-}
-
-// randomMethod generates a random http method.
-func randomMethod() string {
- return methods[rand.Intn(len(methods))]
-}
-
-// randomURI generates a random http uri.
-func randomURI() string {
- n := rand.Intn(5)
- p := make([]string, n)
- for i := range p {
- p[i] = strconv.Itoa(rand.Int())
- }
- return "/" + strings.Join(p, "/")
-}
-
-// randomBody generates a random http body DEPENDING on method.
-func randomBody(method string) []byte {
- if requiresBody(method) {
- return []byte(method + " " + randomURI())
- }
- return nil
-}
-
-// requiresBody returns whether method requires body.
-func requiresBody(method string) bool {
- switch method {
- case http.MethodPatch,
- http.MethodPost,
- http.MethodPut:
- return true
- default:
- return false
+ // Check that delivery fields are as expected.
+ assert.Equal(t, test.msg.PubKeyID, msg.PubKeyID)
+ assert.Equal(t, test.msg.ActorID, msg.ActorID)
+ assert.Equal(t, test.msg.ObjectID, msg.ObjectID)
+ assert.Equal(t, test.msg.TargetID, msg.TargetID)
+ assert.Equal(t, test.msg.Request.Method, msg.Request.Method)
+ assert.Equal(t, test.msg.Request.URL, msg.Request.URL)
+ assert.Equal(t, readBody(test.msg.Request.Body), readBody(msg.Request.Body))
}
}
-// Generate will generate a real http.Request{} from test data.
-func (t *testrequest) Generate(addr string) *http.Request {
- var body io.ReadCloser
- if t.body != nil {
- var b byteutil.ReadNopCloser
- b.Reset(t.body)
- body = &b
+// toRequest creates httpclient.Request from HTTP method, URL and body data.
+func toRequest(method string, url string, body []byte) httpclient.Request {
+ var rbody io.Reader
+ if body != nil {
+ rbody = bytes.NewReader(body)
}
- req, err := http.NewRequest(t.method, addr+t.uri, body)
+ req, err := http.NewRequest(method, url, rbody)
if err != nil {
panic(err)
}
- return req
+ return httpclient.WrapRequest(req)
}
-// Equal checks if request matches receiving test request.
-func (t *testrequest) Equal(r *http.Request) error {
- // Ensure methods match.
- if t.method != r.Method {
- return fmt.Errorf("differing request methods: t=%q r=%q", t.method, r.Method)
+// readBody reads the content of body io.ReadCloser into memory as byte slice.
+func readBody(r io.ReadCloser) []byte {
+ if r == nil {
+ return nil
}
-
- // Ensure request URIs match.
- if t.uri != r.URL.RequestURI() {
- return fmt.Errorf("differing request urls: t=%q r=%q", t.uri, r.URL.RequestURI())
+ b, err := io.ReadAll(r)
+ if err != nil {
+ panic(err)
}
-
- // Ensure body cases match.
- if requiresBody(t.method) {
-
- // Read request into memory.
- b, err := io.ReadAll(r.Body)
- if err != nil {
- return fmt.Errorf("error reading request body: %v", err)
- }
-
- // Compare the request bodies.
- st := strings.TrimSpace(string(t.body))
- sr := strings.TrimSpace(string(b))
- if st != sr {
- return fmt.Errorf("differing request bodies: t=%q r=%q", st, sr)
- }
- }
-
- return nil
+ return b
+}
+
+// toJSON marshals input type as JSON data.
+func toJSON(a any) []byte {
+ b, err := json.Marshal(a)
+ if err != nil {
+ panic(err)
+ }
+ return b
}
diff --git a/internal/transport/delivery/worker.go b/internal/transport/delivery/worker.go
new file mode 100644
index 000000000..1ed974e84
--- /dev/null
+++ b/internal/transport/delivery/worker.go
@@ -0,0 +1,298 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package delivery
+
+import (
+ "context"
+ "slices"
+ "time"
+
+ "codeberg.org/gruf/go-runners"
+ "codeberg.org/gruf/go-structr"
+ "github.com/superseriousbusiness/gotosocial/internal/gtscontext"
+ "github.com/superseriousbusiness/gotosocial/internal/httpclient"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
+ "github.com/superseriousbusiness/gotosocial/internal/queue"
+ "github.com/superseriousbusiness/gotosocial/internal/util"
+)
+
+// WorkerPool wraps multiple Worker{}s in
+// a singular struct for easy multi start/stop.
+type WorkerPool struct {
+
+ // Client defines httpclient.Client{}
+ // passed to each of delivery pool Worker{}s.
+ Client *httpclient.Client
+
+ // Queue is the embedded queue.StructQueue{}
+ // passed to each of delivery pool Worker{}s.
+ Queue queue.StructQueue[*Delivery]
+
+ // internal fields.
+ workers []*Worker
+}
+
+// Init will initialize the Worker{} pool
+// with given http client, request queue to pull
+// from and number of delivery workers to spawn.
+func (p *WorkerPool) Init(client *httpclient.Client) {
+ p.Client = client
+ p.Queue.Init(structr.QueueConfig[*Delivery]{
+ Indices: []structr.IndexConfig{
+ {Fields: "ActorID", Multiple: true},
+ {Fields: "ObjectID", Multiple: true},
+ {Fields: "TargetID", Multiple: true},
+ },
+ })
+}
+
+// Start will attempt to start 'n' Worker{}s.
+func (p *WorkerPool) Start(n int) {
+ // Check whether workers are
+ // set (is already running).
+ ok := (len(p.workers) > 0)
+ if ok {
+ return
+ }
+
+ // Allocate new workers slice.
+ p.workers = make([]*Worker, n)
+ for i := range p.workers {
+
+ // Allocate new Worker{}.
+ p.workers[i] = new(Worker)
+ p.workers[i].Client = p.Client
+ p.workers[i].Queue = &p.Queue
+
+ // Attempt to start worker.
+ // Return bool not useful
+ // here, as true = started,
+ // false = already running.
+ _ = p.workers[i].Start()
+ }
+}
+
+// Stop will attempt to stop contained Worker{}s.
+func (p *WorkerPool) Stop() {
+ // Check whether workers are
+ // set (is currently running).
+ ok := (len(p.workers) == 0)
+ if ok {
+ return
+ }
+
+ // Stop all running workers.
+ for i := range p.workers {
+
+ // return bool not useful
+ // here, as true = stopped,
+ // false = never running.
+ _ = p.workers[i].Stop()
+ }
+
+ // Unset workers slice.
+ p.workers = p.workers[:0]
+}
+
+// Worker wraps an httpclient.Client{} to feed
+// from queue.StructQueue{} for ActivityPub reqs
+// to deliver. It does so while prioritizing new
+// queued requests over backlogged retries.
+type Worker struct {
+
+ // Client is the httpclient.Client{} that
+ // delivery worker will use for requests.
+ Client *httpclient.Client
+
+ // Queue is the Delivery{} message queue
+ // that delivery worker will feed from.
+ Queue *queue.StructQueue[*Delivery]
+
+ // internal fields.
+ backlog []*Delivery
+ service runners.Service
+}
+
+// Start will attempt to start the Worker{}.
+func (w *Worker) Start() bool {
+ return w.service.GoRun(w.run)
+}
+
+// Stop will attempt to stop the Worker{}.
+func (w *Worker) Stop() bool {
+ return w.service.Stop()
+}
+
+// run wraps process to restart on any panic.
+func (w *Worker) run(ctx context.Context) {
+ if w.Client == nil || w.Queue == nil {
+ panic("not yet initialized")
+ }
+ log.Infof(ctx, "%p: starting worker", w)
+ defer log.Infof(ctx, "%p: stopped worker", w)
+ util.Must(func() { w.process(ctx) })
+}
+
+// process is the main delivery worker processing routine.
+func (w *Worker) process(ctx context.Context) bool {
+ if w.Client == nil || w.Queue == nil {
+ // we perform this check here just
+ // to ensure the compiler knows these
+ // variables aren't nil in the loop,
+ // even if already checked by caller.
+ panic("not yet initialized")
+ }
+
+loop:
+ for {
+ // Get next delivery.
+ dlv, ok := w.next(ctx)
+ if !ok {
+ return true
+ }
+
+ // Check whether backoff required.
+ const min = 100 * time.Millisecond
+ if d := dlv.backoff(); d > min {
+
+ // Start backoff sleep timer.
+ backoff := time.NewTimer(d)
+
+ select {
+ case <-ctx.Done():
+ // Main ctx
+ // cancelled.
+ backoff.Stop()
+ return true
+
+ case <-w.Queue.Wait():
+ // A new message was
+ // queued, re-add this
+ // to backlog + retry.
+ w.pushBacklog(dlv)
+ backoff.Stop()
+ continue loop
+
+ case <-backoff.C:
+ // success!
+ }
+ }
+
+ // Attempt delivery of AP request.
+ rsp, retry, err := w.Client.DoOnce(
+ &dlv.Request,
+ )
+
+ if err == nil {
+ // Ensure body closed.
+ _ = rsp.Body.Close()
+ continue loop
+ }
+
+ if !retry {
+ // Drop deliveries when no
+ // retry requested, or they
+ // reached max (either).
+ continue loop
+ }
+
+ // Determine next delivery attempt.
+ backoff := dlv.Request.BackOff()
+ dlv.next = time.Now().Add(backoff)
+
+ // Push to backlog.
+ w.pushBacklog(dlv)
+ }
+}
+
+// next gets the next available delivery, blocking until available if necessary.
+func (w *Worker) next(ctx context.Context) (*Delivery, bool) {
+loop:
+ for {
+ // Try pop next queued.
+ dlv, ok := w.Queue.Pop()
+
+ if !ok {
+ // Check the backlog.
+ if len(w.backlog) > 0 {
+
+ // Sort by 'next' time.
+ sortDeliveries(w.backlog)
+
+ // Pop next delivery.
+ dlv := w.popBacklog()
+
+ return dlv, true
+ }
+
+ select {
+ // Backlog is empty, we MUST
+ // block until next enqueued.
+ case <-w.Queue.Wait():
+ continue loop
+
+ // Worker was stopped.
+ case <-ctx.Done():
+ return nil, false
+ }
+ }
+
+ // Replace request context for worker state canceling.
+ ctx := gtscontext.WithValues(ctx, dlv.Request.Context())
+ dlv.Request.Request = dlv.Request.Request.WithContext(ctx)
+
+ return dlv, true
+ }
+}
+
+// popBacklog pops next available from the backlog.
+func (w *Worker) popBacklog() *Delivery {
+ if len(w.backlog) == 0 {
+ return nil
+ }
+
+ // Pop from backlog.
+ dlv := w.backlog[0]
+
+ // Shift backlog down by one.
+ copy(w.backlog, w.backlog[1:])
+ w.backlog = w.backlog[:len(w.backlog)-1]
+
+ return dlv
+}
+
+// pushBacklog pushes the given delivery to backlog.
+func (w *Worker) pushBacklog(dlv *Delivery) {
+ w.backlog = append(w.backlog, dlv)
+}
+
+// sortDeliveries sorts deliveries according
+// to when they next require re-attempting.
+func sortDeliveries(d []*Delivery) {
+ slices.SortFunc(d, func(a, b *Delivery) int {
+ const k = +1
+ switch {
+ case a.next.Before(b.next):
+ return +k
+ case b.next.Before(a.next):
+ return -k
+ default:
+ return 0
+ }
+ })
+}
diff --git a/internal/transport/delivery/worker_test.go b/internal/transport/delivery/worker_test.go
new file mode 100644
index 000000000..936ce6e1d
--- /dev/null
+++ b/internal/transport/delivery/worker_test.go
@@ -0,0 +1,220 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package delivery_test
+
+import (
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "testing"
+
+ "codeberg.org/gruf/go-byteutil"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+ "github.com/superseriousbusiness/gotosocial/internal/httpclient"
+ "github.com/superseriousbusiness/gotosocial/internal/queue"
+ "github.com/superseriousbusiness/gotosocial/internal/transport/delivery"
+)
+
+func TestDeliveryWorkerPool(t *testing.T) {
+ for _, i := range []int{1, 2, 4, 8, 16, 32} {
+ t.Run("size="+strconv.Itoa(i), func(t *testing.T) {
+ testDeliveryWorkerPool(t, i, generateInput(100*i))
+ })
+ }
+}
+
+func testDeliveryWorkerPool(t *testing.T, sz int, input []*testrequest) {
+ wp := new(delivery.WorkerPool)
+ wp.Init(httpclient.New(httpclient.Config{
+ AllowRanges: config.MustParseIPPrefixes([]string{
+ "127.0.0.0/8",
+ }),
+ }))
+ wp.Start(sz)
+ defer wp.Stop()
+ test(t, &wp.Queue, input)
+}
+
+func test(
+ t *testing.T,
+ queue *queue.StructQueue[*delivery.Delivery],
+ input []*testrequest,
+) {
+ expect := make(chan *testrequest)
+ errors := make(chan error)
+
+ // Prepare an HTTP test handler that ensures expected delivery is received.
+ handler := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+ errors <- (<-expect).Equal(r)
+ })
+
+ // Start new HTTP test server listener.
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ // Start the HTTP server.
+ //
+ // specifically not using httptest.Server{} here as httptest
+ // links that server with its own http.Client{}, whereas we're
+ // using an httpclient.Client{} (well, delivery routine is).
+ srv := new(http.Server)
+ srv.Addr = "http://" + l.Addr().String()
+ srv.Handler = handler
+ go srv.Serve(l)
+ defer srv.Close()
+
+ // Range over test input.
+ for _, test := range input {
+
+ // Generate req for input.
+ req := test.Generate(srv.Addr)
+ r := httpclient.WrapRequest(req)
+
+ // Wrap the request in delivery.
+ dlv := new(delivery.Delivery)
+ dlv.Request = r
+
+ // Enqueue delivery!
+ queue.Push(dlv)
+ expect <- test
+
+ // Wait for errors from handler.
+ if err := <-errors; err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+type testrequest struct {
+ method string
+ uri string
+ body []byte
+}
+
+// generateInput generates 'n' many testrequest cases.
+func generateInput(n int) []*testrequest {
+ tests := make([]*testrequest, n)
+ for i := range tests {
+ tests[i] = new(testrequest)
+ tests[i].method = randomMethod()
+ tests[i].uri = randomURI()
+ tests[i].body = randomBody(tests[i].method)
+ }
+ return tests
+}
+
+var methods = []string{
+ http.MethodConnect,
+ http.MethodDelete,
+ http.MethodGet,
+ http.MethodHead,
+ http.MethodOptions,
+ http.MethodPatch,
+ http.MethodPost,
+ http.MethodPut,
+ http.MethodTrace,
+}
+
+// randomMethod generates a random http method.
+func randomMethod() string {
+ return methods[rand.Intn(len(methods))]
+}
+
+// randomURI generates a random http uri.
+func randomURI() string {
+ n := rand.Intn(5)
+ p := make([]string, n)
+ for i := range p {
+ p[i] = strconv.Itoa(rand.Int())
+ }
+ return "/" + strings.Join(p, "/")
+}
+
+// randomBody generates a random http body DEPENDING on method.
+func randomBody(method string) []byte {
+ if requiresBody(method) {
+ return []byte(method + " " + randomURI())
+ }
+ return nil
+}
+
+// requiresBody returns whether method requires body.
+func requiresBody(method string) bool {
+ switch method {
+ case http.MethodPatch,
+ http.MethodPost,
+ http.MethodPut:
+ return true
+ default:
+ return false
+ }
+}
+
+// Generate will generate a real http.Request{} from test data.
+func (t *testrequest) Generate(addr string) *http.Request {
+ var body io.ReadCloser
+ if t.body != nil {
+ var b byteutil.ReadNopCloser
+ b.Reset(t.body)
+ body = &b
+ }
+ req, err := http.NewRequest(t.method, addr+t.uri, body)
+ if err != nil {
+ panic(err)
+ }
+ return req
+}
+
+// Equal checks if request matches receiving test request.
+func (t *testrequest) Equal(r *http.Request) error {
+ // Ensure methods match.
+ if t.method != r.Method {
+ return fmt.Errorf("differing request methods: t=%q r=%q", t.method, r.Method)
+ }
+
+ // Ensure request URIs match.
+ if t.uri != r.URL.RequestURI() {
+ return fmt.Errorf("differing request urls: t=%q r=%q", t.uri, r.URL.RequestURI())
+ }
+
+ // Ensure body cases match.
+ if requiresBody(t.method) {
+
+ // Read request into memory.
+ b, err := io.ReadAll(r.Body)
+ if err != nil {
+ return fmt.Errorf("error reading request body: %v", err)
+ }
+
+ // Compare the request bodies.
+ st := strings.TrimSpace(string(t.body))
+ sr := strings.TrimSpace(string(b))
+ if st != sr {
+ return fmt.Errorf("differing request bodies: t=%q r=%q", st, sr)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
new file mode 100644
index 000000000..32017f8fa
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 000000000..0f5b8a48c
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,671 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// [reflect.DeepEqual] for comparing whether two values are semantically equal.
+// It is intended to only be used in tests, as performance is not a goal and
+// it may panic if it cannot compare the values. Its propensity towards
+// panicking means that its unsuitable for production environments where a
+// spurious panic may be fatal.
+//
+// The primary features of cmp are:
+//
+// - When the default behavior of equality does not suit the test's needs,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as
+// they are within some tolerance of each other.
+//
+// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
+// to determine equality. This allows package authors to determine
+// the equality operation for the types that they define.
+//
+// - If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on
+// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
+// unexported fields are not compared by default; they result in panics
+// unless suppressed by using an [Ignore] option
+// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
+// or explicitly compared using the [Exporter] option.
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+ "github.com/google/go-cmp/cmp/internal/function"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO(≥go1.18): Use any instead of interface{}.
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one [Ignore] exists in S, then the comparison is ignored.
+// If the number of [Transformer] and [Comparer] options in S is non-zero,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single [Transformer], then use that to transform
+// the current values and recursively call Equal on the output values.
+// If S contains a single [Comparer], then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
+//
+// - Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings,
+// and channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+//
+// Structs are equal if recursively calling Equal on all fields reports equal.
+// If a struct contains unexported fields, Equal panics unless an [Ignore] option
+// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
+// or the [Exporter] option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements reports equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries reports equal.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using
+// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
+//
+// Before recursing into a pointer, slice element, or map, the current path
+// is checked to detect whether the address has already been visited.
+// If there is a cycle, then the pointed at values are considered equal
+// only if both addresses were previously visited in the same path step.
+func Equal(x, y interface{}, opts ...Option) bool {
+ s := newState(opts)
+ s.compareAny(rootStep(x, y))
+ return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix indicates an element added from y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses the fmt.Stringer.String or error.Error methods to produce more
+// human-readable output. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+ s := newState(opts)
+
+ // Optimization: If there are no other reporters, we can optimize for the
+ // common case where the result is equal (and thus no reported difference).
+ // This avoids the expensive construction of a difference tree.
+ if len(s.reporters) == 0 {
+ s.compareAny(rootStep(x, y))
+ if s.result.Equal() {
+ return ""
+ }
+ s.result = diff.Result{} // Reset results
+ }
+
+ r := new(defaultReporter)
+ s.reporters = append(s.reporters, reporter{r})
+ s.compareAny(rootStep(x, y))
+ d := r.String()
+ if (d == "") != s.result.Equal() {
+ panic("inconsistent difference and equality results")
+ }
+ return d
+}
+
+// rootStep constructs the first path step. If x and y have differing types,
+// then they are stored within an empty interface type.
+func rootStep(x, y interface{}) PathStep {
+ vx := reflect.ValueOf(x)
+ vy := reflect.ValueOf(y)
+
+ // If the inputs are different types, auto-wrap them in an empty interface
+ // so that they have the same parent type.
+ var t reflect.Type
+ if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+ t = anyType
+ if vx.IsValid() {
+ vvx := reflect.New(t).Elem()
+ vvx.Set(vx)
+ vx = vvx
+ }
+ if vy.IsValid() {
+ vvy := reflect.New(t).Elem()
+ vvy.Set(vy)
+ vy = vvy
+ }
+ } else {
+ t = vx.Type()
+ }
+
+ return &pathStep{t, vx, vy}
+}
+
+type state struct {
+ // These fields represent the "comparison state".
+ // Calling statelessCompare must not result in observable changes to these.
+ result diff.Result // The current result of comparison
+ curPath Path // The current path in the value tree
+ curPtrs pointerPath // The current set of visited pointers
+ reporters []reporter // Optional reporters
+
+ // recChecker checks for infinite cycles applying the same set of
+ // transformers upon the output of itself.
+ recChecker recChecker
+
+ // dynChecker triggers pseudo-random checks for option correctness.
+ // It is safe for statelessCompare to mutate this value.
+ dynChecker dynChecker
+
+ // These fields, once set by processOption, will not change.
+ exporters []exporter // List of exporters for structs with unexported fields
+ opts Options // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+ // Always ensure a validator option exists to validate the inputs.
+ s := &state{opts: Options{validator{}}}
+ s.curPtrs.Init()
+ s.processOption(Options(opts))
+ return s
+}
+
+func (s *state) processOption(opt Option) {
+ switch opt := opt.(type) {
+ case nil:
+ case Options:
+ for _, o := range opt {
+ s.processOption(o)
+ }
+ case coreOption:
+ type filtered interface {
+ isFiltered() bool
+ }
+ if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+ panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+ }
+ s.opts = append(s.opts, opt)
+ case exporter:
+ s.exporters = append(s.exporters, opt)
+ case reporter:
+ s.reporters = append(s.reporters, opt)
+ default:
+ panic(fmt.Sprintf("unknown option %T", opt))
+ }
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(step PathStep) diff.Result {
+ // We do not save and restore curPath and curPtrs because all of the
+ // compareX methods should properly push and pop from them.
+ // It is an implementation bug if the contents of the paths differ from
+ // when calling this function to when returning from it.
+
+ oldResult, oldReporters := s.result, s.reporters
+ s.result = diff.Result{} // Reset result
+ s.reporters = nil // Remove reporters to avoid spurious printouts
+ s.compareAny(step)
+ res := s.result
+ s.result, s.reporters = oldResult, oldReporters
+ return res
+}
+
+func (s *state) compareAny(step PathStep) {
+ // Update the path stack.
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for _, r := range s.reporters {
+ r.PushStep(step)
+ defer r.PopStep()
+ }
+ s.recChecker.Check(s.curPath)
+
+ // Cycle-detection for slice elements (see NOTE in compareSlice).
+ t := step.Type()
+ vx, vy := step.Values()
+ if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
+ px, py := vx.Addr(), vy.Addr()
+ if eq, visited := s.curPtrs.Push(px, py); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(px, py)
+ }
+
+ // Rule 1: Check whether an option applies on this node in the value tree.
+ if s.tryOptions(t, vx, vy) {
+ return
+ }
+
+ // Rule 2: Check whether the type has a valid Equal method.
+ if s.tryMethod(t, vx, vy) {
+ return
+ }
+
+ // Rule 3: Compare based on the underlying kind.
+ switch t.Kind() {
+ case reflect.Bool:
+ s.report(vx.Bool() == vy.Bool(), 0)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s.report(vx.Int() == vy.Int(), 0)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ s.report(vx.Uint() == vy.Uint(), 0)
+ case reflect.Float32, reflect.Float64:
+ s.report(vx.Float() == vy.Float(), 0)
+ case reflect.Complex64, reflect.Complex128:
+ s.report(vx.Complex() == vy.Complex(), 0)
+ case reflect.String:
+ s.report(vx.String() == vy.String(), 0)
+ case reflect.Chan, reflect.UnsafePointer:
+ s.report(vx.Pointer() == vy.Pointer(), 0)
+ case reflect.Func:
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ case reflect.Struct:
+ s.compareStruct(t, vx, vy)
+ case reflect.Slice, reflect.Array:
+ s.compareSlice(t, vx, vy)
+ case reflect.Map:
+ s.compareMap(t, vx, vy)
+ case reflect.Ptr:
+ s.comparePtr(t, vx, vy)
+ case reflect.Interface:
+ s.compareInterface(t, vx, vy)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+ }
+}
+
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
+ // Evaluate all filters and apply the remaining options.
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil {
+ opt.apply(s, vx, vy)
+ return true
+ }
+ return false
+}
+
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
+ // Check if this type even has an Equal method.
+ m, ok := t.MethodByName("Equal")
+ if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+ return false
+ }
+
+ eq := s.callTTBFunc(m.Func, vx, vy)
+ s.report(eq, reportByMethod)
+ return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{v})[0]
+ }
+
+ // Run the function twice and ensure that we get the same results back.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, v)
+ got := <-c
+ want := f.Call([]reflect.Value{v})[0]
+ if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
+ // To avoid false-positives with non-reflexive equality operations,
+ // we sanity check whether a value is equal to itself.
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+ return want
+ }
+ panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{x, y})[0].Bool()
+ }
+
+ // Swapping the input arguments is sufficient to check that
+ // f is symmetric and deterministic.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, y, x)
+ got := <-c
+ want := f.Call([]reflect.Value{x, y})[0].Bool()
+ if !got.IsValid() || got.Bool() != want {
+ panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+ var ret reflect.Value
+ defer func() {
+ recover() // Ignore panics, let the other call to f panic instead
+ c <- ret
+ }()
+ ret = f.Call(vs)[0]
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+ var addr bool
+ var vax, vay reflect.Value // Addressable versions of vx and vy
+
+ var mayForce, mayForceInit bool
+ step := StructField{&structField{}}
+ for i := 0; i < t.NumField(); i++ {
+ step.typ = t.Field(i).Type
+ step.vx = vx.Field(i)
+ step.vy = vy.Field(i)
+ step.name = t.Field(i).Name
+ step.idx = i
+ step.unexported = !isExported(step.name)
+ if step.unexported {
+ if step.name == "_" {
+ continue
+ }
+ // Defer checking of unexported fields until later to give an
+ // Ignore a chance to ignore the field.
+ if !vax.IsValid() || !vay.IsValid() {
+ // For retrieveUnexportedField to work, the parent struct must
+ // be addressable. Create a new copy of the values if
+ // necessary to make them addressable.
+ addr = vx.CanAddr() || vy.CanAddr()
+ vax = makeAddressable(vx)
+ vay = makeAddressable(vy)
+ }
+ if !mayForceInit {
+ for _, xf := range s.exporters {
+ mayForce = mayForce || xf(t)
+ }
+ mayForceInit = true
+ }
+ step.mayForce = mayForce
+ step.paddr = addr
+ step.pvx = vax
+ step.pvy = vay
+ step.field = t.Field(i)
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+ isSlice := t.Kind() == reflect.Slice
+ if isSlice && (vx.IsNil() || vy.IsNil()) {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
+ // since a slice represents a list of pointers, rather than a single pointer.
+ // The pointer checking logic must be handled on a per-element basis
+ // in compareAny.
+ //
+ // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
+ // pointer P, a length N, and a capacity C. Supposing each slice element has
+ // a memory size of M, then the slice is equivalent to the list of pointers:
+ // [P+i*M for i in range(N)]
+ //
+ // For example, v[:0] and v[:1] are slices with the same starting pointer,
+ // but they are clearly different values. Using the slice pointer alone
+ // violates the assumption that equal pointers implies equal values.
+
+ step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
+ withIndexes := func(ix, iy int) SliceIndex {
+ if ix >= 0 {
+ step.vx, step.xkey = vx.Index(ix), ix
+ } else {
+ step.vx, step.xkey = reflect.Value{}, -1
+ }
+ if iy >= 0 {
+ step.vy, step.ykey = vy.Index(iy), iy
+ } else {
+ step.vy, step.ykey = reflect.Value{}, -1
+ }
+ return step
+ }
+
+ // Ignore options are able to ignore missing elements in a slice.
+ // However, detecting these reliably requires an optimal differencing
+ // algorithm, which diff.Difference is not.
+ //
+ // Instead, we first iterate through both slices to detect which elements
+ // would be ignored if standing alone. The indexes of non-discarded elements
+ // are stored in a separate slice, on which diffing is then performed.
+ var indexesX, indexesY []int
+ var ignoredX, ignoredY []bool
+ for ix := 0; ix < vx.Len(); ix++ {
+ ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+ if !ignored {
+ indexesX = append(indexesX, ix)
+ }
+ ignoredX = append(ignoredX, ignored)
+ }
+ for iy := 0; iy < vy.Len(); iy++ {
+ ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+ if !ignored {
+ indexesY = append(indexesY, iy)
+ }
+ ignoredY = append(ignoredY, ignored)
+ }
+
+ // Compute an edit-script for slices vx and vy (excluding ignored elements).
+ edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+ return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+ })
+
+ // Replay the ignore-scripts and the edit-script.
+ var ix, iy int
+ for ix < vx.Len() || iy < vy.Len() {
+ var e diff.EditType
+ switch {
+ case ix < len(ignoredX) && ignoredX[ix]:
+ e = diff.UniqueX
+ case iy < len(ignoredY) && ignoredY[iy]:
+ e = diff.UniqueY
+ default:
+ e, edits = edits[0], edits[1:]
+ }
+ switch e {
+ case diff.UniqueX:
+ s.compareAny(withIndexes(ix, -1))
+ ix++
+ case diff.UniqueY:
+ s.compareAny(withIndexes(-1, iy))
+ iy++
+ default:
+ s.compareAny(withIndexes(ix, iy))
+ ix++
+ iy++
+ }
+ }
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for maps.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ // We combine and sort the two map keys so that we can perform the
+ // comparisons in a deterministic order.
+ step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+ for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+ step.vx = vx.MapIndex(k)
+ step.vy = vy.MapIndex(k)
+ step.key = k
+ if !step.vx.IsValid() && !step.vy.IsValid() {
+ // It is possible for both vx and vy to be invalid if the
+ // key contained a NaN value in it.
+ //
+ // Even with the ability to retrieve NaN keys in Go 1.12,
+ // there still isn't a sensible way to compare the values since
+ // a NaN key may map to multiple unordered values.
+ // The most reasonable way to compare NaNs would be to compare the
+ // set of values. However, this is impossible to do efficiently
+ // since set equality is provably an O(n^2) operation given only
+ // an Equal function. If we had a Less function or Hash function,
+ // this could be done in O(n*log(n)) or O(n), respectively.
+ //
+ // Rather than adding complex logic to deal with NaNs, make it
+ // the user's responsibility to compare such obscure maps.
+ const help = "consider providing a Comparer to compare the map"
+ panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for pointers.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ vx, vy = vx.Elem(), vy.Elem()
+ s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+ vx, vy = vx.Elem(), vy.Elem()
+ if vx.Type() != vy.Type() {
+ s.report(false, 0)
+ return
+ }
+ s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+ if rf&reportByIgnore == 0 {
+ if eq {
+ s.result.NumSame++
+ rf |= reportEqual
+ } else {
+ s.result.NumDiff++
+ rf |= reportUnequal
+ }
+ }
+ for _, r := range s.reporters {
+ r.Report(Result{flags: rf})
+ }
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+ const minLen = 1 << 16
+ if rc.next == 0 {
+ rc.next = minLen
+ }
+ if len(p) < rc.next {
+ return
+ }
+ rc.next <<= 1
+
+ // Check whether the same transformer has appeared at least twice.
+ var ss []string
+ m := map[Option]int{}
+ for _, ps := range p {
+ if t, ok := ps.(Transform); ok {
+ t := t.Option()
+ if m[t] == 1 { // Transformer was used exactly once before
+ tf := t.(*transformer).fnc.Type()
+ ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+ }
+ m[t]++
+ }
+ }
+ if len(ss) > 0 {
+ const warning = "recursive set of Transformers detected"
+ const help = "consider using cmpopts.AcyclicTransformer"
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+ }
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//
+// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+ ok := dc.curr == dc.next
+ if ok {
+ dc.curr = 0
+ dc.next++
+ }
+ dc.curr++
+ return ok
+}
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+ if v.CanAddr() {
+ return v
+ }
+ vc := reflect.New(v.Type()).Elem()
+ vc.Set(v)
+ return vc
+}
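
The exported surface above (Equal, Diff, and the option plumbing) is what callers actually touch; the rest of the vendored tree backs it. Below is a minimal, illustrative sketch of how a test might use it, assuming a hypothetical account struct (the type and field names are not from this codebase):

package example_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// account is a hypothetical struct used only to illustrate the API.
type account struct {
	ID       string
	Username string
	Fields   []string
}

func TestAccountEqual(t *testing.T) {
	want := account{ID: "01ABC", Username: "zork"}
	got := account{ID: "01ABC", Username: "zork", Fields: []string{}}

	// EquateEmpty treats nil and empty slices/maps as equal; without it,
	// Fields (nil vs []string{}) would be reported as a difference.
	opts := cmpopts.EquateEmpty()

	if !cmp.Equal(want, got, opts) {
		// Diff returns "" exactly when Equal returns true for the same
		// inputs and options, so it is safe to use in the failure message.
		t.Errorf("account mismatch (-want +got):\n%s", cmp.Diff(want, got, opts))
	}
}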
diff --git a/vendor/github.com/google/go-cmp/cmp/export.go b/vendor/github.com/google/go-cmp/cmp/export.go
new file mode 100644
index 000000000..29f82fe6b
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/export.go
@@ -0,0 +1,31 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallow copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+ ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+ if !addr {
+ // A field is addressable if and only if the struct is addressable.
+ // If the original parent value was not addressable, shallow copy the
+ // value to make it non-addressable to avoid leaking an implementation
+ // detail of how forcibly exporting a field works.
+ if ve.Kind() == reflect.Interface && ve.IsNil() {
+ return reflect.Zero(f.Type)
+ }
+ return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+ }
+ return ve
+}
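
Unexported struct fields are the one place where Equal deliberately panics unless an option intervenes; retrieveUnexportedField above is only reached when the Exporter option permits it. A short sketch of both escape hatches, using a hypothetical type with an unexported field:

package example_test

import (
	"reflect"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// counter is a hypothetical type with an unexported field.
type counter struct {
	Name string
	hits int
}

func TestCounterCompare(t *testing.T) {
	x := counter{Name: "inbox", hits: 1}
	y := counter{Name: "inbox", hits: 2}

	// Without one of these options, cmp.Equal(x, y) would panic on "hits".

	// Option A: skip unexported fields of counter entirely.
	if !cmp.Equal(x, y, cmpopts.IgnoreUnexported(counter{})) {
		t.Error("expected equal when ignoring unexported fields")
	}

	// Option B: force-export them, so "hits" is compared via unsafe access.
	exporter := cmp.Exporter(func(typ reflect.Type) bool {
		return typ == reflect.TypeOf(counter{})
	})
	if cmp.Equal(x, y, exporter) {
		t.Error("expected unequal when comparing unexported fields")
	}
}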
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 000000000..36062a604
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,18 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmp_debug
+// +build !cmp_debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+ return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 000000000..a3b97a1ad
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,123 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmp_debug
+// +build cmp_debug
+
+package diff
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+// go test -tags=cmp_debug -v
+//
+// Example output:
+// === RUN TestDifference/#34
+// ┌───────────────────────────────┐
+// │ \ · · · · · · · · · · · · · · │
+// │ · # · · · · · · · · · · · · · │
+// │ · \ · · · · · · · · · · · · · │
+// │ · · \ · · · · · · · · · · · · │
+// │ · · · X # · · · · · · · · · · │
+// │ · · · # \ · · · · · · · · · · │
+// │ · · · · · # # · · · · · · · · │
+// │ · · · · · # \ · · · · · · · · │
+// │ · · · · · · · \ · · · · · · · │
+// │ · · · · · · · · \ · · · · · · │
+// │ · · · · · · · · · \ · · · · · │
+// │ · · · · · · · · · · \ · · # · │
+// │ · · · · · · · · · · · \ # # · │
+// │ · · · · · · · · · · · # # # · │
+// │ · · · · · · · · · · # # # # · │
+// │ · · · · · · · · · # # # # # · │
+// │ · · · · · · · · · · · · · · \ │
+// └───────────────────────────────┘
+// [.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+ updateDelay = 100 * time.Millisecond
+ finishDelay = 500 * time.Millisecond
+ ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+ sync.Mutex
+ p1, p2 EditScript
+ fwdPath, revPath *EditScript
+ grid []byte
+ lines int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+ dbg.Lock()
+ dbg.fwdPath, dbg.revPath = p1, p2
+ top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+ row := "│ " + strings.Repeat("· ", nx) + "│\n"
+ btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+ dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+ dbg.lines = strings.Count(dbg.String(), "\n")
+ fmt.Print(dbg)
+
+ // Wrap the EqualFunc so that we can intercept each result.
+ return func(ix, iy int) (r Result) {
+ cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+ for i := range cell {
+ cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+ }
+ switch r = f(ix, iy); {
+ case r.Equal():
+ cell[0] = '\\'
+ case r.Similar():
+ cell[0] = 'X'
+ default:
+ cell[0] = '#'
+ }
+ return
+ }
+}
+
+func (dbg *debugger) Update() {
+ dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+ dbg.print(finishDelay)
+ dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+ dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+ for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+ dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+ }
+ return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+ if ansiTerminal {
+ fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+ }
+ fmt.Print(dbg)
+ time.Sleep(d)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 000000000..a248e5436
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,402 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The summation of all edits is called the
+// Levenshtein distance as this problem is well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+ // Identity indicates that a symbol pair is identical in both list X and Y.
+ Identity EditType = iota
+ // UniqueX indicates that a symbol only exists in X and not Y.
+ UniqueX
+ // UniqueY indicates that a symbol only exists in Y and not X.
+ UniqueY
+ // Modified indicates that a symbol pair is a modification of each other.
+ Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+ b := make([]byte, len(es))
+ for i, e := range es {
+ switch e {
+ case Identity:
+ b[i] = '.'
+ case UniqueX:
+ b[i] = 'X'
+ case UniqueY:
+ b[i] = 'Y'
+ case Modified:
+ b[i] = 'M'
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+ for _, e := range es {
+ switch e {
+ case Identity:
+ s.NI++
+ case UniqueX:
+ s.NX++
+ case UniqueY:
+ s.NY++
+ case Modified:
+ s.NM++
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+ if b {
+ return Result{NumSame: 1} // Equal, Similar
+ } else {
+ return Result{NumDiff: 2} // Not Equal, not Similar
+ }
+}
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
+func (r Result) Similar() bool {
+ // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+ return r.NumSame+1 >= r.NumDiff
+}
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+// - eq == (es.Dist()==0)
+// - nx == es.LenX()
+// - ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+ // This algorithm is based on traversing what is known as an "edit-graph".
+ // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+ // by Eugene W. Myers. Since D can be as large as N itself, this is
+ // effectively O(N^2). Unlike the algorithm from that paper, we are not
+ // interested in the optimal path, but rather in at least some "decent" path.
+ //
+ // For example, let X and Y be lists of symbols:
+ // X = [A B C A B B A]
+ // Y = [C B A B A C]
+ //
+ // The edit-graph can be drawn as the following:
+ // A B C A B B A
+ // ┌─────────────┐
+ // C │_|_|\|_|_|_|_│ 0
+ // B │_|\|_|_|\|\|_│ 1
+ // A │\|_|_|\|_|_|\│ 2
+ // B │_|\|_|_|\|\|_│ 3
+ // A │\|_|_|\|_|_|\│ 4
+ // C │ | |\| | | | │ 5
+ // └─────────────┘ 6
+ // 0 1 2 3 4 5 6 7
+ //
+ // List X is written along the horizontal axis, while list Y is written
+ // along the vertical axis. At any point on this grid, if the symbol in
+ // list X matches the corresponding symbol in list Y, then a '\' is drawn.
+ // The goal of any minimal edit-script algorithm is to find a path from the
+ // top-left corner to the bottom-right corner, while traveling through the
+ // fewest horizontal or vertical edges.
+ // A horizontal edge is equivalent to inserting a symbol from list X.
+ // A vertical edge is equivalent to inserting a symbol from list Y.
+ // A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+ // Invariants:
+ // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ //
+ // In general:
+ // - fwdFrontier.X < revFrontier.X
+ // - fwdFrontier.Y < revFrontier.Y
+ //
+ // Unless it is time for the algorithm to terminate.
+ fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+ revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+ fwdFrontier := fwdPath.point // Forward search frontier
+ revFrontier := revPath.point // Reverse search frontier
+
+ // Search budget bounds the cost of searching for better paths.
+ // The longest sequence of non-matching symbols that can be tolerated is
+ // approximately the square-root of the search budget.
+ searchBudget := 4 * (nx + ny) // O(n)
+
+ // Running the tests with the "cmp_debug" build tag prints a visualization
+ // of the algorithm running in real-time. This is educational for
+ // understanding how the algorithm works. See debug_enable.go.
+ f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+ // The algorithm below is a greedy, meet-in-the-middle algorithm for
+ // computing sub-optimal edit-scripts between two lists.
+ //
+ // The algorithm is approximately as follows:
+ // - Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner).
+ // The goal of the search is to connect with the search
+ // from the opposite corner.
+ // - As we search, we build a path in a greedy manner,
+ // where the first match seen is added to the path (this is sub-optimal,
+ // but provides a decent result in practice). When matches are found,
+ // we try the next pair of symbols in the lists and follow all matches
+ // as far as possible.
+ // - When searching for matches, we search along a diagonal going
+ // through the "frontier" point. If no matches are found,
+ // we advance the frontier towards the opposite corner.
+ // - This algorithm terminates when either the X coordinates or the
+ // Y coordinates of the forward and reverse frontier points ever intersect.
+
+ // This algorithm is correct even if searching only in the forward direction
+ // or in the reverse direction. We do both because it is commonly observed
+ // that two lists differ because elements were added to the front
+ // or end of the other list.
+ //
+ // Non-deterministically start with either the forward or reverse direction
+ // to introduce some deliberate instability so that we have the flexibility
+ // to change this algorithm in the future.
+ if flags.Deterministic || randBool {
+ goto forwardSearch
+ } else {
+ goto reverseSearch
+ }
+
+forwardSearch:
+ {
+ // Forward search from the beginning.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+ switch {
+ case p.X >= revPath.X || p.Y < fwdPath.Y:
+ stop1 = true // Hit top-right corner
+ case p.Y >= revPath.Y || p.X < fwdPath.X:
+ stop2 = true // Hit bottom-left corner
+ case f(p.X, p.Y).Equal():
+ // Match found, so connect the path to this point.
+ fwdPath.connect(p, f)
+ fwdPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(fwdPath.X, fwdPath.Y).Equal() {
+ break
+ }
+ fwdPath.append(Identity)
+ }
+ fwdFrontier = fwdPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards reverse point.
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+ fwdFrontier.X++
+ } else {
+ fwdFrontier.Y++
+ }
+ goto reverseSearch
+ }
+
+reverseSearch:
+ {
+ // Reverse search from the end.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{revFrontier.X - z, revFrontier.Y + z}
+ switch {
+ case fwdPath.X >= p.X || revPath.Y < p.Y:
+ stop1 = true // Hit bottom-left corner
+ case fwdPath.Y >= p.Y || revPath.X < p.X:
+ stop2 = true // Hit top-right corner
+ case f(p.X-1, p.Y-1).Equal():
+ // Match found, so connect the path to this point.
+ revPath.connect(p, f)
+ revPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(revPath.X-1, revPath.Y-1).Equal() {
+ break
+ }
+ revPath.append(Identity)
+ }
+ revFrontier = revPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards forward point.
+ if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+ revFrontier.X--
+ } else {
+ revFrontier.Y--
+ }
+ goto forwardSearch
+ }
+
+finishSearch:
+ // Join the forward and reverse paths and then append the reverse path.
+ fwdPath.connect(revPath.point, f)
+ for i := len(revPath.es) - 1; i >= 0; i-- {
+ t := revPath.es[i]
+ revPath.es = revPath.es[:i]
+ fwdPath.append(t)
+ }
+ debug.Finish()
+ return fwdPath.es
+}
+
+type path struct {
+ dir int // +1 if forward, -1 if reverse
+ point // Leading point of the EditScript path
+ es EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+ if p.dir > 0 {
+ // Connect in forward direction.
+ for dst.X > p.X && dst.Y > p.Y {
+ switch r := f(p.X, p.Y); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case dst.X-p.X >= dst.Y-p.Y:
+ p.append(UniqueX)
+ default:
+ p.append(UniqueY)
+ }
+ }
+ for dst.X > p.X {
+ p.append(UniqueX)
+ }
+ for dst.Y > p.Y {
+ p.append(UniqueY)
+ }
+ } else {
+ // Connect in reverse direction.
+ for p.X > dst.X && p.Y > dst.Y {
+ switch r := f(p.X-1, p.Y-1); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case p.Y-dst.Y >= p.X-dst.X:
+ p.append(UniqueY)
+ default:
+ p.append(UniqueX)
+ }
+ }
+ for p.X > dst.X {
+ p.append(UniqueX)
+ }
+ for p.Y > dst.Y {
+ p.append(UniqueY)
+ }
+ }
+}
+
+func (p *path) append(t EditType) {
+ p.es = append(p.es, t)
+ switch t {
+ case Identity, Modified:
+ p.add(p.dir, p.dir)
+ case UniqueX:
+ p.add(p.dir, 0)
+ case UniqueY:
+ p.add(0, p.dir)
+ }
+ debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
+// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+ if x&1 != 0 {
+ x = ^x
+ }
+ return x >> 1
+}
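
zigzag is what makes the frontier search probe alternately above and below the current diagonal. The internal/diff package cannot be imported from outside go-cmp, so the following is a standalone sketch that just reproduces the mapping to make the probe order concrete:

package main

import "fmt"

// zigzag maps 0, 1, 2, 3, 4, 5, ... onto 0, -1, +1, -2, +2, -3, ...
func zigzag(x int) int {
	if x&1 != 0 {
		x = ^x // odd inputs become negative offsets
	}
	return x >> 1
}

func main() {
	for i := 0; i < 6; i++ {
		fmt.Printf("zigzag(%d) = %+d\n", i, zigzag(i))
	}
	// Prints offsets 0, -1, +1, -2, +2, -3, which are applied to the
	// frontier point when searching diagonally for the next match.
}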
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 000000000..d8e459c9b
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 000000000..d127d4362
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,99 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package function provides functionality for identifying function types.
+package function
+
+import (
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+type funcType int
+
+const (
+ _ funcType = iota
+
+ tbFunc // func(T) bool
+ ttbFunc // func(T, T) bool
+ trbFunc // func(T, R) bool
+ tibFunc // func(T, I) bool
+ trFunc // func(T) R
+
+ Equal = ttbFunc // func(T, T) bool
+ EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+ Transformer = trFunc // func(T) R
+ ValueFilter = ttbFunc // func(T, T) bool
+ Less = ttbFunc // func(T, T) bool
+ ValuePredicate = tbFunc // func(T) bool
+ KeyValuePredicate = trbFunc // func(T, R) bool
+)
+
+var boolType = reflect.TypeOf(true)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+ if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+ return false
+ }
+ ni, no := t.NumIn(), t.NumOut()
+ switch ft {
+ case tbFunc: // func(T) bool
+ if ni == 1 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case ttbFunc: // func(T, T) bool
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+ return true
+ }
+ case trbFunc: // func(T, R) bool
+ if ni == 2 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case tibFunc: // func(T, I) bool
+ if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+ return true
+ }
+ case trFunc: // func(T) R
+ if ni == 1 && no == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+ fnc := runtime.FuncForPC(v.Pointer())
+ if fnc == nil {
+ return ""
+ }
+ fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+ // Method closures have a "-fm" suffix.
+ fullName = strings.TrimSuffix(fullName, "-fm")
+
+ var name string
+ for len(fullName) > 0 {
+ inParen := strings.HasSuffix(fullName, ")")
+ fullName = strings.TrimSuffix(fullName, ")")
+
+ s := lastIdentRx.FindString(fullName)
+ if s == "" {
+ break
+ }
+ name = s + "." + name
+ fullName = strings.TrimSuffix(fullName, s)
+
+ if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+ fullName = fullName[:i]
+ }
+ fullName = strings.TrimSuffix(fullName, ".")
+ }
+ return strings.TrimSuffix(name, ".")
+}
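
EqualAssignable above is the shape check behind rule 2 of Equal: a method of the form "(T) Equal(T) bool" (or with an assignable interface argument) takes precedence over field-by-field comparison. A brief sketch, combining the real time.Time behaviour with a hypothetical wrapper type:

package example_test

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
)

// stamp is a hypothetical wrapper whose Equal method compares instants,
// like time.Time.Equal, rather than comparing fields.
type stamp struct{ t time.Time }

func (s stamp) Equal(o stamp) bool { return s.t.Equal(o.t) }

func TestEqualMethodIsUsed(t *testing.T) {
	utc := time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)
	local := utc.In(time.FixedZone("UTC+2", 2*60*60))

	// Same instant, different locations: the Equal method reports true,
	// and cmp never descends into stamp's unexported field at all.
	if !cmp.Equal(stamp{utc}, stamp{local}) {
		t.Error("expected stamps to compare equal via their Equal method")
	}
}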
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
new file mode 100644
index 000000000..7b498bb2c
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
@@ -0,0 +1,164 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "strconv"
+)
+
+var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// TypeString is nearly identical to reflect.Type.String,
+// but has an additional option to specify that full type names be used.
+func TypeString(t reflect.Type, qualified bool) string {
+ return string(appendTypeName(nil, t, qualified, false))
+}
+
+func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
+ // BUG: Go reflection provides no way to disambiguate two named types
+ // of the same name and within the same package,
+ // but declared within the namespace of different functions.
+
+ // Use the "any" alias instead of "interface{}" for better readability.
+ if t == anyType {
+ return append(b, "any"...)
+ }
+
+ // Named type.
+ if t.Name() != "" {
+ if qualified && t.PkgPath() != "" {
+ b = append(b, '"')
+ b = append(b, t.PkgPath()...)
+ b = append(b, '"')
+ b = append(b, '.')
+ b = append(b, t.Name()...)
+ } else {
+ b = append(b, t.String()...)
+ }
+ return b
+ }
+
+ // Unnamed type.
+ switch k := t.Kind(); k {
+ case reflect.Bool, reflect.String, reflect.UnsafePointer,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ b = append(b, k.String()...)
+ case reflect.Chan:
+ if t.ChanDir() == reflect.RecvDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, "chan"...)
+ if t.ChanDir() == reflect.SendDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Func:
+ if !elideFunc {
+ b = append(b, "func"...)
+ }
+ b = append(b, '(')
+ for i := 0; i < t.NumIn(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ if i == t.NumIn()-1 && t.IsVariadic() {
+ b = append(b, "..."...)
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false)
+ } else {
+ b = appendTypeName(b, t.In(i), qualified, false)
+ }
+ }
+ b = append(b, ')')
+ switch t.NumOut() {
+ case 0:
+ // Do nothing
+ case 1:
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Out(0), qualified, false)
+ default:
+ b = append(b, " ("...)
+ for i := 0; i < t.NumOut(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ b = appendTypeName(b, t.Out(i), qualified, false)
+ }
+ b = append(b, ')')
+ }
+ case reflect.Struct:
+ b = append(b, "struct{ "...)
+ for i := 0; i < t.NumField(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ sf := t.Field(i)
+ if !sf.Anonymous {
+ if qualified && sf.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, sf.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, sf.Name...)
+ b = append(b, ' ')
+ }
+ b = appendTypeName(b, sf.Type, qualified, false)
+ if sf.Tag != "" {
+ b = append(b, ' ')
+ b = strconv.AppendQuote(b, string(sf.Tag))
+ }
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ case reflect.Slice, reflect.Array:
+ b = append(b, '[')
+ if k == reflect.Array {
+ b = strconv.AppendUint(b, uint64(t.Len()), 10)
+ }
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Map:
+ b = append(b, "map["...)
+ b = appendTypeName(b, t.Key(), qualified, false)
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Ptr:
+ b = append(b, '*')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Interface:
+ b = append(b, "interface{ "...)
+ for i := 0; i < t.NumMethod(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ m := t.Method(i)
+ if qualified && m.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, m.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, m.Name...)
+ b = appendTypeName(b, m.Type, qualified, true)
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ default:
+ panic("invalid kind: " + k.String())
+ }
+ return b
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
new file mode 100644
index 000000000..e5dfff69a
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
@@ -0,0 +1,34 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+ p unsafe.Pointer
+ t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+ // The proper representation of a pointer is unsafe.Pointer,
+ // which is necessary if the GC ever uses a moving collector.
+ return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+ return uintptr(p.p)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 000000000..98533b036
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+ if len(vs) == 0 {
+ return vs
+ }
+
+ // Sort the map keys.
+ sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
+
+ // Deduplicate keys (fails for NaNs).
+ vs2 := vs[:1]
+ for _, v := range vs[1:] {
+ if isLess(vs2[len(vs2)-1], v) {
+ vs2 = append(vs2, v)
+ }
+ }
+ return vs2
+}
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+ switch x.Type().Kind() {
+ case reflect.Bool:
+ return !x.Bool() && y.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() < y.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() < y.Uint()
+ case reflect.Float32, reflect.Float64:
+ // NOTE: This does not sort -0 as less than +0
+ // since Go maps treat -0 and +0 as equal keys.
+ fx, fy := x.Float(), y.Float()
+ return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+ case reflect.Complex64, reflect.Complex128:
+ cx, cy := x.Complex(), y.Complex()
+ rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+ if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+ return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+ }
+ return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+ case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+ return x.Pointer() < y.Pointer()
+ case reflect.String:
+ return x.String() < y.String()
+ case reflect.Array:
+ for i := 0; i < x.Len(); i++ {
+ if isLess(x.Index(i), y.Index(i)) {
+ return true
+ }
+ if isLess(y.Index(i), x.Index(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Struct:
+ for i := 0; i < x.NumField(); i++ {
+ if isLess(x.Field(i), y.Field(i)) {
+ return true
+ }
+ if isLess(y.Field(i), x.Field(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Interface:
+ vx, vy := x.Elem(), y.Elem()
+ if !vx.IsValid() || !vy.IsValid() {
+ return !vx.IsValid() && vy.IsValid()
+ }
+ tx, ty := vx.Type(), vy.Type()
+ if tx == ty {
+ return isLess(x.Elem(), y.Elem())
+ }
+ if tx.Kind() != ty.Kind() {
+ return vx.Kind() < vy.Kind()
+ }
+ if tx.String() != ty.String() {
+ return tx.String() < ty.String()
+ }
+ if tx.PkgPath() != ty.PkgPath() {
+ return tx.PkgPath() < ty.PkgPath()
+ }
+ // This can happen in rare situations, so we fall back to just comparing
+ // the unique pointer for a reflect.Type. This guarantees deterministic
+ // ordering within a program, but it is obviously not stable.
+ return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+ default:
+ // Must be Func, Map, or Slice; which are not comparable.
+ panic(fmt.Sprintf("%T is not comparable", x.Type()))
+ }
+}
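
SortKeys above is why map comparison walks entries in a stable order even though Go randomizes map iteration. A small sketch with illustrative data, showing that repeated diffs of the same maps agree with each other within a run:

package example_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

func TestMapDiffIsStable(t *testing.T) {
	x := map[string]int{"replies": 1, "boosts": 2, "faves": 3}
	y := map[string]int{"replies": 1, "boosts": 5, "faves": 3}

	// Entries are visited in sorted, deduplicated key order, so two diffs
	// of the same inputs line up even across randomized map iteration.
	d1 := cmp.Diff(x, y)
	d2 := cmp.Diff(x, y)
	if d1 == "" || d1 != d2 {
		t.Errorf("expected identical, non-empty diffs; got:\n%s\n%s", d1, d2)
	}
}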
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 000000000..754496f3b
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,554 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of [Equal] and [Diff]. In particular,
+// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters ([FilterPath] and
+// [FilterValues]) to control the scope over which they are applied.
+//
+// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
+// for creating options that may be used with [Equal] and [Diff].
+type Option interface {
+ // filter applies all filters and returns the option that remains.
+ // Each option may only read s.curPath and call s.callTTBFunc.
+ //
+ // An Options is returned only if multiple comparers or transformers
+ // can apply simultaneously and will only contain values of those types
+ // or sub-Options containing values of those types.
+ filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
+}
+
+// applicableOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Grouping: Options
+type applicableOption interface {
+ Option
+
+ // apply executes the option, which may mutate s or panic.
+ apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Filters: *pathFilter | *valuesFilter
+type coreOption interface {
+ Option
+ isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of [Option] values that also satisfies the [Option] interface.
+// Helper comparison packages may return an Options value when packing multiple
+// [Option] values into a single [Option]. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
+ for _, opt := range opts {
+ switch opt := opt.filter(s, t, vx, vy); opt.(type) {
+ case ignore:
+ return ignore{} // Only ignore can short-circuit evaluation
+ case validator:
+ out = validator{} // Takes precedence over comparer or transformer
+ case *comparer, *transformer, Options:
+ switch out.(type) {
+ case nil:
+ out = opt
+ case validator:
+ // Keep validator
+ case *comparer, *transformer, Options:
+ out = Options{out, opt} // Conflicting comparers or transformers
+ }
+ }
+ }
+ return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+ const warning = "ambiguous set of applicable options"
+ const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+ var ss []string
+ for _, opt := range flattenOptions(nil, opts) {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+ var ss []string
+ for _, opt := range opts {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new [Option] where opt is only evaluated if filter f
+// returns true for the current [Path] in the value tree.
+//
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterPath(f func(Path) bool, opt Option) Option {
+ if f == nil {
+ panic("invalid path filter function")
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ return &pathFilter{fnc: f, opt: opt}
+ }
+ return nil
+}
+
+type pathFilter struct {
+ core
+ fnc func(Path) bool
+ opt Option
+}
+
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if f.fnc(s.curPath) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f pathFilter) String() string {
+ return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
+}
+
+// FilterValues returns a new [Option] where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterValues(f interface{}, opt Option) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+ panic(fmt.Sprintf("invalid values filter function: %T", f))
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ vf := &valuesFilter{fnc: v, opt: opt}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ vf.typ = ti
+ }
+ return vf
+ }
+ return nil
+}
+
+type valuesFilter struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+ opt Option
+}
+
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+ return nil
+ }
+ if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f valuesFilter) String() string {
+ return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
+}
+
+// Ignore is an [Option] that causes all comparisons to be ignored.
+// This value is intended to be combined with [FilterPath] or [FilterValues].
+// It is an error to pass an unfiltered Ignore option to [Equal].
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool { return false }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
+func (ignore) String() string { return "Ignore()" }
+
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Both values are validator only for unexported fields.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vy.IsValid() {
+ return validator{}
+ }
+ if !vx.CanInterface() || !vy.CanInterface() {
+ return validator{}
+ }
+ return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+ // Implies missing slice element or map entry.
+ if !vx.IsValid() || !vy.IsValid() {
+ s.report(vx.IsValid() == vy.IsValid(), 0)
+ return
+ }
+
+ // Unable to Interface implies unexported field without visibility access.
+ if !vx.CanInterface() || !vy.CanInterface() {
+ help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+ var name string
+ if t := s.curPath.Index(-2).Type(); t.Name() != "" {
+ // Named type with unexported fields.
+ name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+ if _, ok := reflect.New(t).Interface().(error); ok {
+ help = "consider using cmpopts.EquateErrors to compare error values"
+ } else if t.Comparable() {
+ help = "consider using cmpopts.EquateComparable to compare comparable Go types"
+ }
+ } else {
+ // Unnamed type with unexported fields. Derive PkgPath from field.
+ var pkgPath string
+ for i := 0; i < t.NumField() && pkgPath == ""; i++ {
+ pkgPath = t.Field(i).PkgPath
+ }
+ name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
+ }
+ panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
+ }
+
+ panic("not reachable")
+}
+
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
+// Transformer returns an [Option] that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the [Path] since the last non-[Transform] step.
+// For situations where the implicit filter is still insufficient,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
+// which adds a filter to prevent the transformer from
+// being recursively applied upon itself.
+//
+// The name is a user provided label that is used as the [Transform.Name] in the
+// transformation [PathStep] (and eventually shown in the [Diff] output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+ panic(fmt.Sprintf("invalid transformer function: %T", f))
+ }
+ if name == "" {
+ name = function.NameOf(v)
+ if !identsRx.MatchString(name) {
+ name = "λ" // Lambda-symbol as placeholder name
+ }
+ } else if !identsRx.MatchString(name) {
+ panic(fmt.Sprintf("invalid name: %q", name))
+ }
+ tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ tr.typ = ti
+ }
+ return tr
+}
+
+type transformer struct {
+ core
+ name string
+ typ reflect.Type // T
+ fnc reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ for i := len(s.curPath) - 1; i >= 0; i-- {
+ if t, ok := s.curPath[i].(Transform); !ok {
+ break // Hit most recent non-Transform step
+ } else if tr == t.trans {
+ return nil // Cannot directly use same Transform
+ }
+ }
+ if tr.typ == nil || t.AssignableTo(tr.typ) {
+ return tr
+ }
+ return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+ step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+ vvx := s.callTRFunc(tr.fnc, vx, step)
+ vvy := s.callTRFunc(tr.fnc, vy, step)
+ step.vx, step.vy = vvx, vvy
+ s.compareAny(step)
+}
+
+func (tr transformer) String() string {
+ return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
+}
+
+// Comparer returns an [Option] that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+// - Symmetric: equal(x, y) == equal(y, x)
+// - Deterministic: equal(x, y) == equal(x, y)
+// - Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+ panic(fmt.Sprintf("invalid comparer function: %T", f))
+ }
+ cm := &comparer{fnc: v}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ cm.typ = ti
+ }
+ return cm
+}
+
+type comparer struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ if cm.typ == nil || t.AssignableTo(cm.typ) {
+ return cm
+ }
+ return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+ eq := s.callTTBFunc(cm.fnc, vx, vy)
+ s.report(eq, reportByFunc)
+}
+
+func (cm comparer) String() string {
+ return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
+}
+
+// Exporter returns an [Option] that specifies whether [Equal] is allowed to
+// introspect into the unexported fields of certain struct types.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of [Equal]
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom [Comparer] should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the [reflect.Type] documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *[regexp.Regexp] types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using [Comparer] options:
+//
+// Comparer(func(x, y reflect.Type) bool { return x == y })
+// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
+// option can be used to ignore all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+ return exporter(f)
+}
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See [Exporter] for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+ m := make(map[reflect.Type]bool)
+ for _, typ := range types {
+ t := reflect.TypeOf(typ)
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("invalid struct type: %T", typ))
+ }
+ m[t] = true
+ }
+ return exporter(func(t reflect.Type) bool { return m[t] })
+}
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see [Reporter]).
+type Result struct {
+ _ [0]func() // Make Result incomparable
+ flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+ return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if [Result.Equal] reports false.
+func (r Result) ByIgnore() bool {
+ return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+ return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a [Comparer] function determined equality.
+func (r Result) ByFunc() bool {
+ return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+ return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+ reportByCycle
+)
+
+// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+ // PushStep is called when a tree-traversal operation is performed.
+ // The PathStep itself is only valid until the step is popped.
+ // The PathStep.Values are valid for the duration of the entire traversal
+ // and must not be mutated.
+ //
+ // Equal always calls PushStep at the start to provide an operation-less
+ // PathStep used to report the root values.
+ //
+ // Within a slice, the exact set of inserted, removed, or modified elements
+ // is unspecified and may change in future implementations.
+ // The entries of a map are iterated through in an unspecified order.
+ PushStep(PathStep)
+
+ // Report is called exactly once on leaf nodes to report whether the
+ // comparison identified the node as equal, unequal, or ignored.
+ // A leaf node is one that is immediately preceded by and followed by
+ // a pair of PushStep and PopStep calls.
+ Report(Result)
+
+ // PopStep ascends back up the value tree.
+ // There is always a matching pop call for every push call.
+ PopStep()
+}) Option {
+ return reporter{r}
+}
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+ PushStep(PathStep)
+ Report(Result)
+ PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+ switch opts := flattenOptions(nil, Options{src}); len(opts) {
+ case 0:
+ return nil
+ case 1:
+ return opts[0]
+ default:
+ return opts
+ }
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+ for _, opt := range src {
+ switch opt := opt.(type) {
+ case nil:
+ continue
+ case Options:
+ dst = flattenOptions(dst, opt)
+ case coreOption:
+ dst = append(dst, opt)
+ default:
+ panic(fmt.Sprintf("invalid option type: %T", opt))
+ }
+ }
+ return dst
+}
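In practice, the fundamental options above (Comparer, Transformer, Ignore) and the filters (FilterPath, FilterValues) are composed at the Equal or Diff call site. A minimal sketch of such a composition; the user type, field names, and tolerance below are illustrative only and not part of this change:

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

// user is a hypothetical type used only for this illustration.
type user struct {
	Name  string
	Score float64
}

func main() {
	// Comparer: treat float64 values as equal within a small tolerance.
	approxFloat := cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	})

	// FilterPath + Ignore: skip the Name field entirely.
	ignoreName := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && sf.Name() == "Name"
	}, cmp.Ignore())

	x := user{Name: "alice", Score: 1.0}
	y := user{Name: "bob", Score: 1.0 + 1e-12}

	fmt.Println(cmp.Equal(x, y, approxFloat, ignoreName)) // true
	fmt.Println(cmp.Diff(x, y, approxFloat, ignoreName))  // "" (no differences)
}
```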
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 000000000..c3c145642
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,390 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// Path is a list of [PathStep] describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less [PathStep] that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
+
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// this interface, as values of these types are returned by this package.
+//
+// Implementations of this interface:
+// - [StructField]
+// - [SliceIndex]
+// - [MapIndex]
+// - [Indirect]
+// - [TypeAssertion]
+// - [Transform]
+type PathStep interface {
+ String() string
+
+ // Type is the resulting type after performing the path step.
+ Type() reflect.Type
+
+ // Values is the resulting values after performing the path step.
+ // The type of each valid value is guaranteed to be identical to Type.
+ //
+ // In some cases, one or both may be invalid or have restrictions:
+ // - For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // an Exporter to traverse unexported fields.
+ // - For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // - For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
+ //
+ // The provided values must not be mutated.
+ Values() (vx, vy reflect.Value)
+}
+
+var (
+ _ PathStep = StructField{}
+ _ PathStep = SliceIndex{}
+ _ PathStep = MapIndex{}
+ _ PathStep = Indirect{}
+ _ PathStep = TypeAssertion{}
+ _ PathStep = Transform{}
+)
+
+func (pa *Path) push(s PathStep) {
+ *pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+ *pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last [PathStep] in the Path.
+// If the path is empty, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Last() PathStep {
+ return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Index(i int) PathStep {
+ if i < 0 {
+ i = len(pa) + i
+ }
+ if i < 0 || i >= len(pa) {
+ return pathStep{}
+ }
+ return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//
+// MyMap.MySlices.MyField
+func (pa Path) String() string {
+ var ss []string
+ for _, s := range pa {
+ if _, ok := s.(StructField); ok {
+ ss = append(ss, s.String())
+ }
+ }
+ return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//
+// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+ var ssPre, ssPost []string
+ var numIndirect int
+ for i, s := range pa {
+ var nextStep PathStep
+ if i+1 < len(pa) {
+ nextStep = pa[i+1]
+ }
+ switch s := s.(type) {
+ case Indirect:
+ numIndirect++
+ pPre, pPost := "(", ")"
+ switch nextStep.(type) {
+ case Indirect:
+ continue // Next step is indirection, so let them batch up
+ case StructField:
+ numIndirect-- // Automatic indirection on struct fields
+ case nil:
+ pPre, pPost = "", "" // Last step; no need for parenthesis
+ }
+ if numIndirect > 0 {
+ ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+ ssPost = append(ssPost, pPost)
+ }
+ numIndirect = 0
+ continue
+ case Transform:
+ ssPre = append(ssPre, s.trans.name+"(")
+ ssPost = append(ssPost, ")")
+ continue
+ }
+ ssPost = append(ssPost, s.String())
+ }
+ for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+ ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+ }
+ return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type pathStep struct {
+ typ reflect.Type
+ vx, vy reflect.Value
+}
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
+func (ps pathStep) String() string {
+ if ps.typ == nil {
+ return ""
+ }
+ s := value.TypeString(ps.typ, false)
+ if s == "" || strings.ContainsAny(s, "{}\n") {
+ return "root" // Type too simple or complex to print
+ }
+ return fmt.Sprintf("{%s}", s)
+}
+
+// StructField is a [PathStep] that represents a struct field access
+// on a field called [StructField.Name].
+type StructField struct{ *structField }
+type structField struct {
+ pathStep
+ name string
+ idx int
+
+ // These fields are used for forcibly accessing an unexported field.
+ // pvx, pvy, and field are only valid if unexported is true.
+ unexported bool
+ mayForce bool // Forcibly allow visibility
+ paddr bool // Was parent addressable?
+ pvx, pvy reflect.Value // Parent values (always addressable)
+ field reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+ if !sf.unexported {
+ return sf.vx, sf.vy // CanInterface reports true
+ }
+
+ // Forcibly obtain read-write access to an unexported struct field.
+ if sf.mayForce {
+ vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
+ vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
+ return vx, vy // CanInterface reports true
+ }
+ return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See [reflect.Type.Field].
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is a [PathStep] that represents an index operation on
+// a slice or array at some index [SliceIndex.Key].
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+ pathStep
+ xkey, ykey int
+ isSlice bool // False for reflect.Array
+}
+
+func (si SliceIndex) Type() reflect.Type { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
+ switch {
+ case si.xkey == si.ykey:
+ return fmt.Sprintf("[%d]", si.xkey)
+ case si.ykey == -1:
+ // [5->?] means "I don't know where X[5] went"
+ return fmt.Sprintf("[%d->?]", si.xkey)
+ case si.xkey == -1:
+ // [?->3] means "I don't know where Y[3] came from"
+ return fmt.Sprintf("[?->%d]", si.ykey)
+ default:
+ // [5->3] means "X[5] moved to Y[3]"
+ return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+ }
+}
+
+// Key is the index key; it may return -1 if in a split state
+func (si SliceIndex) Key() int {
+ if si.xkey != si.ykey {
+ return -1
+ }
+ return si.xkey
+}
+
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
+// returned by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+ pathStep
+ key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect is a [PathStep] that represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+ pathStep
+}
+
+func (in Indirect) Type() reflect.Type { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string { return "*" }
+
+// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+ pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
+
+// Transform is a [PathStep] that represents a transformation
+// from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+ pathStep
+ trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the [Transformer].
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed [Transformer] option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
+
+// pointerPath represents a dual-stack of pointers encountered when
+// recursively traversing the x and y values. This data structure supports
+// detection of cycles and determining whether the cycles are equal.
+// In Go, cycles can occur via pointers, slices, and maps.
+//
+// The pointerPath uses a map to represent a stack, where descending into a
+// pointer pushes the address onto the stack, and ascending from a pointer
+// pops the address from the stack. Thus, when traversing into a pointer from
+// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
+// by checking whether the pointer has already been visited. The cycle detection
+// uses a separate stack for the x and y values.
+//
+// If a cycle is detected we need to determine whether the two pointers
+// should be considered equal. The definition of equality chosen by Equal
+// requires two graphs to have the same structure. To determine this, both the
+// x and y values must have a cycle where the previous pointers were also
+// encountered together as a pair.
+//
+// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
+// MapIndex with pointer information for the x and y values.
+// Suppose px and py are two pointers to compare; we then search the
+// Path for whether px was ever encountered in the Path history of x, and
+// similarly so with py. If either side has a cycle, the comparison is only
+// equal if both px and py have a cycle resulting from the same PathStep.
+//
+// Using a map as a stack is more performant as we can perform cycle detection
+// in O(1) instead of O(N) where N is len(Path).
+type pointerPath struct {
+ // mx is keyed by x pointers, where the value is the associated y pointer.
+ mx map[value.Pointer]value.Pointer
+ // my is keyed by y pointers, where the value is the associated x pointer.
+ my map[value.Pointer]value.Pointer
+}
+
+func (p *pointerPath) Init() {
+ p.mx = make(map[value.Pointer]value.Pointer)
+ p.my = make(map[value.Pointer]value.Pointer)
+}
+
+// Push indicates intent to descend into pointers vx and vy where
+// visited reports whether either has been seen before. If visited before,
+// equal reports whether both pointers were encountered together.
+// Pop must be called if and only if the pointers were never visited.
+//
+// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
+// and be non-nil.
+func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
+ px := value.PointerOf(vx)
+ py := value.PointerOf(vy)
+ _, ok1 := p.mx[px]
+ _, ok2 := p.my[py]
+ if ok1 || ok2 {
+ equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
+ return equal, true
+ }
+ p.mx[px] = py
+ p.my[py] = px
+ return false, false
+}
+
+// Pop ascends from pointers vx and vy.
+func (p pointerPath) Pop(vx, vy reflect.Value) {
+ delete(p.mx, value.PointerOf(vx))
+ delete(p.my, value.PointerOf(vy))
+}
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
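Path and PathStep are most commonly consumed inside a FilterPath callback. A small sketch, assuming a hypothetical config type, that uses Path.String (which reports struct-field accesses only) to ignore one field:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

// config is a hypothetical nested type used to show how Path steps compose.
type config struct {
	Limits struct{ MaxItems int }
	Label  string
}

func main() {
	// Ignore any node whose simplified path ends in ".MaxItems".
	// For that field, Path.String reports "Limits.MaxItems".
	opt := cmp.FilterPath(func(p cmp.Path) bool {
		return strings.HasSuffix(p.String(), ".MaxItems")
	}, cmp.Ignore())

	var x, y config
	x.Limits.MaxItems, y.Limits.MaxItems = 10, 20
	x.Label, y.Label = "a", "a"

	fmt.Println(cmp.Equal(x, y, opt)) // true: the only difference is ignored
}
```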
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 000000000..f43cd12eb
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,54 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+ root *valueNode
+ curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+ r.curr = r.curr.PushStep(ps)
+ if r.root == nil {
+ r.root = r.curr
+ }
+}
+func (r *defaultReporter) Report(rs Result) {
+ r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+ r.curr = r.curr.PopStep()
+}
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+ assert(r.root != nil && r.curr == nil)
+ if r.root.NumDiff == 0 {
+ return ""
+ }
+ ptrs := new(pointerReferences)
+ text := formatOptions{}.FormatDiff(r.root, ptrs)
+ resolveReferences(text)
+ return text.String()
+}
+
+func assert(ok bool) {
+ if !ok {
+ panic("assertion failure")
+ }
+}
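The same PushStep/Report/PopStep protocol that defaultReporter implements is available to callers through the Reporter option. A hedged sketch of a custom reporter that records the paths of unequal leaves; the type name and output formatting are illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// pathRecorder is a hypothetical reporter that collects the path of every
// unequal leaf; it satisfies the interface expected by cmp.Reporter.
type pathRecorder struct {
	path  cmp.Path
	diffs []string
}

func (r *pathRecorder) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *pathRecorder) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *pathRecorder) Report(rs cmp.Result) {
	if !rs.Equal() {
		r.diffs = append(r.diffs, fmt.Sprintf("%#v", r.path))
	}
}

func main() {
	type pair struct{ A, B int }

	var r pathRecorder
	cmp.Equal(pair{1, 2}, pair{1, 3}, cmp.Reporter(&r))
	fmt.Println(r.diffs) // one entry pointing at field B; exact formatting may vary
}
```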
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 000000000..2050bf6b4
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,433 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+ diffUnknown diffMode = 0
+ diffIdentical diffMode = ' '
+ diffRemoved diffMode = '-'
+ diffInserted diffMode = '+'
+)
+
+type typeMode int
+
+const (
+ // emitType always prints the type.
+ emitType typeMode = iota
+ // elideType never prints the type.
+ elideType
+ // autoType prints the type only for composite kinds
+ // (i.e., structs, slices, arrays, and maps).
+ autoType
+)
+
+type formatOptions struct {
+ // DiffMode controls the output mode of FormatDiff.
+ //
+ // If diffUnknown, then produce a diff of the x and y values.
+ // If diffIdentical, then emit values as if they were equal.
+ // If diffRemoved, then only emit x values (ignoring y values).
+ // If diffInserted, then only emit y values (ignoring x values).
+ DiffMode diffMode
+
+ // TypeMode controls whether to print the type for the current node.
+ //
+ // As a general rule of thumb, we always print the type of the next node
+ // after an interface, and always elide the type of the next node after
+ // a slice or map node.
+ TypeMode typeMode
+
+ // formatValueOptions are options specific to printing reflect.Values.
+ formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+ opts.DiffMode = d
+ return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+ opts.TypeMode = t
+ return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+ opts.VerbosityLevel = level
+ opts.LimitVerbosity = true
+ return opts
+}
+func (opts formatOptions) verbosity() uint {
+ switch {
+ case opts.VerbosityLevel < 0:
+ return 0
+ case opts.VerbosityLevel > 16:
+ return 16 // some reasonable maximum to avoid shift overflow
+ default:
+ return uint(opts.VerbosityLevel)
+ }
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+ opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+ if i > 0 {
+ opts.AvoidStringer = true
+ }
+ if i >= maxVerbosityPreset {
+ opts.PrintAddresses = true
+ opts.QualifiedNames = true
+ }
+ return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+ if opts.DiffMode == diffIdentical {
+ opts = opts.WithVerbosity(1)
+ } else if opts.verbosity() < 3 {
+ opts = opts.WithVerbosity(3)
+ }
+
+ // Check whether we have specialized formatting for this node.
+ // This is not necessary, but helpful for producing more readable outputs.
+ if opts.CanFormatDiffSlice(v) {
+ return opts.FormatDiffSlice(v)
+ }
+
+ var parentKind reflect.Kind
+ if v.parent != nil && v.parent.TransformerName == "" {
+ parentKind = v.parent.Type.Kind()
+ }
+
+ // For leaf nodes, format the value based on the reflect.Values alone.
+ // As a special case, treat equal []byte as leaf nodes.
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+ isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+ if v.MaxDepth == 0 || isEqualBytes {
+ switch opts.DiffMode {
+ case diffUnknown, diffIdentical:
+ // Format Equal.
+ if v.NumDiff == 0 {
+ outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+ if v.NumIgnored > 0 && v.NumSame == 0 {
+ return textEllipsis
+ } else if outx.Len() < outy.Len() {
+ return outx
+ } else {
+ return outy
+ }
+ }
+
+ // Format unequal.
+ assert(opts.DiffMode == diffUnknown)
+ var list textList
+ outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
+ outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
+ outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: '-', Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: '+', Value: outy})
+ }
+ return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+ case diffRemoved:
+ return opts.FormatValue(v.ValueX, parentKind, ptrs)
+ case diffInserted:
+ return opts.FormatValue(v.ValueY, parentKind, ptrs)
+ default:
+ panic("invalid diff mode")
+ }
+ }
+
+ // Register slice element to support cycle detection.
+ if parentKind == reflect.Slice {
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
+ }
+
+ // Descend into the child value node.
+ if v.TransformerName != "" {
+ out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
+ return opts.FormatType(v.Type, out)
+ } else {
+ switch k := v.Type.Kind(); k {
+ case reflect.Struct, reflect.Array, reflect.Slice:
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Map:
+ // Register map to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Ptr:
+ // Register pointer to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.FormatDiff(v.Value, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ case reflect.Interface:
+ out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ default:
+ panic(fmt.Sprintf("%v cannot have children", k))
+ }
+ return out
+ }
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
+ // Derive record name based on the data structure kind.
+ var name string
+ var formatKey func(reflect.Value) string
+ switch k {
+ case reflect.Struct:
+ name = "field"
+ opts = opts.WithTypeMode(autoType)
+ formatKey = func(v reflect.Value) string { return v.String() }
+ case reflect.Slice, reflect.Array:
+ name = "element"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(reflect.Value) string { return "" }
+ case reflect.Map:
+ name = "entry"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
+ }
+
+ maxLen := -1
+ if opts.LimitVerbosity {
+ if opts.DiffMode == diffIdentical {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ } else {
+ maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
+ }
+ opts.VerbosityLevel--
+ }
+
+ // Handle unification.
+ switch opts.DiffMode {
+ case diffIdentical, diffRemoved, diffInserted:
+ var list textList
+ var deferredEllipsis bool // Add final "..." to indicate records were dropped
+ for _, r := range recs {
+ if len(list) == maxLen {
+ deferredEllipsis = true
+ break
+ }
+
+ // Elide struct fields that are zero value.
+ if k == reflect.Struct {
+ var isZero bool
+ switch opts.DiffMode {
+ case diffIdentical:
+ isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
+ case diffRemoved:
+ isZero = r.Value.ValueX.IsZero()
+ case diffInserted:
+ isZero = r.Value.ValueY.IsZero()
+ }
+ if isZero {
+ continue
+ }
+ }
+ // Elide ignored nodes.
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+ deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+ if !deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ continue
+ }
+ if out := opts.FormatDiff(r.Value, ptrs); out != nil {
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ }
+ if deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case diffUnknown:
+ default:
+ panic("invalid diff mode")
+ }
+
+ // Handle differencing.
+ var numDiffs int
+ var list textList
+ var keys []reflect.Value // invariant: len(list) == len(keys)
+ groups := coalesceAdjacentRecords(name, recs)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Handle equal records.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing records to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+ if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numLo++
+ }
+ for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numHi++
+ }
+ if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+ numHi++ // Avoid pointless coalescing of a single equal record
+ }
+
+ // Format the equal values.
+ for _, r := range recs[:numLo] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ for _, r := range recs[numEqual-numHi : numEqual] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ recs = recs[numEqual:]
+ continue
+ }
+
+ // Handle unequal records.
+ for _, r := range recs[:ds.NumDiff()] {
+ switch {
+ case opts.CanFormatDiffSlice(r.Value):
+ out := opts.FormatDiffSlice(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ case r.Value.NumChildren == r.Value.MaxDepth:
+ outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i)
+ outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+ keys = append(keys, r.Key)
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+ keys = append(keys, r.Key)
+ }
+ default:
+ out := opts.FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ }
+ recs = recs[ds.NumDiff():]
+ numDiffs += ds.NumDiff()
+ }
+ if maxGroup.IsZero() {
+ assert(len(recs) == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ assert(len(list) == len(keys))
+
+ // For maps, the default formatting logic uses fmt.Stringer which may
+ // produce ambiguous output. Avoid calling String to disambiguate.
+ if k == reflect.Map {
+ var ambiguous bool
+ seenKeys := map[string]reflect.Value{}
+ for i, currKey := range keys {
+ if currKey.IsValid() {
+ strKey := list[i].Key
+ prevKey, seen := seenKeys[strKey]
+ if seen && prevKey.CanInterface() && currKey.CanInterface() {
+ ambiguous = prevKey.Interface() != currKey.Interface()
+ if ambiguous {
+ break
+ }
+ }
+ seenKeys[strKey] = currKey
+ }
+ }
+ if ambiguous {
+ for i, k := range keys {
+ if k.IsValid() {
+ list[i].Key = formatMapKey(k, true, ptrs)
+ }
+ }
+ }
+ }
+
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal, or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+ var prevCase int // Arbitrary index into which case last occurred
+ lastStats := func(i int) *diffStats {
+ if prevCase != i {
+ groups = append(groups, diffStats{Name: name})
+ prevCase = i
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, r := range recs {
+ switch rv := r.Value; {
+ case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+ lastStats(1).NumIgnored++
+ case rv.NumDiff == 0:
+ lastStats(1).NumIdentical++
+ case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+ lastStats(2).NumRemoved++
+ case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+ lastStats(2).NumInserted++
+ default:
+ lastStats(2).NumModified++
+ }
+ }
+ return groups
+}
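The diffRemoved ('-') and diffInserted ('+') modes above become the line prefixes of the textual report, with unprefixed lines acting as the surrounding equal context records. A minimal sketch that produces such a report; the item type and values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// item is illustrative only; any struct with exported fields behaves the same.
	type item struct {
		Name  string
		Count int
	}

	x := []item{{"a", 1}, {"b", 2}, {"c", 3}}
	y := []item{{"a", 1}, {"b", 5}, {"c", 3}}

	// In the printed report, "-" lines come from x (removed), "+" lines come
	// from y (inserted), and unprefixed lines are the equal context records.
	fmt.Println(cmp.Diff(x, y))
}
```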
diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go
new file mode 100644
index 000000000..be31b33a9
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_references.go
@@ -0,0 +1,264 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+const (
+ pointerDelimPrefix = "⟪"
+ pointerDelimSuffix = "⟫"
+)
+
+// formatPointer prints the address of the pointer.
+func formatPointer(p value.Pointer, withDelims bool) string {
+ v := p.Uintptr()
+ if flags.Deterministic {
+ v = 0xdeadf00f // Only used for stable testing purposes
+ }
+ if withDelims {
+ return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
+ }
+ return formatHex(uint64(v))
+}
+
+// pointerReferences is a stack of pointers visited so far.
+type pointerReferences [][2]value.Pointer
+
+func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
+ if deref && vx.IsValid() {
+ vx = vx.Addr()
+ }
+ if deref && vy.IsValid() {
+ vy = vy.Addr()
+ }
+ switch d {
+ case diffUnknown, diffIdentical:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
+ case diffRemoved:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
+ case diffInserted:
+ pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
+ }
+ *ps = append(*ps, pp)
+ return pp
+}
+
+func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
+ p = value.PointerOf(v)
+ for _, pp := range *ps {
+ if p == pp[0] || p == pp[1] {
+ return p, true
+ }
+ }
+ *ps = append(*ps, [2]value.Pointer{p, p})
+ return p, false
+}
+
+func (ps *pointerReferences) Pop() {
+ *ps = (*ps)[:len(*ps)-1]
+}
+
+// trunkReferences is metadata for a textNode indicating that the sub-tree
+// represents the value for either pointer in a pair of references.
+type trunkReferences struct{ pp [2]value.Pointer }
+
+// trunkReference is metadata for a textNode indicating that the sub-tree
+// represents the value for the given pointer reference.
+type trunkReference struct{ p value.Pointer }
+
+// leafReference is metadata for a textNode indicating that the value is
+// truncated as it refers to another part of the tree (i.e., a trunk).
+type leafReference struct{ p value.Pointer }
+
+func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
+ switch {
+ case pp[0].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
+ case pp[1].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ case pp[0] == pp[1]:
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ default:
+ return &textWrap{Value: s, Metadata: trunkReferences{pp}}
+ }
+}
+func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
+}
+func makeLeafReference(p value.Pointer, printAddress bool) textNode {
+ out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
+}
+
+// resolveReferences walks the textNode tree searching for any leaf reference
+// metadata and resolves each against the corresponding trunk references.
+// Since pointer addresses in memory are not particularly readable to the user,
+// it replaces each pointer value with an arbitrary and unique reference ID.
+func resolveReferences(s textNode) {
+ var walkNodes func(textNode, func(textNode))
+ walkNodes = func(s textNode, f func(textNode)) {
+ f(s)
+ switch s := s.(type) {
+ case *textWrap:
+ walkNodes(s.Value, f)
+ case textList:
+ for _, r := range s {
+ walkNodes(r.Value, f)
+ }
+ }
+ }
+
+ // Collect all trunks and leaves with reference metadata.
+ var trunks, leaves []*textWrap
+ walkNodes(s, func(s textNode) {
+ if s, ok := s.(*textWrap); ok {
+ switch s.Metadata.(type) {
+ case leafReference:
+ leaves = append(leaves, s)
+ case trunkReference, trunkReferences:
+ trunks = append(trunks, s)
+ }
+ }
+ })
+
+ // No leaf references to resolve.
+ if len(leaves) == 0 {
+ return
+ }
+
+ // Collect the set of all leaf references to resolve.
+ leafPtrs := make(map[value.Pointer]bool)
+ for _, leaf := range leaves {
+ leafPtrs[leaf.Metadata.(leafReference).p] = true
+ }
+
+ // Collect the set of trunk pointers that are always paired together.
+ // This allows us to assign a single ID to both pointers for brevity.
+ // If a pointer in a pair ever occurs by itself or as a different pair,
+ // then the pair is broken.
+ pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
+ unpair := func(p value.Pointer) {
+ if !pairedTrunkPtrs[p].IsNil() {
+ pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
+ }
+ pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ unpair(p.p) // standalone pointer cannot be part of a pair
+ case trunkReferences:
+ p0, ok0 := pairedTrunkPtrs[p.pp[0]]
+ p1, ok1 := pairedTrunkPtrs[p.pp[1]]
+ switch {
+ case !ok0 && !ok1:
+ // Register the newly seen pair.
+ pairedTrunkPtrs[p.pp[0]] = p.pp[1]
+ pairedTrunkPtrs[p.pp[1]] = p.pp[0]
+ case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
+ // Exact pair already seen; do nothing.
+ default:
+ // Pair conflicts with some other pair; break all pairs.
+ unpair(p.pp[0])
+ unpair(p.pp[1])
+ }
+ }
+ }
+
+ // Correlate each pointer referenced by leaves to a unique identifier,
+ // and print the IDs for each trunk that matches those pointers.
+ var nextID uint
+ ptrIDs := make(map[value.Pointer]uint)
+ newID := func() uint {
+ id := nextID
+ nextID++
+ return id
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ if print := leafPtrs[p.p]; print {
+ id, ok := ptrIDs[p.p]
+ if !ok {
+ id = newID()
+ ptrIDs[p.p] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ }
+ case trunkReferences:
+ print0 := leafPtrs[p.pp[0]]
+ print1 := leafPtrs[p.pp[1]]
+ if print0 || print1 {
+ id0, ok0 := ptrIDs[p.pp[0]]
+ id1, ok1 := ptrIDs[p.pp[1]]
+ isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
+ if isPair {
+ var id uint
+ assert(ok0 == ok1) // must be seen together or not at all
+ if ok0 {
+ assert(id0 == id1) // must have the same ID
+ id = id0
+ } else {
+ id = newID()
+ ptrIDs[p.pp[0]] = id
+ ptrIDs[p.pp[1]] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ } else {
+ if print0 && !ok0 {
+ id0 = newID()
+ ptrIDs[p.pp[0]] = id0
+ }
+ if print1 && !ok1 {
+ id1 = newID()
+ ptrIDs[p.pp[1]] = id1
+ }
+ switch {
+ case print0 && print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
+ case print0:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
+ case print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
+ }
+ }
+ }
+ }
+ }
+
+ // Update all leaf references with the unique identifier.
+ for _, leaf := range leaves {
+ if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
+ leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
+ }
+ }
+}
+
+func formatReference(id uint) string {
+ return fmt.Sprintf("ref#%d", id)
+}
+
+func updateReferencePrefix(prefix, ref string) string {
+ if prefix == "" {
+ return pointerDelimPrefix + ref + pointerDelimSuffix
+ }
+ suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
+ return pointerDelimPrefix + ref + ": " + suffix
+}
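These reference IDs surface when the compared values contain cycles: the repeated pointer is truncated to a leaf and tied back to its trunk via a ⟪ref#N⟫ marker. A sketch, assuming a hypothetical self-referential node type; the exact report formatting may vary:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// node is a hypothetical self-referential type used to demonstrate how
// cycles are rendered as reference IDs in the report.
type node struct {
	Value int
	Next  *node
}

func main() {
	x := &node{Value: 1}
	x.Next = x // x points back at itself
	y := &node{Value: 2}
	y.Next = y

	// Both graphs have the same shape, so the cycles themselves compare equal;
	// only Value differs. The repeated pointer appears as a ⟪ref#N⟫ leaf.
	fmt.Println(cmp.Diff(x, y))
}
```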
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 000000000..e39f42284
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,414 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+var (
+ anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+ stringType = reflect.TypeOf((*string)(nil)).Elem()
+ bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
+ byteType = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
+type formatValueOptions struct {
+ // AvoidStringer controls whether to avoid calling custom stringer
+ // methods like error.Error or fmt.Stringer.String.
+ AvoidStringer bool
+
+ // PrintAddresses controls whether to print the address of all pointers,
+ // slice elements, and maps.
+ PrintAddresses bool
+
+ // QualifiedNames controls whether FormatType uses the fully qualified name
+ // (including the full package path as opposed to just the package name).
+ QualifiedNames bool
+
+ // VerbosityLevel controls the amount of output to produce.
+ // A higher value produces more output. A value of zero or lower produces
+ // no output (represented using an ellipsis).
+ // If LimitVerbosity is false, then the level is treated as infinite.
+ VerbosityLevel int
+
+ // LimitVerbosity specifies that formatting should respect VerbosityLevel.
+ LimitVerbosity bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+ // Check whether to emit the type or not.
+ switch opts.TypeMode {
+ case autoType:
+ switch t.Kind() {
+ case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+ if s.Equal(textNil) {
+ return s
+ }
+ default:
+ return s
+ }
+ if opts.DiffMode == diffIdentical {
+ return s // elide type for identical nodes
+ }
+ case elideType:
+ return s
+ }
+
+ // Determine the type label, applying special handling for unnamed types.
+ typeName := value.TypeString(t, opts.QualifiedNames)
+ if t.Name() == "" {
+ // According to Go grammar, certain type literals contain symbols that
+ // do not strongly bind to the next lexicographical token (e.g., *T).
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ typeName = "(" + typeName + ")"
+ }
+ }
+ return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
+
+// wrapParens wraps s with a set of parentheses, but avoids it if the
+// wrapped node itself is already surrounded by a pair of parentheses or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+ var refNode *textWrap
+ if s2, ok := s.(*textWrap); ok {
+ // Unwrap a single pointer reference node.
+ switch s2.Metadata.(type) {
+ case leafReference, trunkReference, trunkReferences:
+ refNode = s2
+ if s3, ok := refNode.Value.(*textWrap); ok {
+ s2 = s3
+ }
+ }
+
+ // Already has delimiters that make parenthesis unnecessary.
+ hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+ hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
+ if hasParens || hasBraces {
+ return s
+ }
+ }
+ if refNode != nil {
+ refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+ return s
+ }
+ return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
+ if !v.IsValid() {
+ return nil
+ }
+ t := v.Type()
+
+ // Check slice element for cycles.
+ if parentKind == reflect.Slice {
+ ptrRef, visited := ptrs.Push(v.Addr())
+ if visited {
+ return makeLeafReference(ptrRef, false)
+ }
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+ }
+
+ // Check whether there is an Error or String method to call.
+ if !opts.AvoidStringer && v.CanInterface() {
+ // Avoid calling Error or String methods on nil receivers since many
+ // implementations crash when doing so.
+ if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+ var prefix, strVal string
+ func() {
+ // Swallow and ignore any panics from String or Error.
+ defer func() { recover() }()
+ switch v := v.Interface().(type) {
+ case error:
+ strVal = v.Error()
+ prefix = "e"
+ case fmt.Stringer:
+ strVal = v.String()
+ prefix = "s"
+ }
+ }()
+ if prefix != "" {
+ return opts.formatString(prefix, strVal)
+ }
+ }
+ }
+
+ // Check whether to explicitly wrap the result with the type.
+ var skipType bool
+ defer func() {
+ if !skipType {
+ out = opts.FormatType(t, out)
+ }
+ }()
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return textLine(fmt.Sprint(v.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return textLine(fmt.Sprint(v.Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uint8:
+ if parentKind == reflect.Slice || parentKind == reflect.Array {
+ return textLine(formatHex(v.Uint()))
+ }
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uintptr:
+ return textLine(formatHex(v.Uint()))
+ case reflect.Float32, reflect.Float64:
+ return textLine(fmt.Sprint(v.Float()))
+ case reflect.Complex64, reflect.Complex128:
+ return textLine(fmt.Sprint(v.Complex()))
+ case reflect.String:
+ return opts.formatString("", v.String())
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ return textLine(formatPointer(value.PointerOf(v), true))
+ case reflect.Struct:
+ var list textList
+ v := makeAddressable(v) // needed for retrieveUnexportedField
+ maxLen := v.NumField()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ for i := 0; i < v.NumField(); i++ {
+ vv := v.Field(i)
+ if vv.IsZero() {
+ continue // Elide fields with zero values
+ }
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sf := t.Field(i)
+ if !isExported(sf.Name) {
+ vv = retrieveUnexportedField(v, sf, true)
+ }
+ s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sf.Name, Value: s})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case reflect.Slice:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check whether this is a []byte of text data.
+ if t.Elem() == byteType {
+ b := v.Bytes()
+ isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
+ if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
+ out = opts.formatString("", string(b))
+ skipType = true
+ return opts.FormatType(t, out)
+ }
+ }
+
+ fallthrough
+ case reflect.Array:
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for i := 0; i < v.Len(); i++ {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
+ list = append(list, textRecord{Value: s})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if t.Kind() == reflect.Slice && opts.PrintAddresses {
+ header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
+ out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
+ }
+ return out
+ case reflect.Map:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ return makeLeafReference(ptrRef, opts.PrintAddresses)
+ }
+ defer ptrs.Pop()
+
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for _, k := range value.SortKeys(v.MapKeys()) {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sk := formatMapKey(k, false, ptrs)
+ sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sk, Value: sv})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ return out
+ case reflect.Ptr:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ out = makeLeafReference(ptrRef, opts.PrintAddresses)
+ return &textWrap{Prefix: "&", Value: out}
+ }
+ defer ptrs.Pop()
+
+ // Skip the name only if this is an unnamed pointer type.
+ // Otherwise taking the address of a value does not reproduce
+ // the named pointer type.
+ if v.Type().Name() == "" {
+ skipType = true // Let the underlying value print the type instead
+ }
+ out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ return out
+ case reflect.Interface:
+ if v.IsNil() {
+ return textNil
+ }
+ // Interfaces accept different concrete types,
+ // so configure the underlying value to explicitly print the type.
+ return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+ }
+}
+
+func (opts formatOptions) formatString(prefix, s string) textNode {
+ maxLen := len(s)
+ maxLines := strings.Count(s, "\n") + 1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+ maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ }
+
+ // For multiline strings, use the triple-quote syntax,
+ // but only use it when printing removed or inserted nodes since
+ // we only want the extra verbosity for those cases.
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+ isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+ for i := 0; i < len(lines) && isTripleQuoted; i++ {
+ lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ line := lines[i]
+ isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+ }
+ if isTripleQuoted {
+ var list textList
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ for i, line := range lines {
+ if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+ comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+ break
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+ }
+
+ // Format the string as a single-line quoted string.
+ if len(s) > maxLen+len(textEllipsis) {
+ return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+ }
+ return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+ var opts formatOptions
+ opts.DiffMode = diffIdentical
+ opts.TypeMode = elideType
+ opts.PrintAddresses = disambiguate
+ opts.AvoidStringer = disambiguate
+ opts.QualifiedNames = disambiguate
+ opts.VerbosityLevel = maxVerbosityPreset
+ opts.LimitVerbosity = true
+ s := opts.FormatValue(v, reflect.Map, ptrs).String()
+ return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
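+// For example, a plain "abc" is printed in double quotes, while a string that
+// would otherwise need escaping is printed in raw backtick form, provided it
+// contains no backticks, newlines, or unprintable runes.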
+func formatString(s string) string {
+ // Use the quoted string if it is the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+ }
+ if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
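+// The width grows in two-digit steps with the magnitude of u; for example,
+// formatHex(0x1f) returns "0x1f" and formatHex(0x12345) returns "0x012345".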
+func formatHex(u uint64) string {
+ var f string
+ switch {
+ case u <= 0xff:
+ f = "0x%02x"
+ case u <= 0xffff:
+ f = "0x%04x"
+ case u <= 0xffffff:
+ f = "0x%06x"
+ case u <= 0xffffffff:
+ f = "0x%08x"
+ case u <= 0xffffffffff:
+ f = "0x%010x"
+ case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 000000000..23e444f62
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,614 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
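+// Roughly: both values must be valid, at least one difference must have been
+// detected, no ignore or transform options may have fired, and for strings
+// both sides must be at least minLength bytes long.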
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+ switch {
+ case opts.DiffMode != diffUnknown:
+ return false // Must be formatting in diff mode
+ case v.NumDiff == 0:
+ return false // No differences detected
+ case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+ return false // Both values must be valid
+ case v.NumIgnored > 0:
+ return false // Some ignore option was used
+ case v.NumTransformed > 0:
+ return false // Some transform option was used
+ case v.NumCompared > 1:
+ return false // More than one comparison was used
+ case v.NumCompared == 1 && v.Type.Name() != "":
+ // The need for cmp to check applicability of options on every element
+ // in a slice is a significant performance detriment for large []byte.
+ // The workaround is to specify Comparer(bytes.Equal),
+ // which enables cmp to compare []byte more efficiently.
+ // If they differ, we still want to provide batched diffing.
+ // The logic disallows named types since they tend to have their own
+ // String method, with nicer formatting than what this provides.
+ return false
+ }
+
+ // Check whether this is an interface with the same concrete types.
+ t := v.Type
+ vx, vy := v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ }
+
+ // Check whether we provide specialized diffing for this type.
+ switch t.Kind() {
+ case reflect.String:
+ case reflect.Array, reflect.Slice:
+ // Only slices of primitive types have specialized handling.
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ default:
+ return false
+ }
+
+ // Both slice values have to be non-empty.
+ if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
+ return false
+ }
+
+ // If a sufficient number of elements already differ,
+ // use specialized formatting even if length requirement is not met.
+ if v.NumDiff > v.NumSame {
+ return true
+ }
+ default:
+ return false
+ }
+
+ // Use specialized string diffing for longer slices or strings.
+ const minLength = 32
+ return vx.Len() >= minLength && vy.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+ assert(opts.DiffMode == diffUnknown)
+ t, vx, vy := v.Type, v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ opts = opts.WithTypeMode(emitType)
+ }
+
+ // Auto-detect the type of the data.
+ var sx, sy string
+ var ssx, ssy []string
+ var isString, isMostlyText, isPureLinedText, isBinary bool
+ switch {
+ case t.Kind() == reflect.String:
+ sx, sy = vx.String(), vy.String()
+ isString = true
+ case t.Kind() == reflect.Slice && t.Elem() == byteType:
+ sx, sy = string(vx.Bytes()), string(vy.Bytes())
+ isString = true
+ case t.Kind() == reflect.Array:
+ // Arrays need to be addressable for slice operations to work.
+ vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+ vx2.Set(vx)
+ vy2.Set(vy)
+ vx, vy = vx2, vy2
+ }
+ if isString {
+ var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
+ for i, r := range sx + sy {
+ numTotalRunes++
+ if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
+ numValidRunes++
+ }
+ if r == '\n' {
+ if maxLineLen < i-lastLineIdx {
+ maxLineLen = i - lastLineIdx
+ }
+ lastLineIdx = i + 1
+ numLines++
+ }
+ }
+ isPureText := numValidRunes == numTotalRunes
+ isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
+ isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
+ isBinary = !isMostlyText
+
+ // Avoid diffing by lines if it produces a significantly more complex
+ // edit script than diffing by bytes.
+ if isPureLinedText {
+ ssx = strings.Split(sx, "\n")
+ ssy = strings.Split(sy, "\n")
+ esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(ssx[ix] == ssy[iy])
+ })
+ esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(sx[ix] == sy[iy])
+ })
+ efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
+ efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
+ quotedLength := len(strconv.Quote(sx + sy))
+ unquotedLength := len(sx) + len(sy)
+ escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
+ }
+ }
+
+ // Format the string into printable records.
+ var list textList
+ var delim string
+ switch {
+ // If the text appears to be multi-lined text,
+ // then perform differencing across individual lines.
+ case isPureLinedText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.Index(0).String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ delim = "\n"
+
+ // If possible, use a custom triple-quote (""") syntax for printing
+ // differences in a string literal. This format is more readable,
+ // but has edge-cases where differences are visually indistinguishable.
+ // This format is avoided under the following conditions:
+ // - A line starts with `"""`
+ // - A line starts with "..."
+ // - A line contains non-printable characters
+ // - Adjacent different lines differ only by whitespace
+ //
+ // For example:
+ //
+ // """
+ // ... // 3 identical lines
+ // foo
+ // bar
+ // - baz
+ // + BAZ
+ // """
+ isTripleQuoted := true
+ prevRemoveLines := map[string]bool{}
+ prevInsertLines := map[string]bool{}
+ var list2 textList
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ for _, r := range list {
+ if !r.Value.Equal(textEllipsis) {
+ line, _ := strconv.Unquote(string(r.Value.(textLine)))
+ line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ normLine := strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return -1 // drop whitespace to avoid visually indistinguishable output
+ }
+ return r
+ }, line)
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
+ switch r.Diff {
+ case diffRemoved:
+ isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
+ prevRemoveLines[normLine] = true
+ case diffInserted:
+ isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
+ prevInsertLines[normLine] = true
+ }
+ if !isTripleQuoted {
+ break
+ }
+ r.Value = textLine(line)
+ r.ElideComma = true
+ }
+ if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
+ prevRemoveLines = map[string]bool{}
+ prevInsertLines = map[string]bool{}
+ }
+ list2 = append(list2, r)
+ }
+ if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
+ list2 = list2[:len(list2)-1] // elide single empty line at the end
+ }
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ if isTripleQuoted {
+ var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
+ switch t.Kind() {
+ case reflect.String:
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ // Always emit type for slices since the triple-quote syntax
+ // looks like a string (not a slice).
+ opts = opts.WithTypeMode(emitType)
+ out = opts.FormatType(t, out)
+ }
+ return out
+ }
+
+ // If the text appears to be single-lined text,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is printed as quoted strings.
+ case isMostlyText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+
+ // If the text appears to be binary data,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is inspired by hexdump.
+ case isBinary:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ }
+ s := strings.Join(ss, ", ")
+ comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+ return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+ },
+ )
+
+ // For all other slices of primitive types,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The size of each chunk depends on the width of the element kind.
+ default:
+ var chunkSize int
+ if t.Elem().Kind() == reflect.Bool {
+ chunkSize = 16
+ } else {
+ switch t.Elem().Bits() {
+ case 8:
+ chunkSize = 16
+ case 16:
+ chunkSize = 12
+ case 32:
+ chunkSize = 8
+ default:
+ chunkSize = 8
+ }
+ }
+ list = opts.formatDiffSlice(
+ vx, vy, chunkSize, t.Elem().Kind().String(),
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
+ case reflect.Uint8, reflect.Uintptr:
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+ }
+ }
+ s := strings.Join(ss, ", ")
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ }
+
+ // Wrap the output with appropriate type information.
+ var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if !isMostlyText {
+ // The "{...}" byte-sequence literal is not valid Go syntax for strings.
+ // Emit the type for extra clarity (e.g. "string{...}").
+ if t.Kind() == reflect.String {
+ opts = opts.WithTypeMode(emitType)
+ }
+ return opts.FormatType(t, out)
+ }
+ switch t.Kind() {
+ case reflect.String:
+ out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != bytesType {
+ out = opts.FormatType(t, out)
+ }
+ }
+ return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
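+// Bytes outside the printable ASCII range are replaced with '.';
+// for example, formatASCII("ab\x00\x80cd") returns "ab..cd".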
+func formatASCII(s string) string {
+ b := bytes.Repeat([]byte{'.'}, len(s))
+ for i := 0; i < len(s); i++ {
+ if ' ' <= s[i] && s[i] <= '~' {
+ b[i] = s[i]
+ }
+ }
+ return string(b)
+}
+
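+// formatDiffSlice computes an edit script between vx and vy and renders it as
+// a textList, emitting removed, inserted, and identical chunks of at most
+// chunkSize elements per record via makeRec, with long identical runs elided.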
+func (opts formatOptions) formatDiffSlice(
+ vx, vy reflect.Value, chunkSize int, name string,
+ makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+ eq := func(ix, iy int) bool {
+ return vx.Index(ix).Interface() == vy.Index(iy).Interface()
+ }
+ es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+ return diff.BoolResult(eq(ix, iy))
+ })
+
+ appendChunks := func(v reflect.Value, d diffMode) int {
+ n0 := v.Len()
+ for v.Len() > 0 {
+ n := chunkSize
+ if n > v.Len() {
+ n = v.Len()
+ }
+ list = append(list, makeRec(v.Slice(0, n), d))
+ v = v.Slice(n, v.Len())
+ }
+ return n0 - v.Len()
+ }
+
+ var numDiffs int
+ maxLen := -1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ opts.VerbosityLevel--
+ }
+
+ groups := coalesceAdjacentEdits(name, es)
+ groups = coalesceInterveningIdentical(groups, chunkSize/4)
+ groups = cleanupSurroundingIdentical(groups, eq)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Print equal.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing equal bytes to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+ numLo++
+ }
+ for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ numHi++
+ }
+ if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+ numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
+ }
+
+ // Print the equal bytes.
+ appendChunks(vx.Slice(0, numLo), diffIdentical)
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ }
+ appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+ vx = vx.Slice(numEqual, vx.Len())
+ vy = vy.Slice(numEqual, vy.Len())
+ continue
+ }
+
+ // Print unequal.
+ len0 := len(list)
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+ vx = vx.Slice(nx, vx.Len())
+ ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+ vy = vy.Slice(ny, vy.Len())
+ numDiffs += len(list) - len0
+ }
+ if maxGroup.IsZero() {
+ assert(vx.Len() == 0 && vy.Len() == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ }
+ return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+//
+// Example:
+//
+// Input: "..XXY...Y"
+// Output: [
+// {NumIdentical: 2},
+// {NumRemoved: 2, NumInserted: 1},
+// {NumIdentical: 3},
+// {NumInserted: 1},
+// ]
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+ var prevMode byte
+ lastStats := func(mode byte) *diffStats {
+ if prevMode != mode {
+ groups = append(groups, diffStats{Name: name})
+ prevMode = mode
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, e := range es {
+ switch e {
+ case diff.Identity:
+ lastStats('=').NumIdentical++
+ case diff.UniqueX:
+ lastStats('!').NumRemoved++
+ case diff.UniqueY:
+ lastStats('!').NumInserted++
+ case diff.Modified:
+ lastStats('!').NumModified++
+ }
+ }
+ return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+//
+// Example:
+//
+// WindowSize: 16,
+// Input: [
+// {NumIdentical: 61}, // group 0
+// {NumRemoved: 3, NumInserted: 1}, // group 1
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 9}, // └── coalesce
+// {NumIdentical: 64}, // group 2
+// {NumRemoved: 3, NumInserted: 1}, // group 3
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 7}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 2}, // └── coalesce
+// {NumIdentical: 63}, // group 4
+// ]
+// Output: [
+// {NumIdentical: 61},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 64},
+// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 63},
+// ]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+ groups, groupsOrig := groups[:0], groups
+ for i, ds := range groupsOrig {
+ if len(groups) >= 2 && ds.NumDiff() > 0 {
+ prev := &groups[len(groups)-2] // Unequal group
+ curr := &groups[len(groups)-1] // Equal group
+ next := &groupsOrig[i] // Unequal group
+ hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+ hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+ if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+ *prev = prev.Append(*curr).Append(*next)
+ groups = groups[:len(groups)-1] // Truncate off equal group
+ continue
+ }
+ }
+ groups = append(groups, ds)
+ }
+ return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements become possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+// Input: [
+// {NumIdentical: 61},
+// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+// {NumIdentical: 67},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+// {NumIdentical: 54},
+// ]
+// Output: [
+// {NumIdentical: 64}, // incremented by 3
+// {NumRemoved: 9},
+// {NumIdentical: 67},
+// {NumRemoved: 9},
+// {NumIdentical: 64}, // incremented by 10
+// ]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+ var ix, iy int // indexes into sequence x and y
+ for i, ds := range groups {
+ // Handle equal group.
+ if ds.NumDiff() == 0 {
+ ix += ds.NumIdentical
+ iy += ds.NumIdentical
+ continue
+ }
+
+ // Handle unequal group.
+ nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+ ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+ var numLeadingIdentical, numTrailingIdentical int
+ for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+ numLeadingIdentical++
+ }
+ for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+ numTrailingIdentical++
+ }
+ if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+ if numLeadingIdentical > 0 {
+ // Remove leading identical span from this group and
+ // insert it into the preceding group.
+ if i-1 >= 0 {
+ groups[i-1].NumIdentical += numLeadingIdentical
+ } else {
+ // No preceding group exists, so prepend a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
+ }()
+ }
+ // Increment indexes since the preceding group would have handled this.
+ ix += numLeadingIdentical
+ iy += numLeadingIdentical
+ }
+ if numTrailingIdentical > 0 {
+ // Remove trailing identical span from this group and
+ // insert it into the succeeding group.
+ if i+1 < len(groups) {
+ groups[i+1].NumIdentical += numTrailingIdentical
+ } else {
+ // No succeeding group exists, so append a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
+ }()
+ }
+ // Do not increment indexes since the succeeding group will handle this.
+ }
+
+ // Update this group since some identical elements were removed.
+ nx -= numIdentical
+ ny -= numIdentical
+ groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
+ }
+ ix += nx
+ iy += ny
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 000000000..388fcf571
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,432 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+const maxColumnLength = 80
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+ // The output of Diff is documented as being unstable to provide future
+ // flexibility in changing the output for more humanly readable reports.
+ // This logic intentionally introduces instability to the exact output
+ // so that users can detect accidental reliance on stability early on,
+ // rather than much later when an actual change to the format occurs.
+ if flags.Deterministic || randBool {
+ // Use regular spaces (U+0020).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ } else {
+ // Use non-breaking spaces (U+00a0).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ }
+ return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+ for ; n > 0; n-- {
+ b = append(b, c)
+ }
+ return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
+type textNode interface {
+ // Len reports the length in bytes of a single-line version of the tree.
+ // Nested textRecord.Diff and textRecord.Comment fields are ignored.
+ Len() int
+ // Equal reports whether the two trees are structurally identical.
+ // Nested textRecord.Diff and textRecord.Comment fields are compared.
+ Equal(textNode) bool
+ // String returns the string representation of the text tree.
+ // It is not guaranteed that len(x.String()) == x.Len(),
+ // nor that x.String() == y.String() implies that x.Equal(y).
+ String() string
+
+ // formatCompactTo formats the contents of the tree as a single-line string
+ // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+ // fields are ignored.
+ //
+ // However, not all nodes in the tree should be collapsed as a single-line.
+ // If a node can be collapsed as a single-line, it is replaced by a textLine
+ // node. Since the top-level node cannot replace itself, this also returns
+ // the current node itself.
+ //
+ // This does not mutate the receiver.
+ formatCompactTo([]byte, diffMode) ([]byte, textNode)
+ // formatExpandedTo formats the contents of the tree as a multi-line string
+ // to the provided buffer. In order for column alignment to operate well,
+ // formatCompactTo must be called before calling formatExpandedTo.
+ formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+ Prefix string // e.g., "bytes.Buffer{"
+ Value textNode // textWrap | textList | textLine
+ Suffix string // e.g., "}"
+ Metadata interface{} // arbitrary metadata; has no effect on formatting
+}
+
+func (s *textWrap) Len() int {
+ return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 *textWrap) Equal(s2 textNode) bool {
+ if s2, ok := s2.(*textWrap); ok {
+ return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+ }
+ return false
+}
+func (s *textWrap) String() string {
+ var d diffMode
+ var n indentMode
+ _, s2 := s.formatCompactTo(nil, d)
+ b := n.appendIndent(nil, d) // Leading indent
+ b = s2.formatExpandedTo(b, d, n) // Main body
+ b = append(b, '\n') // Trailing newline
+ return string(b)
+}
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ n0 := len(b) // Original buffer length
+ b = append(b, s.Prefix...)
+ b, s.Value = s.Value.formatCompactTo(b, d)
+ b = append(b, s.Suffix...)
+ if _, ok := s.Value.(textLine); ok {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ b = append(b, s.Prefix...)
+ b = s.Value.formatExpandedTo(b, d, n)
+ b = append(b, s.Suffix...)
+ return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+ Diff diffMode // e.g., 0 or '-' or '+'
+ Key string // e.g., "MyField"
+ Value textNode // textWrap | textLine
+ ElideComma bool // avoid trailing comma
+ Comment fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero, it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+ hasStats := !ds.IsZero()
+ if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+ if hasStats {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
+ } else {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
+ }
+ return
+ }
+ if hasStats {
+ (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+ }
+}
+
+func (s textList) Len() (n int) {
+ for i, r := range s {
+ n += len(r.Key)
+ if r.Key != "" {
+ n += len(": ")
+ }
+ n += r.Value.Len()
+ if i < len(s)-1 {
+ n += len(", ")
+ }
+ }
+ return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textList); ok {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ r1, r2 := s1[i], s2[i]
+ if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (s textList) String() string {
+ return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ s = append(textList(nil), s...) // Avoid mutating original
+
+ // Determine whether we can collapse this list as a single line.
+ n0 := len(b) // Original buffer length
+ var multiLine bool
+ for i, r := range s {
+ if r.Diff == diffInserted || r.Diff == diffRemoved {
+ multiLine = true
+ }
+ b = append(b, r.Key...)
+ if r.Key != "" {
+ b = append(b, ": "...)
+ }
+ b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+ if _, ok := s[i].Value.(textLine); !ok {
+ multiLine = true
+ }
+ if r.Comment != nil {
+ multiLine = true
+ }
+ if i < len(s)-1 {
+ b = append(b, ", "...)
+ }
+ }
+ // Force multi-lined output when printing a removed/inserted node that
+ // is sufficiently long.
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
+ multiLine = true
+ }
+ if !multiLine {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ alignKeyLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return r.Key == "" || !isLine
+ },
+ func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
+ )
+ alignValueLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+ },
+ func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
+ )
+
+ // Format lists of simple lists in a batched form.
+ // If the list is a sequence of only textLine values,
+ // then batch multiple values on a single line.
+ var isSimple bool
+ for _, r := range s {
+ _, isLine := r.Value.(textLine)
+ isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
+ if !isSimple {
+ break
+ }
+ }
+ if isSimple {
+ n++
+ var batch []byte
+ emitBatch := func() {
+ if len(batch) > 0 {
+ b = n.appendIndent(append(b, '\n'), d)
+ b = append(b, bytes.TrimRight(batch, " ")...)
+ batch = batch[:0]
+ }
+ }
+ for _, r := range s {
+ line := r.Value.(textLine)
+ if len(batch)+len(line)+len(", ") > maxColumnLength {
+ emitBatch()
+ }
+ batch = append(batch, line...)
+ batch = append(batch, ", "...)
+ }
+ emitBatch()
+ n--
+ return n.appendIndent(append(b, '\n'), d)
+ }
+
+ // Format the list as a multi-lined output.
+ n++
+ for i, r := range s {
+ b = n.appendIndent(append(b, '\n'), d|r.Diff)
+ if r.Key != "" {
+ b = append(b, r.Key+": "...)
+ }
+ b = alignKeyLens[i].appendChar(b, ' ')
+
+ b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+ if !r.ElideComma {
+ b = append(b, ',')
+ }
+ b = alignValueLens[i].appendChar(b, ' ')
+
+ if r.Comment != nil {
+ b = append(b, " // "+r.Comment.String()...)
+ }
+ }
+ n--
+
+ return n.appendIndent(append(b, '\n'), d)
+}
+
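+// alignLens computes, for each record, the amount of padding needed to
+// column-align consecutive records measured by lenFunc, resetting the
+// alignment group whenever skipFunc reports true.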
+func (s textList) alignLens(
+ skipFunc func(textRecord) bool,
+ lenFunc func(textRecord) int,
+) []repeatCount {
+ var startIdx, endIdx, maxLen int
+ lens := make([]repeatCount, len(s))
+ for i, r := range s {
+ if skipFunc(r) {
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ startIdx, endIdx, maxLen = i+1, i+1, 0
+ } else {
+ if maxLen < lenFunc(r) {
+ maxLen = lenFunc(r)
+ }
+ endIdx = i + 1
+ }
+ }
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+ textNil = textLine("nil")
+ textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+ return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textLine); ok {
+ return bytes.Equal([]byte(s1), []byte(s2))
+ }
+ return false
+}
+func (s textLine) String() string {
+ return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+ return append(b, s...)
+}
+
+type diffStats struct {
+ Name string
+ NumIgnored int
+ NumIdentical int
+ NumRemoved int
+ NumInserted int
+ NumModified int
+}
+
+func (s diffStats) IsZero() bool {
+ s.Name = ""
+ return s == diffStats{}
+}
+
+func (s diffStats) NumDiff() int {
+ return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+ assert(s.Name == ds.Name)
+ s.NumIgnored += ds.NumIgnored
+ s.NumIdentical += ds.NumIdentical
+ s.NumRemoved += ds.NumRemoved
+ s.NumInserted += ds.NumInserted
+ s.NumModified += ds.NumModified
+ return s
+}
+
+// String prints a humanly-readable summary of coalesced records.
+//
+// Example:
+//
+// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+ var ss []string
+ var sum int
+ labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+ counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+ for i, n := range counts {
+ if n > 0 {
+ ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+ }
+ sum += n
+ }
+
+ // Pluralize the name (adjusting for some obscure English grammar rules).
+ name := s.Name
+ if sum > 1 {
+ name += "s"
+ if strings.HasSuffix(name, "ys") {
+ name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+ }
+ }
+
+ // Format the list according to English grammar (with Oxford comma).
+ switch n := len(ss); n {
+ case 0:
+ return ""
+ case 1, 2:
+ return strings.Join(ss, " and ") + " " + name
+ default:
+ return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+ }
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 000000000..668d470fd
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+ parent *valueNode
+
+ Type reflect.Type
+ ValueX reflect.Value
+ ValueY reflect.Value
+
+ // NumSame is the number of leaf nodes that are equal.
+ // All descendants are equal only if NumDiff is 0.
+ NumSame int
+ // NumDiff is the number of leaf nodes that are not equal.
+ NumDiff int
+ // NumIgnored is the number of leaf nodes that are ignored.
+ NumIgnored int
+ // NumCompared is the number of leaf nodes that were compared
+ // using an Equal method or Comparer function.
+ NumCompared int
+ // NumTransformed is the number of non-leaf nodes that were transformed.
+ NumTransformed int
+ // NumChildren is the number of transitive descendants of this node.
+ // This counts from zero; thus, leaf nodes have no descendants.
+ NumChildren int
+ // MaxDepth is the maximum depth of the tree. This counts from zero;
+ // thus, leaf nodes have a depth of zero.
+ MaxDepth int
+
+ // Records is a list of struct fields, slice elements, or map entries.
+ Records []reportRecord // If populated, implies Value is not populated
+
+ // Value is the result of a transformation, pointer indirection, or
+ // type assertion.
+ Value *valueNode // If populated, implies Records is not populated
+
+ // TransformerName is the name of the transformer.
+ TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+ Key reflect.Value // Invalid for slice element
+ Value *valueNode
+}
+
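+// PushStep creates a child node for the given path step, attaches it to the
+// parent (as a record, or as the singular Value for indirections, type
+// assertions, and transforms), and returns it.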
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+ vx, vy := ps.Values()
+ child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+ switch s := ps.(type) {
+ case StructField:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+ case SliceIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Value: child})
+ case MapIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+ case Indirect:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case TypeAssertion:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case Transform:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ parent.TransformerName = s.Name()
+ parent.NumTransformed++
+ default:
+ assert(parent == nil) // Must be the root step
+ }
+ return child
+}
+
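+// Report records the outcome of a single leaf comparison on this node,
+// incrementing the same/diff/ignored and compared counters accordingly.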
+func (r *valueNode) Report(rs Result) {
+ assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+ if rs.ByIgnore() {
+ r.NumIgnored++
+ } else {
+ if rs.Equal() {
+ r.NumSame++
+ } else {
+ r.NumDiff++
+ }
+ }
+ assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+ if rs.ByMethod() {
+ r.NumCompared++
+ }
+ if rs.ByFunc() {
+ r.NumCompared++
+ }
+ assert(r.NumCompared <= 1)
+}
+
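+// PopStep folds this node's counters into its parent and returns the parent,
+// or nil if this node is the root.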
+func (child *valueNode) PopStep() (parent *valueNode) {
+ if child.parent == nil {
+ return nil
+ }
+ parent = child.parent
+ parent.NumSame += child.NumSame
+ parent.NumDiff += child.NumDiff
+ parent.NumIgnored += child.NumIgnored
+ parent.NumCompared += child.NumCompared
+ parent.NumTransformed += child.NumTransformed
+ parent.NumChildren += child.NumChildren + 1
+ if parent.MaxDepth < child.MaxDepth+1 {
+ parent.MaxDepth = child.MaxDepth + 1
+ }
+ return parent
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1a35e4dfb..e73da1052 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -353,6 +353,13 @@ github.com/golang/geo/r2
github.com/golang/geo/r3
github.com/golang/geo/s1
github.com/golang/geo/s2
+# github.com/google/go-cmp v0.6.0
+## explicit; go 1.13
+github.com/google/go-cmp/cmp
+github.com/google/go-cmp/cmp/internal/diff
+github.com/google/go-cmp/cmp/internal/flags
+github.com/google/go-cmp/cmp/internal/function
+github.com/google/go-cmp/cmp/internal/value
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid