Mirror of https://github.com/TwiN/gatus.git (synced 2024-11-25 01:13:40 +01:00)

Update dependencies

This commit is contained in:
  parent 75b7a41c9d
  commit fefc728201
go.mod (7 lines changed)
@@ -3,6 +3,9 @@ module github.com/TwinProduction/gatus
 go 1.15
 
 require (
-	github.com/prometheus/client_golang v1.2.1
-	gopkg.in/yaml.v2 v2.2.2
+	github.com/prometheus/client_golang v1.7.1
+	github.com/prometheus/common v0.13.0 // indirect
+	golang.org/x/sys v0.0.0-20200916084744-dbad9cb7cb7a // indirect
+	google.golang.org/protobuf v1.25.0 // indirect
+	gopkg.in/yaml.v2 v2.3.0
 )
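The hunk above only swaps module versions. For orientation, here is a minimal sketch of the kind of Prometheus instrumentation the bumped github.com/prometheus/client_golang module serves; the metric name and port are illustrative assumptions, not taken from gatus or from this commit.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// checksTotal is a hypothetical counter; gatus defines its own metrics.
var checksTotal = promauto.NewCounter(prometheus.CounterOpts{
	Name: "example_checks_total",
	Help: "Total number of health checks performed.",
})

func main() {
	checksTotal.Inc()
	// Expose the metrics endpoint that Prometheus scrapes.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}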
go.sum (351 lines changed)
@@ -1,79 +1,414 @@
[go.sum checksum entries (h1: and /go.mod hashes) regenerated for the updated dependency tree: github.com/prometheus/client_golang v1.7.1, github.com/prometheus/common v0.13.0, golang.org/x/sys v0.0.0-20200916084744-dbad9cb7cb7a, google.golang.org/protobuf v1.25.0, gopkg.in/yaml.v2 v2.3.0, and their transitive dependencies]
vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored; 12 lines changed)
@@ -28,6 +28,18 @@ func (*Digest) Sum64() uint64
 This implementation provides a fast pure-Go implementation and an even faster
 assembly implementation for amd64.
 
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
 ## Benchmarks
 
 Here are some quick benchmarks comparing the pure-Go and assembly
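For context on the API this vendored README documents, here is a brief usage sketch of xxhash v2 (one-shot Sum64 plus the streaming Digest type); it is illustrative only and not part of the commit.

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("hello, gatus")

	// One-shot hashing of a byte slice.
	fmt.Printf("Sum64:        %x\n", xxhash.Sum64(data))

	// Streaming hashing via the Digest type (implements hash.Hash64).
	d := xxhash.New()
	d.Write([]byte("hello, "))
	d.Write([]byte("gatus"))
	fmt.Printf("Digest.Sum64: %x\n", d.Sum64())
}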
vendor/github.com/cespare/xxhash/v2/go.mod (generated, vendored; 2 lines changed)
@@ -1,3 +1,3 @@
 module github.com/cespare/xxhash/v2
 
-go 1.13
+go 1.11
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go (generated, vendored; 2 lines changed)
@@ -10,4 +10,4 @@ package xxhash
 func Sum64(b []byte) uint64
 
 //go:noescape
-func writeBlocks(*Digest, []byte) int
+func writeBlocks(d *Digest, b []byte) int
vendor/github.com/golang/protobuf/proto/buffer.go (generated, vendored; new file, 324 lines)
@ -0,0 +1,324 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed32 = 5
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
)
|
||||
|
||||
// EncodeVarint returns the varint encoded bytes of v.
|
||||
func EncodeVarint(v uint64) []byte {
|
||||
return protowire.AppendVarint(nil, v)
|
||||
}
|
||||
|
||||
// SizeVarint returns the length of the varint encoded bytes of v.
|
||||
// This is equal to len(EncodeVarint(v)).
|
||||
func SizeVarint(v uint64) int {
|
||||
return protowire.SizeVarint(v)
|
||||
}
|
||||
|
||||
// DecodeVarint parses a varint encoded integer from b,
|
||||
// returning the integer value and the length of the varint.
|
||||
// It returns (0, 0) if there is a parse error.
|
||||
func DecodeVarint(b []byte) (uint64, int) {
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return 0, 0
|
||||
}
|
||||
return v, n
|
||||
}
|
||||
|
||||
// Buffer is a buffer for encoding and decoding the protobuf wire format.
|
||||
// It may be reused between invocations to reduce memory usage.
|
||||
type Buffer struct {
|
||||
buf []byte
|
||||
idx int
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer initialized with buf,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
return &Buffer{buf: buf}
|
||||
}
|
||||
|
||||
// SetDeterministic specifies whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (b *Buffer) SetDeterministic(deterministic bool) {
|
||||
b.deterministic = deterministic
|
||||
}
|
||||
|
||||
// SetBuf sets buf as the internal buffer,
|
||||
// where the contents of buf are considered the unread portion of the buffer.
|
||||
func (b *Buffer) SetBuf(buf []byte) {
|
||||
b.buf = buf
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Reset clears the internal buffer of all written and unread data.
|
||||
func (b *Buffer) Reset() {
|
||||
b.buf = b.buf[:0]
|
||||
b.idx = 0
|
||||
}
|
||||
|
||||
// Bytes returns the internal buffer.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.buf
|
||||
}
|
||||
|
||||
// Unread returns the unread portion of the buffer.
|
||||
func (b *Buffer) Unread() []byte {
|
||||
return b.buf[b.idx:]
|
||||
}
|
||||
|
||||
// Marshal appends the wire-format encoding of m to the buffer.
|
||||
func (b *Buffer) Marshal(m Message) error {
|
||||
var err error
|
||||
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the wire-format message in the buffer and
|
||||
// places the decoded results in m.
|
||||
// It does not reset m before unmarshaling.
|
||||
func (b *Buffer) Unmarshal(m Message) error {
|
||||
err := UnmarshalMerge(b.Unread(), m)
|
||||
b.idx = len(b.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
|
||||
|
||||
func (m *unknownFields) String() string { panic("not implemented") }
|
||||
func (m *unknownFields) Reset() { panic("not implemented") }
|
||||
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
|
||||
|
||||
// DebugPrint dumps the encoded bytes of b with a header and footer including s
|
||||
// to stdout. This is only intended for debugging.
|
||||
func (*Buffer) DebugPrint(s string, b []byte) {
|
||||
m := MessageReflect(new(unknownFields))
|
||||
m.SetUnknown(b)
|
||||
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
|
||||
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
|
||||
}
|
||||
|
||||
// EncodeVarint appends an unsigned varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeVarint(v uint64) error {
|
||||
b.buf = protowire.AppendVarint(b.buf, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag32(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
|
||||
}
|
||||
|
||||
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
|
||||
func (b *Buffer) EncodeZigzag64(v uint64) error {
|
||||
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed32(v uint64) error {
|
||||
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
|
||||
func (b *Buffer) EncodeFixed64(v uint64) error {
    b.buf = protowire.AppendFixed64(b.buf, uint64(v))
    return nil
}

// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
func (b *Buffer) EncodeRawBytes(v []byte) error {
    b.buf = protowire.AppendBytes(b.buf, v)
    return nil
}

// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
    b.buf = protowire.AppendString(b.buf, v)
    return nil
}

// EncodeMessage appends a length-prefixed encoded message to the buffer.
func (b *Buffer) EncodeMessage(m Message) error {
    var err error
    b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
    b.buf, err = marshalAppend(b.buf, m, b.deterministic)
    return err
}

// DecodeVarint consumes an encoded unsigned varint from the buffer.
func (b *Buffer) DecodeVarint() (uint64, error) {
    v, n := protowire.ConsumeVarint(b.buf[b.idx:])
    if n < 0 {
        return 0, protowire.ParseError(n)
    }
    b.idx += n
    return uint64(v), nil
}

// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
    v, err := b.DecodeVarint()
    if err != nil {
        return 0, err
    }
    return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}

// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
    v, err := b.DecodeVarint()
    if err != nil {
        return 0, err
    }
    return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}

// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed32() (uint64, error) {
    v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
    if n < 0 {
        return 0, protowire.ParseError(n)
    }
    b.idx += n
    return uint64(v), nil
}

// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
    v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
    if n < 0 {
        return 0, protowire.ParseError(n)
    }
    b.idx += n
    return uint64(v), nil
}

// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
    v, n := protowire.ConsumeBytes(b.buf[b.idx:])
    if n < 0 {
        return nil, protowire.ParseError(n)
    }
    b.idx += n
    if alloc {
        v = append([]byte(nil), v...)
    }
    return v, nil
}

// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
    v, n := protowire.ConsumeString(b.buf[b.idx:])
    if n < 0 {
        return "", protowire.ParseError(n)
    }
    b.idx += n
    return v, nil
}

// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeMessage(m Message) error {
    v, err := b.DecodeRawBytes(false)
    if err != nil {
        return err
    }
    return UnmarshalMerge(v, m)
}

// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including the end group marker).
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeGroup(m Message) error {
    v, n, err := consumeGroup(b.buf[b.idx:])
    if err != nil {
        return err
    }
    b.idx += n
    return UnmarshalMerge(v, m)
}

// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and the
// the total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
    b0 := b
    depth := 1 // assume this follows a start group marker
    for {
        _, wtyp, tagLen := protowire.ConsumeTag(b)
        if tagLen < 0 {
            return nil, 0, protowire.ParseError(tagLen)
        }
        b = b[tagLen:]

        var valLen int
        switch wtyp {
        case protowire.VarintType:
            _, valLen = protowire.ConsumeVarint(b)
        case protowire.Fixed32Type:
            _, valLen = protowire.ConsumeFixed32(b)
        case protowire.Fixed64Type:
            _, valLen = protowire.ConsumeFixed64(b)
        case protowire.BytesType:
            _, valLen = protowire.ConsumeBytes(b)
        case protowire.StartGroupType:
            depth++
        case protowire.EndGroupType:
            depth--
        default:
            return nil, 0, errors.New("proto: cannot parse reserved wire type")
        }
        if valLen < 0 {
            return nil, 0, protowire.ParseError(valLen)
        }
        b = b[valLen:]

        if depth == 0 {
            return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
        }
    }
}
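For orientation, a minimal round-trip sketch of how the Buffer wrappers above are typically used; this is not part of the diff and assumes the package's NewBuffer constructor and Bytes accessor:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        // Write a fixed64 and a length-prefixed string.
        b := proto.NewBuffer(nil)
        _ = b.EncodeFixed64(42)
        _ = b.EncodeStringBytes("gatus")

        // Read them back in the same order.
        d := proto.NewBuffer(b.Bytes())
        n, _ := d.DecodeFixed64()
        s, _ := d.DecodeStringBytes()
        fmt.Println(n, s) // 42 gatus
    }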
253 vendor/github.com/golang/protobuf/proto/clone.go generated vendored
@ -1,253 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Protocol buffer deep copy and merge.
// TODO: RawMessage.

package proto

import (
    "fmt"
    "log"
    "reflect"
    "strings"
)

// Clone returns a deep copy of a protocol buffer.
func Clone(src Message) Message {
    in := reflect.ValueOf(src)
    if in.IsNil() {
        return src
    }
    out := reflect.New(in.Type().Elem())
    dst := out.Interface().(Message)
    Merge(dst, src)
    return dst
}

// Merger is the interface representing objects that can merge messages of the same type.
type Merger interface {
    // Merge merges src into this message.
    // Required and optional fields that are set in src will be set to that value in dst.
    // Elements of repeated fields will be appended.
    //
    // Merge may panic if called with a different argument type than the receiver.
    Merge(src Message)
}

// generatedMerger is the custom merge method that generated protos will have.
// We must add this method since a generate Merge method will conflict with
// many existing protos that have a Merge data field already defined.
type generatedMerger interface {
    XXX_Merge(src Message)
}

// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
    if m, ok := dst.(Merger); ok {
        m.Merge(src)
        return
    }

    in := reflect.ValueOf(src)
    out := reflect.ValueOf(dst)
    if out.IsNil() {
        panic("proto: nil destination")
    }
    if in.Type() != out.Type() {
        panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
    }
    if in.IsNil() {
        return // Merge from nil src is a noop
    }
    if m, ok := dst.(generatedMerger); ok {
        m.XXX_Merge(src)
        return
    }
    mergeStruct(out.Elem(), in.Elem())
}

func mergeStruct(out, in reflect.Value) {
    sprop := GetProperties(in.Type())
    for i := 0; i < in.NumField(); i++ {
        f := in.Type().Field(i)
        if strings.HasPrefix(f.Name, "XXX_") {
            continue
        }
        mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
    }

    if emIn, err := extendable(in.Addr().Interface()); err == nil {
        emOut, _ := extendable(out.Addr().Interface())
        mIn, muIn := emIn.extensionsRead()
        if mIn != nil {
            mOut := emOut.extensionsWrite()
            muIn.Lock()
            mergeExtension(mOut, mIn)
            muIn.Unlock()
        }
    }

    uf := in.FieldByName("XXX_unrecognized")
    if !uf.IsValid() {
        return
    }
    uin := uf.Bytes()
    if len(uin) > 0 {
        out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
    }
}

// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
    if in.Type() == protoMessageType {
        if !in.IsNil() {
            if out.IsNil() {
                out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
            } else {
                Merge(out.Interface().(Message), in.Interface().(Message))
            }
        }
        return
    }
    switch in.Kind() {
    case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
        reflect.String, reflect.Uint32, reflect.Uint64:
        if !viaPtr && isProto3Zero(in) {
            return
        }
        out.Set(in)
    case reflect.Interface:
        // Probably a oneof field; copy non-nil values.
        if in.IsNil() {
            return
        }
        // Allocate destination if it is not set, or set to a different type.
        // Otherwise we will merge as normal.
        if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
            out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
        }
        mergeAny(out.Elem(), in.Elem(), false, nil)
    case reflect.Map:
        if in.Len() == 0 {
            return
        }
        if out.IsNil() {
            out.Set(reflect.MakeMap(in.Type()))
        }
        // For maps with value types of *T or []byte we need to deep copy each value.
        elemKind := in.Type().Elem().Kind()
        for _, key := range in.MapKeys() {
            var val reflect.Value
            switch elemKind {
            case reflect.Ptr:
                val = reflect.New(in.Type().Elem().Elem())
                mergeAny(val, in.MapIndex(key), false, nil)
            case reflect.Slice:
                val = in.MapIndex(key)
                val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
            default:
                val = in.MapIndex(key)
            }
            out.SetMapIndex(key, val)
        }
    case reflect.Ptr:
        if in.IsNil() {
            return
        }
        if out.IsNil() {
            out.Set(reflect.New(in.Elem().Type()))
        }
        mergeAny(out.Elem(), in.Elem(), true, nil)
    case reflect.Slice:
        if in.IsNil() {
            return
        }
        if in.Type().Elem().Kind() == reflect.Uint8 {
            // []byte is a scalar bytes field, not a repeated field.

            // Edge case: if this is in a proto3 message, a zero length
            // bytes field is considered the zero value, and should not
            // be merged.
            if prop != nil && prop.proto3 && in.Len() == 0 {
                return
            }

            // Make a deep copy.
            // Append to []byte{} instead of []byte(nil) so that we never end up
            // with a nil result.
            out.SetBytes(append([]byte{}, in.Bytes()...))
            return
        }
        n := in.Len()
        if out.IsNil() {
            out.Set(reflect.MakeSlice(in.Type(), 0, n))
        }
        switch in.Type().Elem().Kind() {
        case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
            reflect.String, reflect.Uint32, reflect.Uint64:
            out.Set(reflect.AppendSlice(out, in))
        default:
            for i := 0; i < n; i++ {
                x := reflect.Indirect(reflect.New(in.Type().Elem()))
                mergeAny(x, in.Index(i), false, nil)
                out.Set(reflect.Append(out, x))
            }
        }
    case reflect.Struct:
        mergeStruct(out, in)
    default:
        // unknown type, so not a protocol buffer
        log.Printf("proto: don't know how to copy %v", in)
    }
}

func mergeExtension(out, in map[int32]Extension) {
    for extNum, eIn := range in {
        eOut := Extension{desc: eIn.desc}
        if eIn.value != nil {
            v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
            mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
            eOut.value = v.Interface()
        }
        if eIn.enc != nil {
            eOut.enc = make([]byte, len(eIn.enc))
            copy(eOut.enc, eIn.enc)
        }

        out[extNum] = eOut
    }
}
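As a reference for what this removed file provided, a small sketch of the Clone/Merge semantics implemented above; not part of the diff, pb.Example is a hypothetical generated message type, and both entry points remain exposed by the proto package after the rewrite:

    src := &pb.Example{Name: "a", Tags: []string{"x"}}
    dst := proto.Clone(src).(*pb.Example) // deep copy, including repeated and map fields

    extra := &pb.Example{Tags: []string{"y"}}
    proto.Merge(dst, extra) // scalar fields set in extra overwrite dst; repeated fields append: Tags == ["x", "y"]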
427 vendor/github.com/golang/protobuf/proto/decode.go generated vendored
@ -1,427 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto have unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// Slow workaround for messages that aren't Unmarshalers.
|
||||
// This includes some hand-coded .pb.go files and
|
||||
// bootstrap protos.
|
||||
// TODO: fix all of those and then add Unmarshal to
|
||||
// the Message interface. Then:
|
||||
// The cast above and code below can be deleted.
|
||||
// The old unmarshaler can be deleted.
|
||||
// Clients can call Unmarshal directly (can already do that, actually).
|
||||
var info InternalMessageInfo
|
||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
63 vendor/github.com/golang/protobuf/proto/defaults.go generated vendored Normal file
@ -0,0 +1,63 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    "google.golang.org/protobuf/reflect/protoreflect"
)

// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
    if m != nil {
        setDefaults(MessageReflect(m))
    }
}

func setDefaults(m protoreflect.Message) {
    fds := m.Descriptor().Fields()
    for i := 0; i < fds.Len(); i++ {
        fd := fds.Get(i)
        if !m.Has(fd) {
            if fd.HasDefault() && fd.ContainingOneof() == nil {
                v := fd.Default()
                if fd.Kind() == protoreflect.BytesKind {
                    v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
                }
                m.Set(fd, v)
            }
            continue
        }
    }

    m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
        switch {
        // Handle singular message.
        case fd.Cardinality() != protoreflect.Repeated:
            if fd.Message() != nil {
                setDefaults(m.Get(fd).Message())
            }
        // Handle list of messages.
        case fd.IsList():
            if fd.Message() != nil {
                ls := m.Get(fd).List()
                for i := 0; i < ls.Len(); i++ {
                    setDefaults(ls.Get(i).Message())
                }
            }
        // Handle map of messages.
        case fd.IsMap():
            if fd.MapValue().Message() != nil {
                ms := m.Get(fd).Map()
                ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
                    setDefaults(v.Message())
                    return true
                })
            }
        }
        return true
    })
}
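A brief usage sketch of the SetDefaults helper added above; not part of the diff, and pb.Settings is a hypothetical proto2 message whose retry_count field declares a default of 3:

    s := &pb.Settings{}
    proto.SetDefaults(s)
    // s.RetryCount is now populated with 3. Already-set fields and fields inside a oneof
    // are left alone, and the call recurses into any populated message-typed fields.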
126 vendor/github.com/golang/protobuf/proto/deprecated.go generated vendored
@ -1,63 +1,113 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import "errors"
import (
    "encoding/json"
    "errors"
    "fmt"
    "strconv"

// Deprecated: do not use.
    protoV2 "google.golang.org/protobuf/proto"
)

var (
    // Deprecated: No longer returned.
    ErrNil = errors.New("proto: Marshal called with nil")

    // Deprecated: No longer returned.
    ErrTooLarge = errors.New("proto: message encodes to over 2 GB")

    // Deprecated: No longer returned.
    ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
)

// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }

// Deprecated: do not use.
// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }

// Deprecated: do not use.
// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
    return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
    return errors.New("proto: not implemented")
}

// Deprecated: do not use.
// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
    return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
    return errors.New("proto: not implemented")
}

// Deprecated: do not use.
// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}

// Deprecated: Do not use.
func EnumName(m map[int32]string, v int32) string {
    s, ok := m[v]
    if ok {
        return s
    }
    return strconv.Itoa(int(v))
}

// Deprecated: Do not use.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
    if data[0] == '"' {
        // New style: enums are strings.
        var repr string
        if err := json.Unmarshal(data, &repr); err != nil {
            return -1, err
        }
        val, ok := m[repr]
        if !ok {
            return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
        }
        return val, nil
    }
    // Old style: enums are ints.
    var val int32
    if err := json.Unmarshal(data, &val); err != nil {
        return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
    }
    return val, nil
}

// Deprecated: Do not use; this type existed for intenal-use only.
type InternalMessageInfo struct{}

// Deprecated: Do not use; this method existed for intenal-use only.
func (*InternalMessageInfo) DiscardUnknown(m Message) {
    DiscardUnknown(m)
}

// Deprecated: Do not use; this method existed for intenal-use only.
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
    return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
}

// Deprecated: Do not use; this method existed for intenal-use only.
func (*InternalMessageInfo) Merge(dst, src Message) {
    protoV2.Merge(MessageV2(dst), MessageV2(src))
}

// Deprecated: Do not use; this method existed for intenal-use only.
func (*InternalMessageInfo) Size(m Message) int {
    return protoV2.Size(MessageV2(m))
}

// Deprecated: Do not use; this method existed for intenal-use only.
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
    return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
}
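To illustrate the two enum helpers kept in this file; not part of the diff, and the two maps stand in for what older generated code provides, with expected output noted in comments:

    names := map[int32]string{0: "UNKNOWN", 1: "UP"}
    values := map[string]int32{"UNKNOWN": 0, "UP": 1}

    fmt.Println(proto.EnumName(names, 1)) // UP
    fmt.Println(proto.EnumName(names, 7)) // 7 (unknown values fall back to the number)

    v, _ := proto.UnmarshalJSONEnum(values, []byte(`"UP"`), "Status") // new style: JSON string
    w, _ := proto.UnmarshalJSONEnum(values, []byte("1"), "Status")    // old style: JSON number
    fmt.Println(v, w)                                                 // 1 1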
356 vendor/github.com/golang/protobuf/proto/discard.go generated vendored
@ -1,48 +1,13 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
)
|
||||
|
||||
type generatedDiscarder interface {
|
||||
XXX_DiscardUnknown()
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
@ -51,300 +16,43 @@ type generatedDiscarder interface {
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m, ok := m.(generatedDiscarder); ok {
|
||||
m.XXX_DiscardUnknown()
|
||||
return
|
||||
if m != nil {
|
||||
discardUnknown(MessageReflect(m))
|
||||
}
|
||||
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
|
||||
// but the master branch has no implementation for InternalMessageInfo,
|
||||
// so it would be more work to replicate that approach.
|
||||
discardLegacy(m)
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields.
|
||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
di := atomicLoadDiscardInfo(&a.discard)
|
||||
if di == nil {
|
||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||
atomicStoreDiscardInfo(&a.discard, di)
|
||||
}
|
||||
di.discard(toPointer(&m))
|
||||
}
|
||||
|
||||
type discardInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []discardFieldInfo
|
||||
unrecognized field
|
||||
}
|
||||
|
||||
type discardFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
discard func(src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||
discardInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||
discardInfoLock.Lock()
|
||||
defer discardInfoLock.Unlock()
|
||||
di := discardInfoMap[t]
|
||||
if di == nil {
|
||||
di = &discardInfo{typ: t}
|
||||
discardInfoMap[t] = di
|
||||
}
|
||||
return di
|
||||
}
|
||||
|
||||
func (di *discardInfo) discard(src pointer) {
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||
di.computeDiscardInfo()
|
||||
}
|
||||
|
||||
for _, fi := range di.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
fi.discard(sfp)
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
DiscardUnknown(m)
|
||||
func discardUnknown(m protoreflect.Message) {
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
|
||||
switch {
|
||||
// Handle singular message.
|
||||
case fd.Cardinality() != protoreflect.Repeated:
|
||||
if fd.Message() != nil {
|
||||
discardUnknown(m.Get(fd).Message())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if di.unrecognized.IsValid() {
|
||||
*src.offset(di.unrecognized).toBytes() = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (di *discardInfo) computeDiscardInfo() {
|
||||
di.lock.Lock()
|
||||
defer di.lock.Unlock()
|
||||
if di.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := di.typ
|
||||
n := t.NumField()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
dfi := discardFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
// Handle list of messages.
|
||||
case fd.IsList():
|
||||
if fd.Message() != nil {
|
||||
ls := m.Get(fd).List()
|
||||
for i := 0; i < ls.Len(); i++ {
|
||||
discardUnknown(ls.Get(i).Message())
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
||||
dfi.discard = func(src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
DiscardUnknown(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dfi.discard = func(pointer) {} // Noop
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
dfi.discard = func(src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
DiscardUnknown(sv.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
di.fields = append(di.fields, dfi)
|
||||
}
|
||||
|
||||
di.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
di.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&di.initialized, 1)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
// Handle map of messages.
|
||||
case fd.IsMap():
|
||||
if fd.MapValue().Message() != nil {
|
||||
ms := m.Get(fd).Map()
|
||||
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
discardUnknown(v.Message())
|
||||
return true
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(m); err == nil {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
// Discard unknown fields.
|
||||
if len(m.GetUnknown()) > 0 {
|
||||
m.SetUnknown(nil)
|
||||
}
|
||||
}
|
||||
|
203 vendor/github.com/golang/protobuf/proto/encode.go generated vendored
@ -1,203 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

/*
 * Routines for encoding data into the wire format for protocol buffers.
 */

import (
    "errors"
    "reflect"
)

var (
    // errRepeatedHasNil is the error returned if Marshal is called with
    // a struct with a repeated field containing a nil element.
    errRepeatedHasNil = errors.New("proto: repeated field has nil element")

    // errOneofHasNil is the error returned if Marshal is called with
    // a struct with a oneof field containing a nil element.
    errOneofHasNil = errors.New("proto: oneof field has nil value")

    // ErrNil is the error returned if Marshal is called with nil.
    ErrNil = errors.New("proto: Marshal called with nil")

    // ErrTooLarge is the error returned if Marshal is called with a
    // message that encodes to >2GB.
    ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)

// The fundamental encoders that put bytes on the wire.
// Those that take integer types all accept uint64 and are
// therefore of type valueEncoder.

const maxVarintBytes = 10 // maximum length of a varint

// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
    var buf [maxVarintBytes]byte
    var n int
    for n = 0; x > 127; n++ {
        buf[n] = 0x80 | uint8(x&0x7F)
        x >>= 7
    }
    buf[n] = uint8(x)
    n++
    return buf[0:n]
}

// EncodeVarint writes a varint-encoded integer to the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) EncodeVarint(x uint64) error {
    for x >= 1<<7 {
        p.buf = append(p.buf, uint8(x&0x7f|0x80))
        x >>= 7
    }
    p.buf = append(p.buf, uint8(x))
    return nil
}

// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
    switch {
    case x < 1<<7:
        return 1
    case x < 1<<14:
        return 2
    case x < 1<<21:
        return 3
    case x < 1<<28:
        return 4
    case x < 1<<35:
        return 5
    case x < 1<<42:
        return 6
    case x < 1<<49:
        return 7
    case x < 1<<56:
        return 8
    case x < 1<<63:
        return 9
    }
    return 10
}

// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
    p.buf = append(p.buf,
        uint8(x),
        uint8(x>>8),
        uint8(x>>16),
        uint8(x>>24),
        uint8(x>>32),
        uint8(x>>40),
        uint8(x>>48),
        uint8(x>>56))
    return nil
}

// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
    p.buf = append(p.buf,
        uint8(x),
        uint8(x>>8),
        uint8(x>>16),
        uint8(x>>24))
    return nil
}

// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
    // use signed number to get arithmetic right shift.
    return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
    // use signed number to get arithmetic right shift.
    return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}

// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
    p.EncodeVarint(uint64(len(b)))
    p.buf = append(p.buf, b...)
    return nil
}

// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
    p.EncodeVarint(uint64(len(s)))
    p.buf = append(p.buf, s...)
    return nil
}

// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
    Marshal() ([]byte, error)
}

// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
    siz := Size(pb)
    p.EncodeVarint(uint64(siz))
    return p.Marshal(pb)
}

// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
    switch v.Kind() {
    case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
        return v.IsNil()
    }
    return false
}
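A worked example of the varint helpers defined in this removed file (the wire format itself is unchanged; the replacement Buffer methods shown earlier delegate to protowire instead): 300 is 0b1_0010_1100, which splits into the 7-bit groups 0101100 and 0000010, emitted least-significant group first with the continuation bit set on every byte but the last:

    fmt.Printf("% x\n", EncodeVarint(300)) // ac 02
    fmt.Println(SizeVarint(300))           // 2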
301 vendor/github.com/golang/protobuf/proto/equal.go generated vendored
@ -1,301 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Protocol buffer comparison.

package proto

import (
	"bytes"
	"log"
	"reflect"
	"strings"
)

/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.

Equality is defined in this way:
  - Two messages are equal iff they are the same type,
    corresponding fields are equal, unknown field sets
    are equal, and extensions sets are equal.
  - Two set scalar fields are equal iff their values are equal.
    If the fields are of a floating-point type, remember that
    NaN != x for all x, including NaN. If the message is defined
    in a proto3 .proto file, fields are not "set"; specifically,
    zero length proto3 "bytes" fields are equal (nil == {}).
  - Two repeated fields are equal iff their lengths are the same,
    and their corresponding elements are equal. Note a "bytes" field,
    although represented by []byte, is not a repeated field and the
    rule for the scalar fields described above applies.
  - Two unset fields are equal.
  - Two unknown field sets are equal if their current
    encoded state is equal.
  - Two extension sets are equal iff they have corresponding
    elements that are pairwise equal.
  - Two map fields are equal iff their lengths are the same,
    and they contain the same set of elements. Zero-length map
    fields are equal.
  - Every other combination of things are not equal.

The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
	if a == nil || b == nil {
		return a == b
	}
	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
	if v1.Type() != v2.Type() {
		return false
	}
	if v1.Kind() == reflect.Ptr {
		if v1.IsNil() {
			return v2.IsNil()
		}
		if v2.IsNil() {
			return false
		}
		v1, v2 = v1.Elem(), v2.Elem()
	}
	if v1.Kind() != reflect.Struct {
		return false
	}
	return equalStruct(v1, v2)
}

// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
	sprop := GetProperties(v1.Type())
	for i := 0; i < v1.NumField(); i++ {
		f := v1.Type().Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		f1, f2 := v1.Field(i), v2.Field(i)
		if f.Type.Kind() == reflect.Ptr {
			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
				// both unset
				continue
			} else if n1 != n2 {
				// set/unset mismatch
				return false
			}
			f1, f2 = f1.Elem(), f2.Elem()
		}
		if !equalAny(f1, f2, sprop.Prop[i]) {
			return false
		}
	}

	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_InternalExtensions")
		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
			return false
		}
	}

	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
		em2 := v2.FieldByName("XXX_extensions")
		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
			return false
		}
	}

	uf := v1.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return true
	}

	u1 := uf.Bytes()
	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
	return bytes.Equal(u1, u2)
}

// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
	if v1.Type() == protoMessageType {
		m1, _ := v1.Interface().(Message)
		m2, _ := v2.Interface().(Message)
		return Equal(m1, m2)
	}
	switch v1.Kind() {
	case reflect.Bool:
		return v1.Bool() == v2.Bool()
	case reflect.Float32, reflect.Float64:
		return v1.Float() == v2.Float()
	case reflect.Int32, reflect.Int64:
		return v1.Int() == v2.Int()
	case reflect.Interface:
		// Probably a oneof field; compare the inner values.
		n1, n2 := v1.IsNil(), v2.IsNil()
		if n1 || n2 {
			return n1 == n2
		}
		e1, e2 := v1.Elem(), v2.Elem()
		if e1.Type() != e2.Type() {
			return false
		}
		return equalAny(e1, e2, nil)
	case reflect.Map:
		if v1.Len() != v2.Len() {
			return false
		}
		for _, key := range v1.MapKeys() {
			val2 := v2.MapIndex(key)
			if !val2.IsValid() {
				// This key was not found in the second map.
				return false
			}
			if !equalAny(v1.MapIndex(key), val2, nil) {
				return false
			}
		}
		return true
	case reflect.Ptr:
		// Maps may have nil values in them, so check for nil.
		if v1.IsNil() && v2.IsNil() {
			return true
		}
		if v1.IsNil() != v2.IsNil() {
			return false
		}
		return equalAny(v1.Elem(), v2.Elem(), prop)
	case reflect.Slice:
		if v1.Type().Elem().Kind() == reflect.Uint8 {
			// short circuit: []byte

			// Edge case: if this is in a proto3 message, a zero length
			// bytes field is considered the zero value.
			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
				return true
			}
			if v1.IsNil() != v2.IsNil() {
				return false
			}
			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
		}

		if v1.Len() != v2.Len() {
			return false
		}
		for i := 0; i < v1.Len(); i++ {
			if !equalAny(v1.Index(i), v2.Index(i), prop) {
				return false
			}
		}
		return true
	case reflect.String:
		return v1.Interface().(string) == v2.Interface().(string)
	case reflect.Struct:
		return equalStruct(v1, v2)
	case reflect.Uint32, reflect.Uint64:
		return v1.Uint() == v2.Uint()
	}

	// unknown type, so not a protocol buffer
	log.Printf("proto: don't know how to compare %v", v1)
	return false
}

// base is the struct type that the extensions are based on.
// x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
	em1, _ := x1.extensionsRead()
	em2, _ := x2.extensionsRead()
	return equalExtMap(base, em1, em2)
}

func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
	if len(em1) != len(em2) {
		return false
	}

	for extNum, e1 := range em1 {
		e2, ok := em2[extNum]
		if !ok {
			return false
		}

		m1 := extensionAsLegacyType(e1.value)
		m2 := extensionAsLegacyType(e2.value)

		if m1 == nil && m2 == nil {
			// Both have only encoded form.
			if bytes.Equal(e1.enc, e2.enc) {
				continue
			}
			// The bytes are different, but the extensions might still be
			// equal. We need to decode them to compare.
		}

		if m1 != nil && m2 != nil {
			// Both are unencoded.
			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
				return false
			}
			continue
		}

		// At least one is encoded. To do a semantically correct comparison
		// we need to unmarshal them first.
		var desc *ExtensionDesc
		if m := extensionMaps[base]; m != nil {
			desc = m[extNum]
		}
		if desc == nil {
			// If both have only encoded form and the bytes are the same,
			// it is handled above. We get here when the bytes are different.
			// We don't know how to decode it, so just compare them as byte
			// slices.
			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
			return false
		}
		var err error
		if m1 == nil {
			m1, err = decodeExtension(e1.enc, desc)
		}
		if m2 == nil && err == nil {
			m2, err = decodeExtension(e2.enc, desc)
		}
		if err != nil {
			// The encoded form is invalid.
			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
			return false
		}
		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
			return false
		}
	}

	return true
}
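With this file deleted, message comparison is delegated to the google.golang.org/protobuf runtime, but the semantics documented above (same type, equal corresponding fields, equal unknown fields and extension sets) carry over. A hedged usage sketch, using the well-known wrapper types purely as an illustration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("gatus")
	b := wrapperspb.String("gatus")
	c := wrapperspb.String("other")

	fmt.Println(proto.Equal(a, b))   // true: same type, equal field values
	fmt.Println(proto.Equal(a, c))   // false: field values differ
	fmt.Println(proto.Equal(a, nil)) // false: only two nil messages are equal
}

The deprecated Equal in github.com/golang/protobuf/proto forwards to the same runtime, so either import path should behave identically here.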
845	vendor/github.com/golang/protobuf/proto/extensions.go (generated, vendored)
@@ -1,607 +1,356 @@
[This hunk rewrites extensions.go in place (607 lines before, 356 after): the BSD license block is replaced by the short Go Authors copyright notice, and the package's standalone, reflection- and map-based extension machinery (extendableProto, extendableProtoV1, extensionAdapter, notLocker, the map-backed XXX_InternalExtensions struct, checkExtensionTypes, extensionProperties, decodeExtension, and the associated HasExtension, ClearExtension, ClearAllExtensions, GetExtension, GetExtensions, SetExtension, SetRawExtension, and ExtensionDescs) gives way to thin wrappers over the google.golang.org/protobuf runtime. ExtensionDesc, ExtensionRange, Extension, and XXX_InternalExtensions become type aliases into protoimpl and protoiface, and the extension accessors are reimplemented on top of protoreflect, protowire, and protoregistry with small helpers isValidExtension, isScalarKind, clearUnknown, fieldNum, and extensionResolver.]
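The rewritten accessors keep the familiar v1 call shape. A sketch of typical usage, with the registered google.api.http method option standing in as a concrete extension (any registered extension works the same way; google.golang.org/genproto is imported here only for illustration and is not implied to be a dependency of this repository):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	opts := &descriptorpb.MethodOptions{}

	// SetExtension stores the value as a populated extension field.
	if err := proto.SetExtension(opts, annotations.E_Http, &annotations.HttpRule{}); err != nil {
		panic(err)
	}
	fmt.Println(proto.HasExtension(opts, annotations.E_Http)) // true

	// GetExtension returns the value as interface{}; assert to the concrete type.
	v, err := proto.GetExtension(opts, annotations.E_Http)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", v.(*annotations.HttpRule)) // *annotations.HttpRule

	proto.ClearExtension(opts, annotations.E_Http)
	fmt.Println(proto.HasExtension(opts, annotations.E_Http)) // false
}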
965	vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored)
@@ -1,965 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and a Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{"fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return fmt.Sprintf("proto: required field not set")
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or a InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether it was successful.
|
||||
// Otherwise it returns false for any fatal non-nil errors.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }

// SetDeterministic sets whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
//   - Repeated serialization of a message will return the same bytes.
//   - Different processes of the same binary (which may be executing on
//     different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographical order. This is an implementation detail and
// subject to change.
func (p *Buffer) SetDeterministic(deterministic bool) {
	p.deterministic = deterministic
}
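A minimal caller-side sketch of Buffer with deterministic output enabled; the package name "example" and the helper function are placeholders, not taken from this repository:

package example

import "github.com/golang/protobuf/proto"

func marshalDeterministic(m proto.Message) ([]byte, error) {
	var buf proto.Buffer
	buf.SetDeterministic(true) // map entries are emitted in sorted key order
	if err := buf.Marshal(m); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil // buf.Reset() would allow reuse for the next message
}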
/*
 * Helper routines for simplifying the creation of optional fields of basic type.
 */

// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
	return &v
}

// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
	return &v
}

// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
	p := new(int32)
	*p = int32(v)
	return p
}

// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
	return &v
}

// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
	return &v
}

// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
	return &v
}

// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
	return &v
}

// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
	return &v
}

// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
	return &v
}
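For context, the typical caller-side pattern with these helpers looks roughly like the sketch below; pb.Person, its fields, and the generated package are hypothetical:

p := &pb.Person{
	Name:  proto.String("alice"),             // *string
	Id:    proto.Int32(42),                   // *int32
	Email: proto.String("alice@example.com"), // *string
	Alive: proto.Bool(true),                  // *bool
}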
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
	s, ok := m[v]
	if ok {
		return s
	}
	return strconv.Itoa(int(v))
}

// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
	if data[0] == '"' {
		// New style: enums are strings.
		var repr string
		if err := json.Unmarshal(data, &repr); err != nil {
			return -1, err
		}
		val, ok := m[repr]
		if !ok {
			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
		}
		return val, nil
	}
	// Old style: enums are ints.
	var val int32
	if err := json.Unmarshal(data, &val); err != nil {
		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
	}
	return val, nil
}
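Generated code historically wires this up roughly as in the following sketch; Color and Color_value are hypothetical generated identifiers, not part of this repository:

func (x *Color) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(Color_value, data, "Color")
	if err != nil {
		return err
	}
	*x = Color(value)
	return nil
}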
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
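A quick, hedged example of calling DebugPrint on an already-encoded message; m stands for any proto.Message and error handling is trimmed:

data, err := proto.Marshal(m)
if err != nil {
	log.Fatal(err)
}
new(proto.Buffer).DebugPrint("my message", data) // prints a tag/wire-type breakdown to stdout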
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
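Caller-side sketch, assuming pb.Settings is a proto2 message with declared defaults (the names are placeholders):

msg := &pb.Settings{}
proto.SetDefaults(msg)
// Unset scalar fields of msg (and of any non-nil sub-messages) now hold
// their proto-declared default values.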
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field can not have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
func mapKeys(vs []reflect.Value) sort.Interface {
	s := mapKeySorter{vs: vs}

	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
	if len(vs) == 0 {
		return s
	}
	switch vs[0].Kind() {
	case reflect.Int32, reflect.Int64:
		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
	case reflect.Uint32, reflect.Uint64:
		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
	case reflect.Bool:
		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
	case reflect.String:
		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
	default:
		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
	}

	return s
}

type mapKeySorter struct {
	vs   []reflect.Value
	less func(a, b reflect.Value) bool
}

func (s mapKeySorter) Len() int      { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
	return s.less(s.vs[i], s.vs[j])
}
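Inside this package the sorter is used roughly like this sketch (v is a reflect.Value of kind Map; the loop body is illustrative only):

keys := v.MapKeys()
sort.Sort(mapKeys(keys)) // panics on float or other unsupported key kinds
for _, k := range keys {
	_ = v.MapIndex(k) // visit entries in deterministic key order
}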
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
|
181	vendor/github.com/golang/protobuf/proto/message_set.go (generated, vendored)
@@ -1,181 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
|
360	vendor/github.com/golang/protobuf/proto/pointer_reflect.go (generated, vendored)
@@ -1,360 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const unsafeAllowed = false
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// grow updates the slice s in place to make it one element longer.
|
||||
// s must be addressable.
|
||||
// Returns the (addressable) new element.
|
||||
func grow(s reflect.Value) reflect.Value {
|
||||
n, m := s.Len(), s.Cap()
|
||||
if n < m {
|
||||
s.SetLen(n + 1)
|
||||
} else {
|
||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
||||
}
|
||||
return s.Index(n)
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
var atomicLock sync.Mutex
|
313	vendor/github.com/golang/protobuf/proto/pointer_unsafe.go (generated, vendored)
@@ -1,313 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// size of pointer
|
||||
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
// Saves ~25ns over the equivalent:
|
||||
// return valToPointer(reflect.ValueOf(*i))
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
} else {
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
if deref {
|
||||
p.p = *(*unsafe.Pointer)(p.p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety, we should panic if !f.IsValid, however calling panic causes
|
||||
// this to no longer be inlineable, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
652	vendor/github.com/golang/protobuf/proto/properties.go (generated, vendored)
@@ -1,162 +1,104 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
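Illustrative sketch of the fast/slow split in tagMap (internal use only; the tag numbers and field indices below are arbitrary):

var tags tagMap
tags.put(3, 0)     // small tag: stored in the fastTags slice
tags.put(5000, 12) // tag >= tagMapFastLimit: falls back to the slowTags map
if fi, ok := tags.get(3); ok {
	_ = fi // struct field index 0
}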
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
// StructProperties represents protocol buffer type information for a
|
||||
// generated protobuf message in the open-struct API.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
// Prop are the properties for each field.
|
||||
//
|
||||
// Fields belonging to a oneof are stored in OneofTypes instead, with a
|
||||
// single Properties representing the parent oneof held here.
|
||||
//
|
||||
// The order of Prop matches the order of fields in the Go struct.
|
||||
// Struct fields that are not related to protobufs have a "XXX_" prefix
|
||||
// in the Properties.Name and must be ignored by the user.
|
||||
Prop []*Properties
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
// It is keyed by the protobuf field name.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// OneofProperties represents information about a specific field in a oneof.
|
||||
type OneofProperties struct {
|
||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||
Field int // struct field number of the containing oneof in the message
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
// Properties represents the type information for a protobuf message field.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
Wire string
|
||||
// Name is a placeholder name with little meaningful semantic value.
|
||||
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
|
||||
Name string
|
||||
// OrigName is the protobuf field name or oneof name.
|
||||
OrigName string
|
||||
// JSONName is the JSON name for the protobuf field.
|
||||
JSONName string
|
||||
// Enum is a placeholder name for enums.
|
||||
// For historical reasons, this is neither the Go name for the enum,
|
||||
// nor the protobuf name for the enum.
|
||||
Enum string // Deprecated: Do not use.
|
||||
// Weak contains the full name of the weakly referenced message.
|
||||
Weak string
|
||||
// Wire is a string representation of the wire type.
|
||||
Wire string
|
||||
// WireType is the protobuf wire type for the field.
|
||||
WireType int
|
||||
Tag int
|
||||
// Tag is the protobuf field number.
|
||||
Tag int
|
||||
// Required reports whether this is a required field.
|
||||
Required bool
|
||||
// Optional reports whether this is an optional field.
|
||||
Optional bool
|
||||
// Repeated reports whether this is a repeated field.
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
// Packed reports whether this is a packed repeated field of scalars.
|
||||
Packed bool
|
||||
// Proto3 reports whether this field operates under the proto3 syntax.
|
||||
Proto3 bool
|
||||
// Oneof reports whether this field belongs within a oneof.
|
||||
Oneof bool
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
// Default is the default value in string form.
|
||||
Default string
|
||||
// HasDefault reports whether the field has a default value.
|
||||
HasDefault bool
|
||||
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
// MapKeyProp is the properties for the key field for a map field.
|
||||
MapKeyProp *Properties
|
||||
// MapValProp is the properties for the value field for a map field.
|
||||
MapValProp *Properties
|
||||
}
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
// OneofProperties represents the type information for a protobuf oneof.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
type OneofProperties struct {
|
||||
// Type is a pointer to the generated wrapper type for the field value.
|
||||
// This is nil for messages that are not in the open-struct API.
|
||||
Type reflect.Type
|
||||
// Field is the index into StructProperties.Prop for the containing oneof.
|
||||
Field int
|
||||
// Prop is the properties for the field.
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
s += "," + strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
@@ -170,18 +112,21 @@ func (p *Properties) String() string {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
if p.JSONName != "" {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if len(p.Weak) > 0 {
|
||||
s += ",weak=" + p.Weak
|
||||
}
|
||||
if p.Proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.Oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
@@ -189,356 +134,173 @@ func (p *Properties) String() string {
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
log.Printf("proto: tag has too few fields: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
outer:
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
func (p *Properties) Parse(tag string) {
|
||||
// For example: "bytes,49,opt,name=foo,def=hello!"
|
||||
for len(tag) > 0 {
|
||||
i := strings.IndexByte(tag, ',')
|
||||
if i < 0 {
|
||||
i = len(tag)
|
||||
}
|
||||
switch s := tag[:i]; {
|
||||
case strings.HasPrefix(s, "name="):
|
||||
p.OrigName = s[len("name="):]
|
||||
case strings.HasPrefix(s, "json="):
|
||||
p.JSONName = s[len("json="):]
|
||||
case strings.HasPrefix(s, "enum="):
|
||||
p.Enum = s[len("enum="):]
|
||||
case strings.HasPrefix(s, "weak="):
|
||||
p.Weak = s[len("weak="):]
|
||||
case strings.Trim(s, "0123456789") == "":
|
||||
n, _ := strconv.ParseUint(s, 10, 32)
|
||||
p.Tag = int(n)
|
||||
case s == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
case s == "req":
|
||||
p.Required = true
|
||||
case s == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
case s == "varint" || s == "zigzag32" || s == "zigzag64":
|
||||
p.Wire = s
|
||||
p.WireType = WireVarint
|
||||
case s == "fixed32":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed32
|
||||
case s == "fixed64":
|
||||
p.Wire = s
|
||||
p.WireType = WireFixed64
|
||||
case s == "bytes":
|
||||
p.Wire = s
|
||||
p.WireType = WireBytes
|
||||
case s == "group":
|
||||
p.Wire = s
|
||||
p.WireType = WireStartGroup
|
||||
case s == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case f == "oneof":
|
||||
p.oneof = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
case s == "proto3":
|
||||
p.Proto3 = true
|
||||
case s == "oneof":
|
||||
p.Oneof = true
|
||||
case strings.HasPrefix(s, "def="):
|
||||
// The default tag is special in that everything afterwards is the
|
||||
// default regardless of the presence of commas.
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break outer
|
||||
}
|
||||
p.Default, i = tag[len("def="):], len(tag)
|
||||
}
|
||||
tag = strings.TrimPrefix(tag[i:], ",")
|
||||
}
|
||||
}
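As a hedged illustration of the grammar handled above, here is a standalone sketch that parses a typical generated struct tag with the deprecated v1 API; the expected field values follow directly from the cases in Parse.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var p proto.Properties
	p.Parse("bytes,49,opt,name=foo,def=hello!")
	// Expected: Wire "bytes", Tag 49, Optional true, OrigName "foo",
	// HasDefault true, Default "hello!".
	fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName, p.HasDefault, p.Default)
}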
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// setFieldProps initializes the field properties for submessages and maps.
|
||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
switch t1 := typ; t1.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t1.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t1.Elem()
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t2.Elem()
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setFieldProps(typ, f, lockGetProp)
|
||||
|
||||
if typ != nil && typ.Kind() == reflect.Map {
|
||||
p.MapKeyProp = new(Properties)
|
||||
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
|
||||
p.MapValProp = new(Properties)
|
||||
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
// GetProperties returns the list of properties for the type represented by t,
|
||||
// which must be a generated protocol buffer message in the open-struct API,
|
||||
// where protobuf message fields are represented by exported Go struct fields.
|
||||
//
|
||||
// Deprecated: Use protobuf reflection instead.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
if p, ok := propertiesCache.Load(t); ok {
|
||||
return p.(*StructProperties)
|
||||
}
|
||||
|
||||
// Most calls to GetProperties in a long-running program will be
|
||||
// retrieving details for types we have seen before.
|
||||
propertiesMu.RLock()
|
||||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
return sprop
|
||||
}
|
||||
|
||||
propertiesMu.Lock()
|
||||
sprop = getPropertiesLocked(t)
|
||||
propertiesMu.Unlock()
|
||||
return sprop
|
||||
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
|
||||
return p.(*StructProperties)
|
||||
}
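A runnable sketch of walking the cached properties; wrapperspb.StringValue from the v2 module is used here only as a convenient generated open-struct message, which is an assumption about the build environment rather than anything this repository defines.

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	t := reflect.TypeOf((*wrapperspb.StringValue)(nil)).Elem()
	props := proto.GetProperties(t)
	for _, fp := range props.Prop {
		if strings.HasPrefix(fp.Name, "XXX_") {
			continue // internal placeholder fields, per the comment on Prop
		}
		fmt.Println(fp.OrigName, fp.Tag, fp.Wire) // value 1 bytes
	}
}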
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
return prop
|
||||
func newProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
|
||||
var hasOneof bool
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
// Construct a list of properties for each field in the struct.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
f := t.Field(i)
|
||||
tagField := f.Tag.Get("protobuf")
|
||||
p.Init(f.Type, f.Name, tagField, &f)
|
||||
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
tagOneof := f.Tag.Get("protobuf_oneof")
|
||||
if tagOneof != "" {
|
||||
hasOneof = true
|
||||
p.OrigName = tagOneof
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
|
||||
// Rename unrelated struct fields with the "XXX_" prefix since so much
|
||||
// user code simply checks for this to exclude special fields.
|
||||
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
|
||||
p.Name = "XXX_" + p.Name
|
||||
p.OrigName = "XXX_" + p.OrigName
|
||||
} else if p.Weak != "" {
|
||||
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
|
||||
}
|
||||
|
||||
prop.Prop = append(prop.Prop, p)
|
||||
}
|
||||
|
||||
// Construct a mapping of oneof field names to properties.
|
||||
if hasOneof {
|
||||
var oneofWrappers []interface{}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
|
||||
}
|
||||
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
|
||||
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
|
||||
}
|
||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
|
||||
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
|
||||
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
var oots []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
for _, wrapper := range oneofWrappers {
|
||||
p := &OneofProperties{
|
||||
Type: reflect.ValueOf(wrapper).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
f := p.Type.Elem().Field(0)
|
||||
p.Prop.Name = f.Name
|
||||
p.Prop.Parse(f.Tag.Get("protobuf"))
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
// Determine the struct field that contains this oneof.
|
||||
// Each wrapper is assignable to exactly one parent field.
|
||||
var foundOneof bool
|
||||
for i := 0; i < t.NumField() && !foundOneof; i++ {
|
||||
if p.Type.AssignableTo(t.Field(i).Type) {
|
||||
p.Field = i
|
||||
foundOneof = true
|
||||
}
|
||||
}
|
||||
if !foundOneof {
|
||||
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
|
||||
}
|
||||
prop.OneofTypes[p.Prop.OrigName] = p
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
var enumValueMaps = make(map[string]map[string]int32)
|
||||
|
||||
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||
// maps into the global table to aid parsing text format protocol buffers.
|
||||
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||
if _, ok := enumValueMaps[typeName]; ok {
|
||||
panic("proto: duplicate enum registered: " + typeName)
|
||||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from names to integers of the
|
||||
// enum type enumType, or a nil if not found.
|
||||
func EnumValueMap(enumType string) map[string]int32 {
|
||||
return enumValueMaps[enumType]
|
||||
}
|
||||
|
||||
// A registry of all linked message types.
|
||||
// The string is a fully-qualified proto name ("pkg.Message").
|
||||
var (
|
||||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||
revProtoTypes = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// RegisterType is called from generated code and maps from the fully qualified
|
||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||
func RegisterType(x Message, name string) {
|
||||
if _, ok := protoTypedNils[name]; ok {
|
||||
// TODO: Some day, make this a panic.
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||
// Generated code always calls RegisterType with nil x.
|
||||
// This check is just for extra safety.
|
||||
protoTypedNils[name] = x
|
||||
} else {
|
||||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||
}
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||
// proto name to the native map type of the proto map definition.
|
||||
func RegisterMapType(x interface{}, name string) {
|
||||
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||
}
|
||||
if _, ok := protoMapTypes[name]; ok {
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
protoMapTypes[name] = t
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||
// map entry.
|
||||
func MessageType(name string) reflect.Type {
|
||||
if t, ok := protoTypedNils[name]; ok {
|
||||
return reflect.TypeOf(t)
|
||||
}
|
||||
return protoMapTypes[name]
|
||||
}
|
||||
|
||||
// A registry of all linked proto files.
|
||||
var (
|
||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||
)
|
||||
|
||||
// RegisterFile is called from generated code and maps from the
|
||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||
protoFiles[filename] = fileDescriptor
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
|
||||
func (sp *StructProperties) Len() int { return len(sp.Prop) }
|
||||
func (sp *StructProperties) Less(i, j int) bool { return false }
|
||||
func (sp *StructProperties) Swap(i, j int) { return }
|
||||
|
167
vendor/github.com/golang/protobuf/proto/proto.go
generated
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package proto provides functionality for handling protocol buffer messages.
|
||||
// In particular, it provides marshaling and unmarshaling between a protobuf
|
||||
// message and the binary wire format.
|
||||
//
|
||||
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
|
||||
// more information.
|
||||
//
|
||||
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
ProtoPackageIsVersion1 = true
|
||||
ProtoPackageIsVersion2 = true
|
||||
ProtoPackageIsVersion3 = true
|
||||
ProtoPackageIsVersion4 = true
|
||||
)
|
||||
|
||||
// GeneratedEnum is any enum type generated by protoc-gen-go
|
||||
// which is a named int32 kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedEnum interface{}
|
||||
|
||||
// GeneratedMessage is any message type generated by protoc-gen-go
|
||||
// which is a pointer to a named struct kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedMessage interface{}
|
||||
|
||||
// Message is a protocol buffer message.
|
||||
//
|
||||
// This is the v1 version of the message interface and is marginally better
|
||||
// than an empty interface as it lacks any method to programmatically interact
|
||||
// with the contents of the message.
|
||||
//
|
||||
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
|
||||
// exposes protobuf reflection as a first-class feature of the interface.
|
||||
//
|
||||
// To convert a v1 message to a v2 message, use the MessageV2 function.
|
||||
// To convert a v2 message to a v1 message, use the MessageV1 function.
|
||||
type Message = protoiface.MessageV1
|
||||
|
||||
// MessageV1 converts either a v1 or v2 message to a v1 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
|
||||
return protoimpl.X.ProtoMessageV1Of(m)
|
||||
}
|
||||
|
||||
// MessageV2 converts either a v1 or v2 message to a v2 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV2(m GeneratedMessage) protoV2.Message {
|
||||
return protoimpl.X.ProtoMessageV2Of(m)
|
||||
}
|
||||
|
||||
// MessageReflect returns a reflective view for a message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageReflect(m Message) protoreflect.Message {
|
||||
return protoimpl.X.MessageOf(m)
|
||||
}
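A short sketch of the v1/v2 bridge described above, assuming the well-known emptypb type from the v2 module is available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m2 := &emptypb.Empty{}    // a message generated against the v2 API
	m1 := proto.MessageV1(m2) // view it through the legacy v1 interface
	back := proto.MessageV2(m1)
	fmt.Println(proto.MessageReflect(m1).Descriptor().FullName(), back != nil)
}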
|
||||
|
||||
// Marshaler is implemented by messages that can marshal themselves.
|
||||
// This interface is used by the following functions: Size, Marshal,
|
||||
// Buffer.Marshal, and Buffer.EncodeMessage.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Marshaler interface {
|
||||
// Marshal formats the encoded bytes of the message.
|
||||
// It should be deterministic and emit valid protobuf wire data.
|
||||
// The caller takes ownership of the returned buffer.
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// Unmarshaler is implemented by messages that can unmarshal themselves.
|
||||
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
|
||||
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Unmarshaler interface {
|
||||
// Unmarshal parses the encoded bytes of the protobuf wire input.
|
||||
// The provided buffer is only valid for the duration of the method call.
|
||||
// It should not reset the receiver message.
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Merger is implemented by messages that can merge themselves.
|
||||
// This interface is used by the following functions: Clone and Merge.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Merger interface {
|
||||
// Merge merges the contents of src into the receiver message.
|
||||
// It clones all data structures in src such that it aliases no mutable
|
||||
// memory referenced by src.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// RequiredNotSetError is an error type returned when
|
||||
// marshaling or unmarshaling a message with missing required fields.
|
||||
type RequiredNotSetError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.err != nil {
|
||||
return e.err.Error()
|
||||
}
|
||||
return "proto: required field not set"
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func checkRequiredNotSet(m protoV2.Message) error {
|
||||
if err := protoV2.CheckInitialized(m); err != nil {
|
||||
return &RequiredNotSetError{err: err}
|
||||
}
|
||||
return nil
|
||||
}
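A hedged sketch of how a caller might treat this error type as a soft failure; marshalLenient and its policy are illustrative, not something defined by this package.

package example

import "github.com/golang/protobuf/proto"

// marshalLenient is a sketch: it accepts an encoding that is missing
// required fields, which is how some legacy callers used this error type.
func marshalLenient(msg proto.Message) ([]byte, error) {
	b, err := proto.Marshal(msg)
	if err != nil {
		if r, ok := err.(interface{ RequiredNotSet() bool }); ok && r.RequiredNotSet() {
			return b, nil // keep the partial encoding
		}
		return nil, err
	}
	return b, nil
}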
|
||||
|
||||
// Clone returns a deep copy of src.
|
||||
func Clone(src Message) Message {
|
||||
return MessageV1(protoV2.Clone(MessageV2(src)))
|
||||
}
|
||||
|
||||
// Merge merges src into dst, which must be messages of the same type.
|
||||
//
|
||||
// Populated scalar fields in src are copied to dst, while populated
|
||||
// singular messages in src are merged into dst by recursively calling Merge.
|
||||
// The elements of every list field in src are appended to the corresponding
|
||||
// list fields in dst. The entries of every map field in src are copied into
|
||||
// the corresponding map field in dst, possibly replacing existing entries.
|
||||
// The unknown fields of src are appended to the unknown fields of dst.
|
||||
func Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
// Equal reports whether two messages are equal.
|
||||
// If two messages marshal to the same bytes under deterministic serialization,
|
||||
// then Equal is guaranteed to report true.
|
||||
//
|
||||
// Two messages are equal if they are the same protobuf message type,
|
||||
// have the same set of populated known and extension field values,
|
||||
// and the same set of unknown field values.
|
||||
//
|
||||
// Scalar values are compared with the equivalent of the == operator in Go,
|
||||
// except bytes values which are compared using bytes.Equal and
|
||||
// floating point values which specially treat NaNs as equal.
|
||||
// Message values are compared by recursively calling Equal.
|
||||
// Lists are equal if each element value is also equal.
|
||||
// Maps are equal if they have the same set of keys, where the pair of values
|
||||
// for each key is also equal.
|
||||
func Equal(x, y Message) bool {
|
||||
return protoV2.Equal(MessageV2(x), MessageV2(y))
|
||||
}
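A runnable sketch of the Clone/Merge/Equal semantics just described, using a well-known wrapper type from the v2 module purely as a stand-in for a generated message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	dst := &wrapperspb.StringValue{}
	src := &wrapperspb.StringValue{Value: "hello"}
	proto.Merge(dst, src) // the populated scalar in src is copied into dst
	clone := proto.Clone(src)
	fmt.Println(dst.Value, proto.Equal(clone, src)) // hello true
}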
|
||||
|
||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||
return ok && ms.IsMessageSet()
|
||||
}
|
323
vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
Normal file
@@ -0,0 +1,323 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// filePath is the path to the proto source file.
|
||||
type filePath = string // e.g., "google/protobuf/descriptor.proto"
|
||||
|
||||
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
|
||||
type fileDescGZIP = []byte
|
||||
|
||||
var fileCache sync.Map // map[filePath]fileDescGZIP
|
||||
|
||||
// RegisterFile is called from generated code to register the compressed
|
||||
// FileDescriptorProto with the file path for a proto source file.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
|
||||
func RegisterFile(s filePath, d fileDescGZIP) {
|
||||
// Decompress the descriptor.
|
||||
zr, err := gzip.NewReader(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
b, err := ioutil.ReadAll(zr)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
|
||||
// Construct a protoreflect.FileDescriptor from the raw descriptor.
|
||||
// Note that DescBuilder.Build automatically registers the constructed
|
||||
// file descriptor with the v2 registry.
|
||||
protoimpl.DescBuilder{RawDescriptor: b}.Build()
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
fileCache.Store(s, d)
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto given the file path
|
||||
// for a proto source file. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
|
||||
func FileDescriptor(s filePath) fileDescGZIP {
|
||||
if v, ok := fileCache.Load(s); ok {
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
|
||||
// Find the descriptor in the v2 registry.
|
||||
var b []byte
|
||||
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
|
||||
if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
|
||||
b = fd.ProtoLegacyRawDesc()
|
||||
} else {
|
||||
// TODO: Use protodesc.ToFileDescriptorProto to construct
|
||||
// a descriptorpb.FileDescriptorProto and marshal it.
|
||||
// However, doing so causes the proto package to have a dependency
|
||||
// on descriptorpb, leading to cyclic dependency issues.
|
||||
}
|
||||
}
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
if len(b) > 0 {
|
||||
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// enumName is the name of an enum. For historical reasons, the enum name is
|
||||
// neither the full Go name nor the full protobuf name of the enum.
|
||||
// The name is the dot-separated combination of just the proto package that the
|
||||
// enum is declared within followed by the Go type name of the generated enum.
|
||||
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
|
||||
|
||||
// enumsByName maps enum values by name to their numeric counterpart.
|
||||
type enumsByName = map[string]int32
|
||||
|
||||
// enumsByNumber maps enum values by number to their name counterpart.
|
||||
type enumsByNumber = map[int32]string
|
||||
|
||||
var enumCache sync.Map // map[enumName]enumsByName
|
||||
var numFilesCache sync.Map // map[protoreflect.FullName]int
|
||||
|
||||
// RegisterEnum is called from the generated code to register the mapping of
|
||||
// enum value names to enum numbers for the enum identified by s.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
|
||||
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
|
||||
if _, ok := enumCache.Load(s); ok {
|
||||
panic("proto: duplicate enum registered: " + s)
|
||||
}
|
||||
enumCache.Store(s, m)
|
||||
|
||||
// This does not forward registration to the v2 registry since this API
|
||||
// lacks sufficient information to construct a complete v2 enum descriptor.
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from enum value names to enum numbers for
|
||||
// the enum of the given name. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
|
||||
func EnumValueMap(s enumName) enumsByName {
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
|
||||
// Check whether the cache is stale. If the number of files in the current
|
||||
// package differs, then it means that some enums may have been recently
|
||||
// registered upstream that we do not know about.
|
||||
var protoPkg protoreflect.FullName
|
||||
if i := strings.LastIndexByte(s, '.'); i >= 0 {
|
||||
protoPkg = protoreflect.FullName(s[:i])
|
||||
}
|
||||
v, _ := numFilesCache.Load(protoPkg)
|
||||
numFiles, _ := v.(int)
|
||||
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
|
||||
return nil // cache is up-to-date; was not found earlier
|
||||
}
|
||||
|
||||
// Update the enum cache for all enums declared in the given proto package.
|
||||
numFiles = 0
|
||||
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
|
||||
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
|
||||
name := protoimpl.X.LegacyEnumName(ed)
|
||||
if _, ok := enumCache.Load(name); !ok {
|
||||
m := make(enumsByName)
|
||||
evs := ed.Values()
|
||||
for i := evs.Len() - 1; i >= 0; i-- {
|
||||
ev := evs.Get(i)
|
||||
m[string(ev.Name())] = int32(ev.Number())
|
||||
}
|
||||
enumCache.LoadOrStore(name, m)
|
||||
}
|
||||
})
|
||||
numFiles++
|
||||
return true
|
||||
})
|
||||
numFilesCache.Store(protoPkg, numFiles)
|
||||
|
||||
// Check cache again for enum map.
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
return nil
|
||||
}
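A small runnable sketch of the registration/lookup pair above; the enum name "example.pkg.Example_Color" is made up for illustration and only mimics the historical "proto package + Go enum name" convention:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	proto.RegisterEnum("example.pkg.Example_Color", nil, map[string]int32{
		"RED":  0,
		"BLUE": 1,
	})
	fmt.Println(proto.EnumValueMap("example.pkg.Example_Color")["BLUE"]) // 1
}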
|
||||
|
||||
// walkEnums recursively walks all enums declared in d.
|
||||
func walkEnums(d interface {
|
||||
Enums() protoreflect.EnumDescriptors
|
||||
Messages() protoreflect.MessageDescriptors
|
||||
}, f func(protoreflect.EnumDescriptor)) {
|
||||
eds := d.Enums()
|
||||
for i := eds.Len() - 1; i >= 0; i-- {
|
||||
f(eds.Get(i))
|
||||
}
|
||||
mds := d.Messages()
|
||||
for i := mds.Len() - 1; i >= 0; i-- {
|
||||
walkEnums(mds.Get(i), f)
|
||||
}
|
||||
}
|
||||
|
||||
// messageName is the full name of protobuf message.
|
||||
type messageName = string
|
||||
|
||||
var messageTypeCache sync.Map // map[messageName]reflect.Type
|
||||
|
||||
// RegisterType is called from generated code to register the message Go type
|
||||
// for a message of the given name.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
|
||||
func RegisterType(m Message, s messageName) {
|
||||
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
|
||||
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
messageTypeCache.Store(s, reflect.TypeOf(m))
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code to register the Go map type
|
||||
// for a protobuf message representing a map entry.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMapType(m interface{}, s messageName) {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("invalid map kind: %v", t))
|
||||
}
|
||||
if _, ok := messageTypeCache.Load(s); ok {
|
||||
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
|
||||
}
|
||||
messageTypeCache.Store(s, t)
|
||||
}
|
||||
|
||||
// MessageType returns the message type for a named message.
|
||||
// It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
|
||||
func MessageType(s messageName) reflect.Type {
|
||||
if v, ok := messageTypeCache.Load(s); ok {
|
||||
return v.(reflect.Type)
|
||||
}
|
||||
|
||||
// Derive the message type from the v2 registry.
|
||||
var t reflect.Type
|
||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
|
||||
t = messageGoType(mt)
|
||||
}
|
||||
|
||||
// If we could not get a concrete type, it is possible that it is a
|
||||
// pseudo-message for a map entry.
|
||||
if t == nil {
|
||||
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
|
||||
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
|
||||
kt := goTypeForField(md.Fields().ByNumber(1))
|
||||
vt := goTypeForField(md.Fields().ByNumber(2))
|
||||
t = reflect.MapOf(kt, vt)
|
||||
}
|
||||
}
|
||||
|
||||
// Locally cache the message type for the given name.
|
||||
if t != nil {
|
||||
v, _ := messageTypeCache.LoadOrStore(s, t)
|
||||
return v.(reflect.Type)
|
||||
}
|
||||
return nil
|
||||
}
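A sketch of the local-cache round trip for map-entry pseudo-messages; the entry name "example.pkg.Example.AttrsEntry" is hypothetical and not registered by anything in this repository:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Registering the Go map type under a made-up map-entry name...
	proto.RegisterMapType(map[string]int32(nil), "example.pkg.Example.AttrsEntry")
	// ...lets the deprecated MessageType lookup recover it from the cache.
	fmt.Println(proto.MessageType("example.pkg.Example.AttrsEntry")) // map[string]int32
}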
|
||||
|
||||
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
|
||||
switch k := fd.Kind(); k {
|
||||
case protoreflect.EnumKind:
|
||||
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
|
||||
return enumGoType(et)
|
||||
}
|
||||
return reflect.TypeOf(protoreflect.EnumNumber(0))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
|
||||
return messageGoType(mt)
|
||||
}
|
||||
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
|
||||
default:
|
||||
return reflect.TypeOf(fd.Default().Interface())
|
||||
}
|
||||
}
|
||||
|
||||
func enumGoType(et protoreflect.EnumType) reflect.Type {
|
||||
return reflect.TypeOf(et.New(0))
|
||||
}
|
||||
|
||||
func messageGoType(mt protoreflect.MessageType) reflect.Type {
|
||||
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
|
||||
}
|
||||
|
||||
// MessageName returns the full protobuf name for the given message type.
|
||||
//
|
||||
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
|
||||
func MessageName(m Message) messageName {
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
|
||||
}
|
||||
|
||||
// RegisterExtension is called from the generated code to register
|
||||
// the extension descriptor.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
|
||||
func RegisterExtension(d *ExtensionDesc) {
|
||||
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
type extensionsByNumber = map[int32]*ExtensionDesc
|
||||
|
||||
var extensionCache sync.Map // map[messageName]extensionsByNumber
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions for the
|
||||
// provided protobuf message, indexed by the extension field number.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
|
||||
func RegisteredExtensions(m Message) extensionsByNumber {
|
||||
// Check whether the cache is stale. If the number of extensions for
|
||||
// the given message differs, then it means that some extensions were
|
||||
// recently registered upstream that we do not know about.
|
||||
s := MessageName(m)
|
||||
v, _ := extensionCache.Load(s)
|
||||
xs, _ := v.(extensionsByNumber)
|
||||
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
|
||||
return xs // cache is up-to-date
|
||||
}
|
||||
|
||||
// Cache is stale, re-compute the extensions map.
|
||||
xs = make(extensionsByNumber)
|
||||
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
|
||||
if xd, ok := xt.(*ExtensionDesc); ok {
|
||||
xs[int32(xt.TypeDescriptor().Number())] = xd
|
||||
} else {
|
||||
// TODO: This implies that the protoreflect.ExtensionType is a
|
||||
// custom type not generated by protoc-gen-go. We could try and
|
||||
// convert the type to an ExtensionDesc.
|
||||
}
|
||||
return true
|
||||
})
|
||||
extensionCache.Store(s, xs)
|
||||
return xs
|
||||
}
|
2776
vendor/github.com/golang/protobuf/proto/table_marshal.go
generated
vendored
File diff suppressed because it is too large
654
vendor/github.com/golang/protobuf/proto/table_merge.go
generated
vendored
@@ -1,654 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Merge merges the src message into dst.
|
||||
// This assumes that dst and src are of the same type and are non-nil.
|
||||
func (a *InternalMessageInfo) Merge(dst, src Message) {
|
||||
mi := atomicLoadMergeInfo(&a.merge)
|
||||
if mi == nil {
|
||||
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
|
||||
atomicStoreMergeInfo(&a.merge, mi)
|
||||
}
|
||||
mi.merge(toPointer(&dst), toPointer(&src))
|
||||
}
|
||||
|
||||
type mergeInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []mergeFieldInfo
|
||||
unrecognized field // Offset of XXX_unrecognized
|
||||
}
|
||||
|
||||
type mergeFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
|
||||
// isPointer reports whether the value in the field is a pointer.
|
||||
// This is true for the following situations:
|
||||
// * Pointer to struct
|
||||
// * Pointer to basic type (proto2 only)
|
||||
// * Slice (first value in slice header is a pointer)
|
||||
// * String (first value in string header is a pointer)
|
||||
isPointer bool
|
||||
|
||||
// basicWidth reports the width of the field assuming that it is directly
|
||||
// embedded in the struct (as is the case for basic types in proto3).
|
||||
// The possible values are:
|
||||
// 0: invalid
|
||||
// 1: bool
|
||||
// 4: int32, uint32, float32
|
||||
// 8: int64, uint64, float64
|
||||
basicWidth int
|
||||
|
||||
// Where dst and src are pointers to the types being merged.
|
||||
merge func(dst, src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
mergeInfoMap = map[reflect.Type]*mergeInfo{}
|
||||
mergeInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getMergeInfo(t reflect.Type) *mergeInfo {
|
||||
mergeInfoLock.Lock()
|
||||
defer mergeInfoLock.Unlock()
|
||||
mi := mergeInfoMap[t]
|
||||
if mi == nil {
|
||||
mi = &mergeInfo{typ: t}
|
||||
mergeInfoMap[t] = mi
|
||||
}
|
||||
return mi
|
||||
}
|
||||
|
||||
// merge merges src into dst assuming they are both of type *mi.typ.
|
||||
func (mi *mergeInfo) merge(dst, src pointer) {
|
||||
if dst.isNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&mi.initialized) == 0 {
|
||||
mi.computeMergeInfo()
|
||||
}
|
||||
|
||||
for _, fi := range mi.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
|
||||
continue
|
||||
}
|
||||
if fi.basicWidth > 0 {
|
||||
switch {
|
||||
case fi.basicWidth == 1 && !*sfp.toBool():
|
||||
continue
|
||||
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
|
||||
continue
|
||||
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dfp := dst.offset(fi.field)
|
||||
fi.merge(dfp, sfp)
|
||||
}
|
||||
|
||||
// TODO: Make this faster?
|
||||
out := dst.asPointerTo(mi.typ).Elem()
|
||||
in := src.asPointerTo(mi.typ).Elem()
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
if mi.unrecognized.IsValid() {
|
||||
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
|
||||
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *mergeInfo) computeMergeInfo() {
|
||||
mi.lock.Lock()
|
||||
defer mi.lock.Unlock()
|
||||
if mi.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := mi.typ
|
||||
n := t.NumField()
|
||||
|
||||
props := GetProperties(t)
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
mfi := mergeFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// As an optimization, we can avoid the merge function call cost
|
||||
// if we know for sure that the source will have no effect
|
||||
// by checking if it is the zero value.
|
||||
if unsafeAllowed {
|
||||
switch tf.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.String:
|
||||
// As a special case, we assume slices and strings are pointers
|
||||
// since we know that the first field in the SliceHeader or
|
||||
// StringHeader is a data pointer.
|
||||
mfi.isPointer = true
|
||||
case reflect.Bool:
|
||||
mfi.basicWidth = 1
|
||||
case reflect.Int32, reflect.Uint32, reflect.Float32:
|
||||
mfi.basicWidth = 4
|
||||
case reflect.Int64, reflect.Uint64, reflect.Float64:
|
||||
mfi.basicWidth = 8
|
||||
}
|
||||
}
|
||||
|
||||
// Unwrap tf to get at its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic("both pointer and slice for basic type in " + tf.Name())
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Int32:
|
||||
switch {
|
||||
case isSlice: // E.g., []int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfsp := src.toInt32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfs := src.getInt32Slice()
|
||||
if sfs != nil {
|
||||
dfs := dst.getInt32Slice()
|
||||
dfs = append(dfs, sfs...)
|
||||
if dfs == nil {
|
||||
dfs = []int32{}
|
||||
}
|
||||
dst.setInt32Slice(dfs)
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
|
||||
/*
|
||||
sfpp := src.toInt32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
*/
|
||||
sfp := src.getInt32Ptr()
|
||||
if sfp != nil {
|
||||
dfp := dst.getInt32Ptr()
|
||||
if dfp == nil {
|
||||
dst.setInt32Ptr(*sfp)
|
||||
} else {
|
||||
*dfp = *sfp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt32(); v != 0 {
|
||||
*dst.toInt32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Int64:
|
||||
switch {
|
||||
case isSlice: // E.g., []int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toInt64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toInt64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []int64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toInt64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toInt64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Int64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., int64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toInt64(); v != 0 {
|
||||
*dst.toInt64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint32:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint32(); v != 0 {
|
||||
*dst.toUint32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint64:
|
||||
switch {
|
||||
case isSlice: // E.g., []uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toUint64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toUint64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []uint64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toUint64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toUint64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Uint64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., uint64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toUint64(); v != 0 {
|
||||
*dst.toUint64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float32:
|
||||
switch {
|
||||
case isSlice: // E.g., []float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat32Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat32Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float32{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat32Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat32Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float32(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float32
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat32(); v != 0 {
|
||||
*dst.toFloat32() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Float64:
|
||||
switch {
|
||||
case isSlice: // E.g., []float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toFloat64Slice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toFloat64Slice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []float64{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toFloat64Ptr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toFloat64Ptr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Float64(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., float64
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toFloat64(); v != 0 {
|
||||
*dst.toFloat64() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch {
|
||||
case isSlice: // E.g., []bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toBoolSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toBoolSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []bool{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toBoolPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toBoolPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = Bool(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., bool
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toBool(); v {
|
||||
*dst.toBool() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
switch {
|
||||
case isSlice: // E.g., []string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfsp := src.toStringSlice()
|
||||
if *sfsp != nil {
|
||||
dfsp := dst.toStringSlice()
|
||||
*dfsp = append(*dfsp, *sfsp...)
|
||||
if *dfsp == nil {
|
||||
*dfsp = []string{}
|
||||
}
|
||||
}
|
||||
}
|
||||
case isPointer: // E.g., *string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sfpp := src.toStringPtr()
|
||||
if *sfpp != nil {
|
||||
dfpp := dst.toStringPtr()
|
||||
if *dfpp == nil {
|
||||
*dfpp = String(**sfpp)
|
||||
} else {
|
||||
**dfpp = **sfpp
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., string
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
if v := *src.toString(); v != "" {
|
||||
*dst.toString() = v
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
isProto3 := props.Prop[i].proto3
|
||||
switch {
|
||||
case isPointer:
|
||||
panic("bad pointer in byte slice case in " + tf.Name())
|
||||
case tf.Elem().Kind() != reflect.Uint8:
|
||||
panic("bad element kind in byte slice case in " + tf.Name())
|
||||
case isSlice: // E.g., [][]byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbsp := src.toBytesSlice()
|
||||
if *sbsp != nil {
|
||||
dbsp := dst.toBytesSlice()
|
||||
for _, sb := range *sbsp {
|
||||
if sb == nil {
|
||||
*dbsp = append(*dbsp, nil)
|
||||
} else {
|
||||
*dbsp = append(*dbsp, append([]byte{}, sb...))
|
||||
}
|
||||
}
|
||||
if *dbsp == nil {
|
||||
*dbsp = [][]byte{}
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., []byte
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sbp := src.toBytes()
|
||||
if *sbp != nil {
|
||||
dbp := dst.toBytes()
|
||||
if !isProto3 || len(*sbp) > 0 {
|
||||
*dbp = append([]byte{}, *sbp...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("message field %s without pointer", tf))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
if sps != nil {
|
||||
dps := dst.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
var dp pointer
|
||||
if !sp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
dps = append(dps, dp)
|
||||
}
|
||||
if dps == nil {
|
||||
dps = []pointer{}
|
||||
}
|
||||
dst.setPointerSlice(dps)
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
mi := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
dp := dst.getPointer()
|
||||
if dp.isNil() {
|
||||
dp = valToPointer(reflect.New(tf))
|
||||
dst.setPointer(dp)
|
||||
}
|
||||
mi.merge(dp, sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in map case in " + tf.Name())
|
||||
default: // E.g., map[K]V
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
dm := dst.asPointerTo(tf).Elem()
|
||||
if dm.IsNil() {
|
||||
dm.Set(reflect.MakeMap(tf))
|
||||
}
|
||||
|
||||
switch tf.Elem().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(Clone(val.Interface().(Message)))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
default: // Basic type (e.g., string)
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
dm.SetMapIndex(key, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic("bad pointer or slice in interface case in " + tf.Name())
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
du := dst.asPointerTo(tf).Elem()
|
||||
typ := su.Elem().Type()
|
||||
if du.IsNil() || du.Elem().Type() != typ {
|
||||
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
|
||||
}
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
dv := du.Elem().Elem().Field(0)
|
||||
if dv.Kind() == reflect.Ptr && dv.IsNil() {
|
||||
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
Merge(dv.Interface().(Message), sv.Interface().(Message))
|
||||
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
|
||||
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
|
||||
default: // Basic type (e.g., string)
|
||||
dv.Set(sv)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("merger not found for type:%s", tf))
|
||||
}
|
||||
mi.fields = append(mi.fields, mfi)
|
||||
}
|
||||
|
||||
mi.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
mi.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&mi.initialized, 1)
|
||||
}
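The merge table built above decides, per field kind, how a source field lands in the destination: proto3 scalars are copied only when non-zero, slices are appended, bytes and sub-messages are deep-copied. A minimal sketch of that observable behaviour through the public proto.Merge API (not part of the vendored file; google.protobuf.StringValue is used only as a convenient generated message):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    dst := &wrappers.StringValue{Value: "keep me"}

    // An empty proto3 string is the zero value, so the string case in the
    // merge table skips it and dst is left untouched.
    proto.Merge(dst, &wrappers.StringValue{})
    fmt.Println(dst.Value) // keep me

    // A non-zero source value overwrites the destination.
    proto.Merge(dst, &wrappers.StringValue{Value: "replace"})
    fmt.Println(dst.Value) // replace
}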
|
2053
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
2053
vendor/github.com/golang/protobuf/proto/table_unmarshal.go
generated
vendored
File diff suppressed because it is too large
843
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
843
vendor/github.com/golang/protobuf/proto/text.go
generated
vendored
@ -1,843 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for writing the text protocol buffer format.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
spaces = []byte(" ")
|
||||
endBraceNewline = []byte("}\n")
|
||||
backslashN = []byte{'\\', 'n'}
|
||||
backslashR = []byte{'\\', 'r'}
|
||||
backslashT = []byte{'\\', 't'}
|
||||
backslashDQ = []byte{'\\', '"'}
|
||||
backslashBS = []byte{'\\', '\\'}
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
type writer interface {
|
||||
io.Writer
|
||||
WriteByte(byte) error
|
||||
}
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
ind int
|
||||
complete bool // if the current position is a complete line
|
||||
compact bool // whether to write out as a one-liner
|
||||
w writer
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteString(s string) (n int, err error) {
|
||||
if !strings.Contains(s, "\n") {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
return io.WriteString(w.w, s)
|
||||
}
|
||||
// WriteString is typically called without newlines, so this
|
||||
// codepath and its copy are rare. We copy to avoid
|
||||
// duplicating all of Write's logic here.
|
||||
return w.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, err error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
n, err = w.w.Write(p)
|
||||
w.complete = false
|
||||
return n, err
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
if err := w.w.WriteByte(' '); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
nn, err := w.w.Write(frag)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if i+1 < len(frags) {
|
||||
if err := w.w.WriteByte('\n'); err != nil {
|
||||
return n, err
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
err := w.w.WriteByte(c)
|
||||
w.complete = c == '\n'
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *textWriter) indent() { w.ind++ }
|
||||
|
||||
func (w *textWriter) unindent() {
|
||||
if w.ind == 0 {
|
||||
log.Print("proto: textWriter unindented too far")
|
||||
return
|
||||
}
|
||||
w.ind--
|
||||
}
|
||||
|
||||
func writeName(w *textWriter, props *Properties) error {
|
||||
if _, err := w.WriteString(props.OrigName); err != nil {
|
||||
return err
|
||||
}
|
||||
if props.Wire != "group" {
|
||||
return w.WriteByte(':')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isAny reports whether sv is a google.protobuf.Any message
|
||||
func isAny(sv reflect.Value) bool {
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
t, ok := sv.Addr().Interface().(wkt)
|
||||
return ok && t.XXX_WellKnownType() == "Any"
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
|
||||
turl := sv.FieldByName("TypeUrl")
|
||||
val := sv.FieldByName("Value")
|
||||
if !turl.IsValid() || !val.IsValid() {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
b, ok := val.Interface().([]byte)
|
||||
if !ok {
|
||||
return true, errors.New("proto: invalid google.protobuf.Any message")
|
||||
}
|
||||
|
||||
parts := strings.Split(turl.String(), "/")
|
||||
mt := MessageType(parts[len(parts)-1])
|
||||
if mt == nil {
|
||||
return false, nil
|
||||
}
|
||||
m := reflect.New(mt.Elem())
|
||||
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
u := turl.String()
|
||||
if requiresQuotes(u) {
|
||||
writeString(w, u)
|
||||
} else {
|
||||
w.Write([]byte(u))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.ind++
|
||||
}
|
||||
if err := tm.writeStruct(w, m.Elem()); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.ind--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
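writeProto3Any is what lets TextMarshaler{ExpandAny: true} render a packed google.protobuf.Any inline as [type_url] < ... > when the inner type is linked in, falling back to the raw type_url/value fields otherwise. A rough usage sketch (not part of the vendored file; ptypes.MarshalAny and the StringValue wrapper are stand-ins, and the exact rendering may differ):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    packed, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "hello"})
    if err != nil {
        panic(err)
    }

    plain := proto.TextMarshaler{}
    expanded := proto.TextMarshaler{ExpandAny: true}

    // Without ExpandAny the Any is printed as its raw type_url and value fields;
    // with ExpandAny the inner StringValue is printed inline, roughly as
    // [type.googleapis.com/google.protobuf.StringValue]: < value: "hello" >.
    fmt.Println(plain.Text(packed))
    fmt.Println(expanded.Text(packed))
}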
|
||||
|
||||
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
if tm.ExpandAny && isAny(sv) {
|
||||
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < sv.NumField(); i++ {
|
||||
fv := sv.Field(i)
|
||||
props := sprops.Prop[i]
|
||||
name := st.Field(i).Name
|
||||
|
||||
if name == "XXX_NoUnkeyedLiteral" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "XXX_") {
|
||||
// There are two XXX_ fields:
|
||||
// XXX_unrecognized []byte
|
||||
// XXX_extensions map[int32]proto.Extension
|
||||
// The first is handled here;
|
||||
// the second is handled at the bottom of this function.
|
||||
if name == "XXX_unrecognized" && !fv.IsNil() {
|
||||
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Field not filled in. This could be an optional field or
|
||||
// a required field that wasn't filled in. Either way, there
|
||||
// isn't anything we can show for it.
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Slice && fv.IsNil() {
|
||||
// Repeated field that is empty, or a bytes field that is unused.
|
||||
continue
|
||||
}
|
||||
|
||||
if props.Repeated && fv.Kind() == reflect.Slice {
|
||||
// Repeated field.
|
||||
for j := 0; j < fv.Len(); j++ {
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
v := fv.Index(j)
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
// A nil message in a repeated field is not valid,
|
||||
// but we can handle that more gracefully than panicking.
|
||||
if _, err := w.Write([]byte("<nil>\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err := tm.writeAny(w, v, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fv.Kind() == reflect.Map {
|
||||
// Map fields are rendered as a repeated struct with key/value fields.
|
||||
keys := fv.MapKeys()
|
||||
sort.Sort(mapKeys(keys))
|
||||
for _, key := range keys {
|
||||
val := fv.MapIndex(key)
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// open struct
|
||||
if err := w.WriteByte('<'); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
// key
|
||||
if _, err := w.WriteString("key:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
// nil values aren't legal, but we can avoid panicking because of them.
|
||||
if val.Kind() != reflect.Ptr || !val.IsNil() {
|
||||
// value
|
||||
if _, err := w.WriteString("value:"); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// close struct
|
||||
w.unindent()
|
||||
if err := w.WriteByte('>'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
|
||||
// empty bytes field
|
||||
continue
|
||||
}
|
||||
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
|
||||
// proto3 non-repeated scalar field; skip if zero value
|
||||
if isProto3Zero(fv) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if fv.Kind() == reflect.Interface {
|
||||
// Check if it is a oneof.
|
||||
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
|
||||
// fv is nil, or holds a pointer to generated struct.
|
||||
// That generated struct has exactly one field,
|
||||
// which has a protobuf struct tag.
|
||||
if fv.IsNil() {
|
||||
continue
|
||||
}
|
||||
inner := fv.Elem().Elem() // interface -> *T -> T
|
||||
tag := inner.Type().Field(0).Tag.Get("protobuf")
|
||||
props = new(Properties) // Overwrite the outer props var, but not its pointee.
|
||||
props.Parse(tag)
|
||||
// Write the value in the oneof, not the oneof itself.
|
||||
fv = inner.Field(0)
|
||||
|
||||
// Special case to cope with malformed messages gracefully:
|
||||
// If the value in the oneof is a nil pointer, don't panic
|
||||
// in writeAny.
|
||||
if fv.Kind() == reflect.Ptr && fv.IsNil() {
|
||||
// Use errors.New so writeAny won't render quotes.
|
||||
msg := errors.New("/* nil */")
|
||||
fv = reflect.ValueOf(&msg).Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := writeName(w, props); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Enums have a String method, so writeAny will work fine.
|
||||
if err := tm.writeAny(w, fv, props); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Extensions (the XXX_extensions field).
|
||||
pv := sv.Addr()
|
||||
if _, err := extendable(pv.Interface()); err == nil {
|
||||
if err := tm.writeExtensions(w, pv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
|
||||
// Floats have special cases.
|
||||
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
|
||||
x := v.Float()
|
||||
var b []byte
|
||||
switch {
|
||||
case math.IsInf(x, 1):
|
||||
b = posInf
|
||||
case math.IsInf(x, -1):
|
||||
b = negInf
|
||||
case math.IsNaN(x):
|
||||
b = nan
|
||||
}
|
||||
if b != nil {
|
||||
_, err := w.Write(b)
|
||||
return err
|
||||
}
|
||||
// Other values are handled below.
|
||||
}
|
||||
|
||||
// We don't attempt to serialise every possible value type; only those
|
||||
// that can occur in protocol buffers.
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
// Should only be a []byte; repeated fields are handled in writeStruct.
|
||||
if err := writeString(w, string(v.Bytes())); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.String:
|
||||
if err := writeString(w, v.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
case reflect.Struct:
|
||||
// Required/optional group/message.
|
||||
var bra, ket byte = '<', '>'
|
||||
if props != nil && props.Wire == "group" {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
if err := w.WriteByte(bra); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.indent()
|
||||
if v.CanAddr() {
|
||||
// Calling v.Interface on a struct causes the reflect package to
|
||||
// copy the entire struct. This is racy with the new Marshaler
|
||||
// since we atomically update the XXX_sizecache.
|
||||
//
|
||||
// Thus, we retrieve a pointer to the struct if possible to avoid
|
||||
// a race since v.Interface on the pointer doesn't copy the struct.
|
||||
//
|
||||
// If v is not addressable, then we are not worried about a race
|
||||
// since it implies that the binary Marshaler cannot possibly be
|
||||
// mutating this value.
|
||||
v = v.Addr()
|
||||
}
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = w.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if err := tm.writeStruct(w, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.unindent()
|
||||
if err := w.WriteByte(ket); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
_, err := fmt.Fprint(w, v.Interface())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equivalent to C's isprint.
|
||||
func isprint(c byte) bool {
|
||||
return c >= 0x20 && c < 0x7f
|
||||
}
|
||||
|
||||
// writeString writes a string in the protocol buffer text format.
|
||||
// It is similar to strconv.Quote except we don't use Go escape sequences,
|
||||
// we treat the string as a byte sequence, and we use octal escapes.
|
||||
// These differences are to maintain interoperability with the other
|
||||
// languages' implementations of the text format.
|
||||
func writeString(w *textWriter, s string) error {
|
||||
// use WriteByte here to get any needed indent
|
||||
if err := w.WriteByte('"'); err != nil {
|
||||
return err
|
||||
}
|
||||
// Loop over the bytes, not the runes.
|
||||
for i := 0; i < len(s); i++ {
|
||||
var err error
|
||||
// Divergence from C++: we don't escape apostrophes.
|
||||
// There's no need to escape them, and the C++ parser
|
||||
// copes with a naked apostrophe.
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
_, err = w.w.Write(backslashN)
|
||||
case '\r':
|
||||
_, err = w.w.Write(backslashR)
|
||||
case '\t':
|
||||
_, err = w.w.Write(backslashT)
|
||||
case '"':
|
||||
_, err = w.w.Write(backslashDQ)
|
||||
case '\\':
|
||||
_, err = w.w.Write(backslashBS)
|
||||
default:
|
||||
if isprint(c) {
|
||||
err = w.w.WriteByte(c)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w.w, "\\%03o", c)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.WriteByte('"')
|
||||
}
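writeString's escaping deliberately diverges from strconv.Quote: the input is treated as a byte sequence, a few control characters get two-character escapes, every other non-printable byte gets a three-digit octal escape, and apostrophes are left alone for C++ interoperability. A small standalone illustration of that rule (not the library function itself):

package main

import "fmt"

// escapeTextByte applies the escaping rule described above to one byte:
// two-character escapes for common control characters, raw output for
// printable bytes (including apostrophes), octal escapes for the rest.
func escapeTextByte(c byte) string {
    switch c {
    case '\n':
        return `\n`
    case '\r':
        return `\r`
    case '\t':
        return `\t`
    case '"':
        return `\"`
    case '\\':
        return `\\`
    }
    if c >= 0x20 && c < 0x7f { // same test as isprint above
        return string(c)
    }
    return fmt.Sprintf(`\%03o`, c)
}

func main() {
    for _, c := range []byte("a\t'\x01") {
        fmt.Print(escapeTextByte(c))
    }
    fmt.Println() // prints: a\t'\001
}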
|
||||
|
||||
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
|
||||
if !w.compact {
|
||||
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
b := NewBuffer(data)
|
||||
for b.index < len(b.buf) {
|
||||
x, err := b.DecodeVarint()
|
||||
if err != nil {
|
||||
_, err := fmt.Fprintf(w, "/* %v */\n", err)
|
||||
return err
|
||||
}
|
||||
wire, tag := x&7, x>>3
|
||||
if wire == WireEndGroup {
|
||||
w.unindent()
|
||||
if _, err := w.Write(endBraceNewline); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fmt.Fprint(w, tag); err != nil {
|
||||
return err
|
||||
}
|
||||
if wire != WireStartGroup {
|
||||
if err := w.WriteByte(':'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !w.compact || wire == WireStartGroup {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch wire {
|
||||
case WireBytes:
|
||||
buf, e := b.DecodeRawBytes(false)
|
||||
if e == nil {
|
||||
_, err = fmt.Fprintf(w, "%q", buf)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", e)
|
||||
}
|
||||
case WireFixed32:
|
||||
x, err = b.DecodeFixed32()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireFixed64:
|
||||
x, err = b.DecodeFixed64()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
case WireStartGroup:
|
||||
err = w.WriteByte('{')
|
||||
w.indent()
|
||||
case WireVarint:
|
||||
x, err = b.DecodeVarint()
|
||||
err = writeUnknownInt(w, x, err)
|
||||
default:
|
||||
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeUnknownInt(w *textWriter, x uint64, err error) error {
|
||||
if err == nil {
|
||||
_, err = fmt.Fprint(w, x)
|
||||
} else {
|
||||
_, err = fmt.Fprintf(w, "/* %v */", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type int32Slice []int32
|
||||
|
||||
func (s int32Slice) Len() int { return len(s) }
|
||||
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// writeExtensions writes all the extensions in pv.
|
||||
// pv is assumed to be a pointer to a protocol message struct that is extendable.
|
||||
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
|
||||
emap := extensionMaps[pv.Type().Elem()]
|
||||
ep, _ := extendable(pv.Interface())
|
||||
|
||||
// Order the extensions by ID.
|
||||
// This isn't strictly necessary, but it will give us
|
||||
// canonical output, which will also make testing easier.
|
||||
m, mu := ep.extensionsRead()
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
mu.Lock()
|
||||
ids := make([]int32, 0, len(m))
|
||||
for id := range m {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Sort(int32Slice(ids))
|
||||
mu.Unlock()
|
||||
|
||||
for _, extNum := range ids {
|
||||
ext := m[extNum]
|
||||
var desc *ExtensionDesc
|
||||
if emap != nil {
|
||||
desc = emap[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// Unknown extension.
|
||||
if err := writeUnknownStruct(w, ext.enc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pb, err := GetExtension(ep, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting extension: %v", err)
|
||||
}
|
||||
|
||||
// Repeated extensions will appear as a slice.
|
||||
if !desc.repeated() {
|
||||
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v := reflect.ValueOf(pb)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
|
||||
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
|
||||
return err
|
||||
}
|
||||
if !w.compact {
|
||||
if err := w.WriteByte(' '); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
remain := w.ind * 2
|
||||
for remain > 0 {
|
||||
n := remain
|
||||
if n > len(spaces) {
|
||||
n = len(spaces)
|
||||
}
|
||||
w.w.Write(spaces[:n])
|
||||
remain -= n
|
||||
}
|
||||
w.complete = false
|
||||
}
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line).
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
|
||||
val := reflect.ValueOf(pb)
|
||||
if pb == nil || val.IsNil() {
|
||||
w.Write([]byte("<nil>"))
|
||||
return nil
|
||||
}
|
||||
var bw *bufio.Writer
|
||||
ww, ok := w.(writer)
|
||||
if !ok {
|
||||
bw = bufio.NewWriter(w)
|
||||
ww = bw
|
||||
}
|
||||
aw := &textWriter{
|
||||
w: ww,
|
||||
complete: true,
|
||||
compact: tm.Compact,
|
||||
}
|
||||
|
||||
if etm, ok := pb.(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err = aw.Write(text); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Dereference the received pointer so we don't have outer < and >.
|
||||
v := reflect.Indirect(val)
|
||||
if err := tm.writeStruct(aw, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if bw != nil {
|
||||
return bw.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Text is the same as Marshal, but returns the string directly.
|
||||
func (tm *TextMarshaler) Text(pb Message) string {
|
||||
var buf bytes.Buffer
|
||||
tm.Marshal(&buf, pb)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// TODO: consider removing some of the Marshal functions below.
|
||||
|
||||
// MarshalText writes a given protocol buffer in text format.
|
||||
// The only errors returned are from w.
|
||||
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// MarshalTextString is the same as MarshalText, but returns the string directly.
|
||||
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
|
||||
|
||||
// CompactText writes a given protocol buffer in compact text format (one line).
|
||||
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
|
||||
|
||||
// CompactTextString is the same as CompactText, but returns the string directly.
|
||||
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
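These package-level helpers are thin wrappers over the default and compact TextMarshaler values, and the same API is re-exported by the new text_encode.go/text_decode.go below. A minimal usage sketch (not part of the vendored file; StringValue is a stand-in for any generated message):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    msg := &wrappers.StringValue{Value: "hello"}

    // Default marshaler: one field per line.
    fmt.Print(proto.MarshalTextString(msg)) // value: "hello"

    // Compact marshaler: a single line with fields separated by spaces.
    fmt.Println(proto.CompactTextString(msg))

    // Round trip through the text parser.
    var back wrappers.StringValue
    if err := proto.UnmarshalText(proto.MarshalTextString(msg), &back); err != nil {
        panic(err)
    }
    fmt.Println(back.Value) // hello
}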
|
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
Normal file
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
Normal file
@ -0,0 +1,801 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextUnmarshalV2 = false
|
||||
|
||||
// ParseError is returned by UnmarshalText.
|
||||
type ParseError struct {
|
||||
Message string
|
||||
|
||||
// Deprecated: Do not use.
|
||||
Line, Offset int
|
||||
}
|
||||
|
||||
func (e *ParseError) Error() string {
|
||||
if wrapTextUnmarshalV2 {
|
||||
return e.Message
|
||||
}
|
||||
if e.Line == 1 {
|
||||
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
|
||||
}
|
||||
|
||||
// UnmarshalText parses a proto text formatted string into m.
|
||||
func UnmarshalText(s string, m Message) error {
|
||||
if u, ok := m.(encoding.TextUnmarshaler); ok {
|
||||
return u.UnmarshalText([]byte(s))
|
||||
}
|
||||
|
||||
m.Reset()
|
||||
mi := MessageV2(m)
|
||||
|
||||
if wrapTextUnmarshalV2 {
|
||||
err := prototext.UnmarshalOptions{
|
||||
AllowPartial: true,
|
||||
}.Unmarshal([]byte(s), mi)
|
||||
if err != nil {
|
||||
return &ParseError{Message: err.Error()}
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
} else {
|
||||
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
}
|
||||
}
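When the parser rejects its input, the error that comes back is the *ParseError declared above, and Error() prefixes it with the (deprecated) line and offset. A small sketch (not part of the vendored file; the concrete message text in the comment is only indicative):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
    var msg wrappers.StringValue
    err := proto.UnmarshalText(`value: "unterminated`, &msg)
    if pe, ok := err.(*proto.ParseError); ok {
        fmt.Println(pe.Error()) // something like: line 1.7: unmatched quote
    } else {
        fmt.Println("unexpected error:", err)
    }
}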
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
seen := make(map[protoreflect.FieldNumber]bool)
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := protoreflect.Name(tok.value)
|
||||
fd := fds.ByName(name)
|
||||
switch {
|
||||
case fd == nil:
|
||||
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
||||
fd = gd
|
||||
}
|
||||
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
||||
fd = nil
|
||||
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
||||
fd = nil
|
||||
}
|
||||
if fd == nil {
|
||||
typeName := string(md.FullName())
|
||||
if m, ok := m.Interface().(Message); ok {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
typeName = t.Elem().String()
|
||||
}
|
||||
}
|
||||
return p.errorf("unknown field name %q in %v", name, typeName)
|
||||
}
|
||||
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
||||
}
|
||||
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
||||
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
||||
}
|
||||
seen[fd.Number()] = true
|
||||
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
if v, err = p.unmarshalValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
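The expanded-Any form described in the comment above ([type/url] < ... >) is accepted on input as well: unmarshalExtensionOrAny re-marshals the inner message and stores it in the type_url/value pair. A sketch of that behaviour (not part of the vendored file; the inner StringValue type must be linked in for the URL lookup to succeed):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    "github.com/golang/protobuf/ptypes/any"
    _ "github.com/golang/protobuf/ptypes/wrappers" // registers StringValue for the URL lookup
)

func main() {
    // The whole struct is an expanded Any: "[type/url] < ... inner message ... >".
    text := `[type.googleapis.com/google.protobuf.StringValue]: < value: "hi" >`

    var a any.Any
    if err := proto.UnmarshalText(text, &a); err != nil {
        panic(err)
    }
    fmt.Println(a.TypeUrl)        // type.googleapis.com/google.protobuf.StringValue
    fmt.Println(len(a.Value) > 0) // true: the inner message was re-marshaled into bytes
}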
|
||||
|
||||
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
||||
name, err := p.consumeExtensionOrAnyName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
||||
if err != nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
||||
}
|
||||
m2 := mt.New()
|
||||
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := protoV2.Marshal(m2.Interface())
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
||||
}
|
||||
|
||||
urlFD := m.Descriptor().Fields().ByName("type_url")
|
||||
valFD := m.Descriptor().Fields().ByName("value")
|
||||
if seen[urlFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
||||
}
|
||||
if seen[valFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
||||
}
|
||||
m.Set(urlFD, protoreflect.ValueOfString(name))
|
||||
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
||||
seen[urlFD.Number()] = true
|
||||
seen[valFD.Number()] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
xname := protoreflect.FullName(name)
|
||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||
if xt == nil && isMessageSet(m.Descriptor()) {
|
||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||
}
|
||||
if xt == nil {
|
||||
return p.errorf("unrecognized extension %q", name)
|
||||
}
|
||||
fd := xt.TypeDescriptor()
|
||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
||||
}
|
||||
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
v, err = p.unmarshalValue(v, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
return p.consumeOptionalSeparator()
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := v.List()
|
||||
var err error
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
return v, nil
|
||||
case fd.IsMap():
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order.
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
keyFD := fd.MapKey()
|
||||
valFD := fd.MapValue()
|
||||
|
||||
mv := v.Map()
|
||||
kv := keyFD.Default()
|
||||
vv := mv.NewValue()
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
var err error
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
mv.Set(kv.MapKey(), vv)
|
||||
return v, nil
|
||||
default:
|
||||
p.back()
|
||||
return p.unmarshalSingularValue(v, fd)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
return protoreflect.ValueOfBool(true), nil
|
||||
case "false", "0", "f", "False":
|
||||
return protoreflect.ValueOfBool(false), nil
|
||||
}
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that use
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that use
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfUint32(uint32(x)), nil
|
||||
}
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfUint64(uint64(x)), nil
|
||||
}
|
||||
case protoreflect.FloatKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
||||
return protoreflect.ValueOfFloat32(float32(x)), nil
|
||||
}
|
||||
case protoreflect.DoubleKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
||||
return protoreflect.ValueOfFloat64(float64(x)), nil
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfString(tok.unquoted), nil
|
||||
}
|
||||
case protoreflect.BytesKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
||||
}
|
||||
case protoreflect.EnumKind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
||||
}
|
||||
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
||||
if vd != nil {
|
||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||
}
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
err := p.unmarshalMessage(v.Message(), terminator)
|
||||
return v, err
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||
}
|
||||
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
||||
}
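The hex fallback in the integer cases above exists because the C++ text parser historically accepted large positive hex literals as a spelling of negative numbers. A worked example of the same expression outside the parser (illustration only):

package main

import (
    "fmt"
    "strconv"
)

func main() {
    tok := "0xffffffff"

    // ParseInt refuses the literal because it does not fit in an int32...
    _, err := strconv.ParseInt(tok, 0, 32)
    fmt.Println(err != nil) // true

    // ...so the parser falls back to an unsigned parse and reinterprets the
    // bit pattern, exactly as in the Int32Kind case above.
    x, _ := strconv.ParseUint(tok, 0, 32) // x holds 0xffffffff in a uint64
    v := int32(-(int64(^x) + 1))
    fmt.Println(v) // -1
}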
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
if fd.Message() == nil {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
||||
// the following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in unmarshalMessage to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
Normal file
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
Normal file
@ -0,0 +1,560 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextMarshalV2 = false
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line)
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes the proto text format of m to w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
||||
b, err := tm.marshal(m)
|
||||
if len(b) > 0 {
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Text returns a proto text formatted string of m.
|
||||
func (tm *TextMarshaler) Text(m Message) string {
|
||||
b, _ := tm.marshal(m)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return []byte("<nil>"), nil
|
||||
}
|
||||
|
||||
if wrapTextMarshalV2 {
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
return m.MarshalText()
|
||||
}
|
||||
|
||||
opts := prototext.MarshalOptions{
|
||||
AllowPartial: true,
|
||||
EmitUnknown: true,
|
||||
}
|
||||
if !tm.Compact {
|
||||
opts.Indent = " "
|
||||
}
|
||||
if !tm.ExpandAny {
|
||||
opts.Resolver = (*protoregistry.Types)(nil)
|
||||
}
|
||||
return opts.Marshal(mr.Interface())
|
||||
} else {
|
||||
w := &textWriter{
|
||||
compact: tm.Compact,
|
||||
expandAny: tm.ExpandAny,
|
||||
complete: true,
|
||||
}
|
||||
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
b, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.Write(b)
|
||||
return w.buf, nil
|
||||
}
|
||||
|
||||
err := w.writeMessage(mr)
|
||||
return w.buf, err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// MarshalText writes the proto text format of m to w.
|
||||
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// MarshalTextString returns a proto text formatted string of m.
|
||||
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
||||
|
||||
// CompactText writes the compact proto text format of m to w.
|
||||
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// CompactTextString returns a compact proto text formatted string of m.
|
||||
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
||||
|
||||
var (
|
||||
newline = []byte("\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
compact bool // same as TextMarshaler.Compact
|
||||
expandAny bool // same as TextMarshaler.ExpandAny
|
||||
complete bool // whether the current position is a complete line
|
||||
indent int // indentation level; never negative
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, _ error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, p...)
|
||||
w.complete = false
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
w.buf = append(w.buf, ' ')
|
||||
n++
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
if i+1 < len(frags) {
|
||||
w.buf = append(w.buf, '\n')
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, c)
|
||||
w.complete = c == '\n'
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
|
||||
if fd.Kind() != protoreflect.GroupKind {
|
||||
w.buf = append(w.buf, fd.Name()...)
|
||||
w.WriteByte(':')
|
||||
} else {
|
||||
// Use message type name for group field name.
|
||||
w.buf = append(w.buf, fd.Message().Name()...)
|
||||
}
|
||||
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when sv was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
|
||||
md := m.Descriptor()
|
||||
fdURL := md.Fields().ByName("type_url")
|
||||
fdVal := md.Fields().ByName("value")
|
||||
|
||||
url := m.Get(fdURL).String()
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b := m.Get(fdVal).Bytes()
|
||||
m2 := mt.New()
|
||||
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
if requiresQuotes(url) {
|
||||
w.writeQuotedString(url)
|
||||
} else {
|
||||
w.Write([]byte(url))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.indent++
|
||||
}
|
||||
if err := w.writeMessage(m2); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.indent--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeMessage(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if w.expandAny && md.FullName() == "google.protobuf.Any" {
|
||||
if canExpand, err := w.writeProto3Any(m); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fds := md.Fields()
|
||||
for i := 0; i < fds.Len(); {
|
||||
fd := fds.Get(i)
|
||||
if od := fd.ContainingOneof(); od != nil {
|
||||
fd = m.WhichOneof(od)
|
||||
i += od.Fields().Len()
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
if fd == nil || !m.Has(fd) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := m.Get(fd).List()
|
||||
for j := 0; j < lv.Len(); j++ {
|
||||
w.writeName(fd)
|
||||
v := lv.Get(j)
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
case fd.IsMap():
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := m.Get(fd).Map()
|
||||
|
||||
type entry struct{ key, val protoreflect.Value }
|
||||
var entries []entry
|
||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
entries = append(entries, entry{k.Value(), v})
|
||||
return true
|
||||
})
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
switch kfd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return entries[i].key.Int() < entries[j].key.Int()
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||
case protoreflect.StringKind:
|
||||
return entries[i].key.String() < entries[j].key.String()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
})
|
||||
for _, entry := range entries {
|
||||
w.writeName(fd)
|
||||
w.WriteByte('<')
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
w.writeName(kfd)
|
||||
if err := w.writeSingularValue(entry.key, kfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.writeName(vfd)
|
||||
if err := w.writeSingularValue(entry.val, vfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.indent--
|
||||
w.WriteByte('>')
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
default:
|
||||
w.writeName(fd)
|
||||
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
if b := m.GetUnknown(); len(b) > 0 {
|
||||
w.writeUnknownFields(b)
|
||||
}
|
||||
return w.writeExtensions(m)
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
switch fd.Kind() {
|
||||
case protoreflect.FloatKind, protoreflect.DoubleKind:
|
||||
switch vf := v.Float(); {
|
||||
case math.IsInf(vf, +1):
|
||||
w.Write(posInf)
|
||||
case math.IsInf(vf, -1):
|
||||
w.Write(negInf)
|
||||
case math.IsNaN(vf):
|
||||
w.Write(nan)
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
// NOTE: This does not validate UTF-8 for historical reasons.
|
||||
w.writeQuotedString(string(v.String()))
|
||||
case protoreflect.BytesKind:
|
||||
w.writeQuotedString(string(v.Bytes()))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var bra, ket byte = '<', '>'
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
w.WriteByte(bra)
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
m := v.Message()
|
||||
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
|
||||
b, err := m2.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.Write(b)
|
||||
} else {
|
||||
w.writeMessage(m)
|
||||
}
|
||||
w.indent--
|
||||
w.WriteByte(ket)
|
||||
case protoreflect.EnumKind:
|
||||
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
|
||||
fmt.Fprint(w, ev.Name())
|
||||
} else {
|
||||
fmt.Fprint(w, v.Enum())
|
||||
}
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
||||
func (w *textWriter) writeQuotedString(s string) {
|
||||
w.WriteByte('"')
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
w.buf = append(w.buf, `\n`...)
|
||||
case '\r':
|
||||
w.buf = append(w.buf, `\r`...)
|
||||
case '\t':
|
||||
w.buf = append(w.buf, `\t`...)
|
||||
case '"':
|
||||
w.buf = append(w.buf, `\"`...)
|
||||
case '\\':
|
||||
w.buf = append(w.buf, `\\`...)
|
||||
default:
|
||||
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
|
||||
w.buf = append(w.buf, c)
|
||||
} else {
|
||||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.WriteByte('"')
|
||||
}
|
||||
|
||||
func (w *textWriter) writeUnknownFields(b []byte) {
|
||||
if !w.compact {
|
||||
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
|
||||
}
|
||||
|
||||
for len(b) > 0 {
|
||||
num, wtyp, n := protowire.ConsumeTag(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
if wtyp == protowire.EndGroupType {
|
||||
w.indent--
|
||||
w.Write(endBraceNewline)
|
||||
continue
|
||||
}
|
||||
fmt.Fprint(w, num)
|
||||
if wtyp != protowire.StartGroupType {
|
||||
w.WriteByte(':')
|
||||
}
|
||||
if !w.compact || wtyp == protowire.StartGroupType {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed32Type:
|
||||
v, n := protowire.ConsumeFixed32(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed64Type:
|
||||
v, n := protowire.ConsumeFixed64(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.BytesType:
|
||||
v, n := protowire.ConsumeBytes(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprintf(w, "%q", v)
|
||||
case protowire.StartGroupType:
|
||||
w.WriteByte('{')
|
||||
w.indent++
|
||||
default:
|
||||
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
// writeExtensions writes all the extensions in m.
|
||||
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if md.ExtensionRanges().Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ext struct {
|
||||
desc protoreflect.FieldDescriptor
|
||||
val protoreflect.Value
|
||||
}
|
||||
var exts []ext
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
exts = append(exts, ext{fd, v})
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(exts, func(i, j int) bool {
|
||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||
})
|
||||
|
||||
for _, ext := range exts {
|
||||
// For message set, use the name of the message as the extension name.
|
||||
name := string(ext.desc.FullName())
|
||||
if isMessageSet(ext.desc.ContainingMessage()) {
|
||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||
}
|
||||
|
||||
if !ext.desc.IsList() {
|
||||
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lv := ext.val.List()
|
||||
for i := 0; i < lv.Len(); i++ {
|
||||
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
fmt.Fprintf(w, "[%s]:", name)
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
for i := 0; i < w.indent*2; i++ {
|
||||
w.buf = append(w.buf, ' ')
|
||||
}
|
||||
w.complete = false
|
||||
}
|
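The new text_encode.go above keeps the familiar v1 entry points (MarshalText, MarshalTextString, CompactTextString and the configurable TextMarshaler). A minimal usage sketch, not part of the diff; durationpb merely stands in for any generated message type:

package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	m := &durpb.Duration{Seconds: 90, Nanos: 500000000}

	// Indented, multi-line text form.
	fmt.Println(proto.MarshalTextString(m))

	// Single-line form.
	fmt.Println(proto.CompactTextString(m))

	// Explicit marshaler with options, writing to any io.Writer.
	tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
	_ = tm.Marshal(os.Stdout, m)
}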
880 vendor/github.com/golang/protobuf/proto/text_parser.go generated vendored
@@ -1,880 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
Offset int // 0-based byte offset from start of input
|
||||
}
|
||||
|
||||
func (p *ParseError) Error() string {
|
||||
if p.Line == 1 {
|
||||
// show offset only for first line
|
||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.String {
|
||||
// The proto3 exception is for a string field,
|
||||
// which requires a colon.
|
||||
break
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
reqCount := sprops.reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension or an Any.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
if fieldSet["type_url"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||
}
|
||||
if fieldSet["value"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
fieldSet["type_url"] = true
|
||||
fieldSet["value"] = true
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == extName {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(Message)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
var dst reflect.Value
|
||||
fi, props, ok := structFieldByName(sprops, name)
|
||||
if ok {
|
||||
dst = sv.Field(fi)
|
||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
||||
// It is a oneof.
|
||||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
field := sv.Field(oop.Field)
|
||||
if !field.IsNil() {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||
}
|
||||
field.Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
if dst.Kind() == reflect.Map {
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct the map if it doesn't already exist.
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
}
|
||||
key := reflect.New(dst.Type().Key()).Elem()
|
||||
val := reflect.New(dst.Type().Elem()).Elem()
|
||||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order. See b/28924776 for a time
|
||||
// this went wrong.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field.
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// true/1/t/True or false/f/0/False.
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0", "f", "False":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
return um.UnmarshalText([]byte(s))
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
return newTextParser(s).readStruct(v.Elem(), "")
|
||||
}
|
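The 880-line v1 text parser above is removed; proto.UnmarshalText remains part of the package's public API (the v1.4 line reimplements it on top of prototext in text_decode.go). A hedged round-trip sketch, again using durationpb only as an example payload:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	var d durpb.Duration
	if err := proto.UnmarshalText(`seconds: 3 nanos: 500000000`, &d); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(proto.CompactTextString(&d)) // roughly: seconds:3 nanos:500000000
}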
78 vendor/github.com/golang/protobuf/proto/wire.go generated vendored Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
)
|
||||
|
||||
// Size returns the size in bytes of the wire-format encoding of m.
|
||||
func Size(m Message) int {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
mi := MessageV2(m)
|
||||
return protoV2.Size(mi)
|
||||
}
|
||||
|
||||
// Marshal returns the wire-format encoding of m.
|
||||
func Marshal(m Message) ([]byte, error) {
|
||||
b, err := marshalAppend(nil, m, false)
|
||||
if b == nil {
|
||||
b = zeroBytes
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
|
||||
var zeroBytes = make([]byte, 0, 0)
|
||||
|
||||
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, ErrNil
|
||||
}
|
||||
mi := MessageV2(m)
|
||||
nbuf, err := protoV2.MarshalOptions{
|
||||
Deterministic: deterministic,
|
||||
AllowPartial: true,
|
||||
}.MarshalAppend(buf, mi)
|
||||
if err != nil {
|
||||
return buf, err
|
||||
}
|
||||
if len(buf) == len(nbuf) {
|
||||
if !mi.ProtoReflect().IsValid() {
|
||||
return buf, ErrNil
|
||||
}
|
||||
}
|
||||
return nbuf, checkRequiredNotSet(mi)
|
||||
}
|
||||
|
||||
// Unmarshal parses a wire-format message in b and places the decoded results in m.
|
||||
//
|
||||
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
|
||||
// removed. Use UnmarshalMerge to preserve and append to existing data.
|
||||
func Unmarshal(b []byte, m Message) error {
|
||||
m.Reset()
|
||||
return UnmarshalMerge(b, m)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
|
||||
func UnmarshalMerge(b []byte, m Message) error {
|
||||
mi := MessageV2(m)
|
||||
out, err := protoV2.UnmarshalOptions{
|
||||
AllowPartial: true,
|
||||
Merge: true,
|
||||
}.UnmarshalState(protoiface.UnmarshalInput{
|
||||
Buf: b,
|
||||
Message: mi.ProtoReflect(),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if out.Flags&protoiface.UnmarshalInitialized > 0 {
|
||||
return nil
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
}
|
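wire.go above forwards Size, Marshal and Unmarshal to the protobuf-go v2 runtime. A minimal sketch of the wire-format round trip, assuming a generated message type such as durationpb:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	in := &durpb.Duration{Seconds: 42}

	b, err := proto.Marshal(in) // wire-format encoding
	if err != nil {
		panic(err)
	}
	fmt.Println(proto.Size(in) == len(b)) // true

	out := &durpb.Duration{}
	if err := proto.Unmarshal(b, out); err != nil { // Reset, then merge
		panic(err)
	}
	fmt.Println(out.Seconds) // 42
}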
34 vendor/github.com/golang/protobuf/proto/wrappers.go generated vendored Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
// Bool stores v in a new bool value and returns a pointer to it.
|
||||
func Bool(v bool) *bool { return &v }
|
||||
|
||||
// Int stores v in a new int32 value and returns a pointer to it.
|
||||
//
|
||||
// Deprecated: Use Int32 instead.
|
||||
func Int(v int) *int32 { return Int32(int32(v)) }
|
||||
|
||||
// Int32 stores v in a new int32 value and returns a pointer to it.
|
||||
func Int32(v int32) *int32 { return &v }
|
||||
|
||||
// Int64 stores v in a new int64 value and returns a pointer to it.
|
||||
func Int64(v int64) *int64 { return &v }
|
||||
|
||||
// Uint32 stores v in a new uint32 value and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 { return &v }
|
||||
|
||||
// Uint64 stores v in a new uint64 value and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 { return &v }
|
||||
|
||||
// Float32 stores v in a new float32 value and returns a pointer to it.
|
||||
func Float32(v float32) *float32 { return &v }
|
||||
|
||||
// Float64 stores v in a new float64 value and returns a pointer to it.
|
||||
func Float64(v float64) *float64 { return &v }
|
||||
|
||||
// String stores v in a new string value and returns a pointer to it.
|
||||
func String(v string) *string { return &v }
|
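The pointer helpers in wrappers.go above are typically used to fill optional scalar fields, which proto2-style generated structs represent as pointers. A sketch with a hypothetical stand-in struct (profile is not a real generated type):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// profile stands in for a generated message whose optional fields are pointers.
type profile struct {
	Name    *string
	Age     *int32
	Premium *bool
}

func main() {
	p := profile{
		Name:    proto.String("alice"),
		Age:     proto.Int32(30),
		Premium: proto.Bool(true),
	}
	fmt.Println(*p.Name, *p.Age, *p.Premium) // alice 30 true
}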
165 vendor/github.com/golang/protobuf/ptypes/any.go generated vendored Normal file
@@ -0,0 +1,165 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
|
||||
anypb "github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
const urlPrefix = "type.googleapis.com/"
|
||||
|
||||
// AnyMessageName returns the message name contained in an anypb.Any message.
|
||||
// Most type assertions should use the Is function instead.
|
||||
func AnyMessageName(any *anypb.Any) (string, error) {
|
||||
name, err := anyMessageName(any)
|
||||
return string(name), err
|
||||
}
|
||||
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
|
||||
if any == nil {
|
||||
return "", fmt.Errorf("message is nil")
|
||||
}
|
||||
name := protoreflect.FullName(any.TypeUrl)
|
||||
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
|
||||
name = name[i+len("/"):]
|
||||
}
|
||||
if !name.IsValid() {
|
||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// MarshalAny marshals the given message m into an anypb.Any message.
|
||||
func MarshalAny(m proto.Message) (*anypb.Any, error) {
|
||||
switch dm := m.(type) {
|
||||
case DynamicAny:
|
||||
m = dm.Message
|
||||
case *DynamicAny:
|
||||
if dm == nil {
|
||||
return nil, proto.ErrNil
|
||||
}
|
||||
m = dm.Message
|
||||
}
|
||||
b, err := proto.Marshal(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
|
||||
}
|
||||
|
||||
// Empty returns a new message of the type specified in an anypb.Any message.
|
||||
// It returns protoregistry.NotFound if the corresponding message type could not
|
||||
// be resolved in the global registry.
|
||||
func Empty(any *anypb.Any) (proto.Message, error) {
|
||||
name, err := anyMessageName(any)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.MessageV1(mt.New().Interface()), nil
|
||||
}
|
||||
|
||||
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
|
||||
// into the provided message m. It returns an error if the target message
|
||||
// does not match the type in the Any message or if an unmarshal error occurs.
|
||||
//
|
||||
// The target message m may be a *DynamicAny message. If the underlying message
|
||||
// type could not be resolved, then this returns protoregistry.NotFound.
|
||||
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
|
||||
if dm, ok := m.(*DynamicAny); ok {
|
||||
if dm.Message == nil {
|
||||
var err error
|
||||
dm.Message, err = Empty(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
m = dm.Message
|
||||
}
|
||||
|
||||
anyName, err := AnyMessageName(any)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msgName := proto.MessageName(m)
|
||||
if anyName != msgName {
|
||||
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
|
||||
}
|
||||
return proto.Unmarshal(any.Value, m)
|
||||
}
|
||||
|
||||
// Is reports whether the Any message contains a message of the specified type.
|
||||
func Is(any *anypb.Any, m proto.Message) bool {
|
||||
if any == nil || m == nil {
|
||||
return false
|
||||
}
|
||||
name := proto.MessageName(m)
|
||||
if !strings.HasSuffix(any.TypeUrl, name) {
|
||||
return false
|
||||
}
|
||||
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
|
||||
}
|
||||
|
||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||
// allocate a proto.Message for the type specified in an anypb.Any message.
|
||||
// The allocated message is stored in the embedded proto.Message.
|
||||
//
|
||||
// Example:
|
||||
// var x ptypes.DynamicAny
|
||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||
type DynamicAny struct{ proto.Message }
|
||||
|
||||
func (m DynamicAny) String() string {
|
||||
if m.Message == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return m.Message.String()
|
||||
}
|
||||
func (m DynamicAny) Reset() {
|
||||
if m.Message == nil {
|
||||
return
|
||||
}
|
||||
m.Message.Reset()
|
||||
}
|
||||
func (m DynamicAny) ProtoMessage() {
|
||||
return
|
||||
}
|
||||
func (m DynamicAny) ProtoReflect() protoreflect.Message {
|
||||
if m.Message == nil {
|
||||
return nil
|
||||
}
|
||||
return dynamicAny{proto.MessageReflect(m.Message)}
|
||||
}
|
||||
|
||||
type dynamicAny struct{ protoreflect.Message }
|
||||
|
||||
func (m dynamicAny) Type() protoreflect.MessageType {
|
||||
return dynamicAnyType{m.Message.Type()}
|
||||
}
|
||||
func (m dynamicAny) New() protoreflect.Message {
|
||||
return dynamicAnyType{m.Message.Type()}.New()
|
||||
}
|
||||
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
|
||||
return DynamicAny{proto.MessageV1(m.Message.Interface())}
|
||||
}
|
||||
|
||||
type dynamicAnyType struct{ protoreflect.MessageType }
|
||||
|
||||
func (t dynamicAnyType) New() protoreflect.Message {
|
||||
return dynamicAny{t.MessageType.New()}
|
||||
}
|
||||
func (t dynamicAnyType) Zero() protoreflect.Message {
|
||||
return dynamicAny{t.MessageType.Zero()}
|
||||
}
|
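A hedged sketch of packing and unpacking with the Any helpers above (MarshalAny, Is, UnmarshalAny); durationpb is used here only as an example payload type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &durpb.Duration{Seconds: 5}

	packed, err := ptypes.MarshalAny(src) // wraps src in a google.protobuf.Any
	if err != nil {
		panic(err)
	}
	fmt.Println(ptypes.Is(packed, &durpb.Duration{})) // true

	var dst durpb.Duration
	if err := ptypes.UnmarshalAny(packed, &dst); err != nil {
		panic(err)
	}
	fmt.Println(proto.CompactTextString(&dst)) // seconds:5
}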
62 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored Normal file
@@ -0,0 +1,62 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||
|
||||
package any
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/any.proto.
|
||||
|
||||
type Any = anypb.Any
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
|
||||
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
||||
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
|
||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
|
||||
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
|
||||
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
|
||||
}
|
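any.pb.go above carries no message code of its own: type Any = anypb.Any is a type alias, so the legacy and canonical import paths name the same Go type. A small illustration (illustrative only, not part of the diff):

package main

import (
	"fmt"

	legacy "github.com/golang/protobuf/ptypes/any"
	anypb "google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Because of the alias, a *legacy.Any is literally a *anypb.Any.
	var a *anypb.Any = &legacy.Any{TypeUrl: "type.googleapis.com/google.protobuf.Duration"}
	fmt.Println(a.TypeUrl)
}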
6 vendor/github.com/golang/protobuf/ptypes/doc.go generated vendored Normal file
@@ -0,0 +1,6 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ptypes provides functionality for interacting with well-known types.
package ptypes
72 vendor/github.com/golang/protobuf/ptypes/duration.go generated vendored Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
durationpb "github.com/golang/protobuf/ptypes/duration"
|
||||
)
|
||||
|
||||
// Range of google.protobuf.Duration as specified in duration.proto.
|
||||
// This is about 10,000 years in seconds.
|
||||
const (
|
||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||
minSeconds = -maxSeconds
|
||||
)
|
||||
|
||||
// Duration converts a durationpb.Duration to a time.Duration.
|
||||
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
||||
func Duration(dur *durationpb.Duration) (time.Duration, error) {
|
||||
if err := validateDuration(dur); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
d := time.Duration(dur.Seconds) * time.Second
|
||||
if int64(d/time.Second) != dur.Seconds {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
if dur.Nanos != 0 {
|
||||
d += time.Duration(dur.Nanos) * time.Nanosecond
|
||||
if (d < 0) != (dur.Nanos < 0) {
|
||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// DurationProto converts a time.Duration to a durationpb.Duration.
|
||||
func DurationProto(d time.Duration) *durationpb.Duration {
|
||||
nanos := d.Nanoseconds()
|
||||
secs := nanos / 1e9
|
||||
nanos -= secs * 1e9
|
||||
return &durationpb.Duration{
|
||||
Seconds: int64(secs),
|
||||
Nanos: int32(nanos),
|
||||
}
|
||||
}
|
||||
|
||||
// validateDuration determines whether the durationpb.Duration is valid
|
||||
// according to the definition in google/protobuf/duration.proto.
|
||||
// A valid durpb.Duration may still be too large to fit into a time.Duration
|
||||
// Note that the range of durationpb.Duration is about 10,000 years,
|
||||
// while the range of time.Duration is about 290 years.
|
||||
func validateDuration(dur *durationpb.Duration) error {
|
||||
if dur == nil {
|
||||
return errors.New("duration: nil Duration")
|
||||
}
|
||||
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
|
||||
return fmt.Errorf("duration: %v: seconds out of range", dur)
|
||||
}
|
||||
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
|
||||
return fmt.Errorf("duration: %v: nanos out of range", dur)
|
||||
}
|
||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
|
||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
|
||||
}
|
||||
return nil
|
||||
}
|
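A minimal usage sketch of the Duration/DurationProto helpers added above (not part of the vendored diff; the package main wrapper and the 90-second value are purely illustrative):

// Round-trip a value through the helpers from vendor/github.com/golang/protobuf/ptypes/duration.go.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> *durationpb.Duration; this direction cannot fail.
	pb := ptypes.DurationProto(90 * time.Second)

	// *durationpb.Duration -> time.Duration; validates range and sign.
	d, err := ptypes.Duration(pb)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30s
}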
63
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
||||
|
||||
package duration
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/duration.proto.
|
||||
|
||||
type Duration = durationpb.Duration
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
|
||||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
|
||||
}
|
103
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ptypes
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
// Range of google.protobuf.Timestamp as specified in timestamp.proto.
|
||||
const (
|
||||
// Seconds field of the earliest valid Timestamp.
|
||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
minValidSeconds = -62135596800
|
||||
// Seconds field just after the latest valid Timestamp.
|
||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||
maxValidSeconds = 253402300800
|
||||
)
|
||||
|
||||
// Timestamp converts a timestamppb.Timestamp to a time.Time.
|
||||
// It returns an error if the argument is invalid.
|
||||
//
|
||||
// Unlike most Go functions, if Timestamp returns an error, the first return
|
||||
// value is not the zero time.Time. Instead, it is the value obtained from the
|
||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||
// do map to valid time.Times.
|
||||
//
|
||||
// A nil Timestamp returns an error. The first return value in that case is
|
||||
// undefined.
|
||||
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
|
||||
// Don't return the zero value on error, because it corresponds to a valid
|
||||
// timestamp. Instead return whatever time.Unix gives us.
|
||||
var t time.Time
|
||||
if ts == nil {
|
||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||
} else {
|
||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
return t, validateTimestamp(ts)
|
||||
}
|
||||
|
||||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
||||
func TimestampNow() *timestamppb.Timestamp {
|
||||
ts, err := TimestampProto(time.Now())
|
||||
if err != nil {
|
||||
panic("ptypes: time.Now() out of Timestamp range")
|
||||
}
|
||||
return ts
|
||||
}
|
||||
|
||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||
// It returns an error if the resulting Timestamp is invalid.
|
||||
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
|
||||
ts := &timestamppb.Timestamp{
|
||||
Seconds: t.Unix(),
|
||||
Nanos: int32(t.Nanosecond()),
|
||||
}
|
||||
if err := validateTimestamp(ts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// TimestampString returns the RFC 3339 string for valid Timestamps.
|
||||
// For invalid Timestamps, it returns an error message in parentheses.
|
||||
func TimestampString(ts *timestamppb.Timestamp) string {
|
||||
t, err := Timestamp(ts)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return t.Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
// validateTimestamp determines whether a Timestamp is valid.
|
||||
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
|
||||
// and has a Nanos field in the range [0, 1e9).
|
||||
//
|
||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||
// Otherwise, it returns an error that describes the problem.
|
||||
//
|
||||
// Every valid Timestamp can be represented by a time.Time,
|
||||
// but the converse is not true.
|
||||
func validateTimestamp(ts *timestamppb.Timestamp) error {
|
||||
if ts == nil {
|
||||
return errors.New("timestamp: nil Timestamp")
|
||||
}
|
||||
if ts.Seconds < minValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
|
||||
}
|
||||
if ts.Seconds >= maxValidSeconds {
|
||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
|
||||
}
|
||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
|
||||
}
|
||||
return nil
|
||||
}
|
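A similar sketch for the Timestamp helpers added above (illustrative only; the out-of-range date exists just to show the validation path):

// Convert between time.Time and *timestamppb.Timestamp using the ptypes helpers.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	now := ptypes.TimestampNow() // *timestamppb.Timestamp for the current time

	t, err := ptypes.Timestamp(now) // back to time.Time, validated against [0001-01-01, 10000-01-01)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Format(time.RFC3339Nano))

	// TimestampProto rejects dates outside the valid range.
	if _, err := ptypes.TimestampProto(time.Date(12000, 1, 1, 0, 0, 0, 0, time.UTC)); err != nil {
		fmt.Println("out of range:", err)
	}
}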
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
||||
|
||||
package timestamp
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
||||
|
||||
type Timestamp = timestamppb.Timestamp
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
||||
}
|
50
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
@ -17,6 +17,7 @@ import (
|
||||
"errors"
|
||||
"math"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
@ -42,11 +43,27 @@ type Counter interface {
|
||||
Add(float64)
|
||||
}
|
||||
|
||||
// ExemplarAdder is implemented by Counters that offer the option of adding a
|
||||
// value to the Counter together with an exemplar. Its AddWithExemplar method
|
||||
// works like the Add method of the Counter interface but also replaces the
|
||||
// currently saved exemplar (if any) with a new one, created from the provided
|
||||
// value, the current time as timestamp, and the provided labels. Empty Labels
|
||||
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
|
||||
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
|
||||
// of the provided labels are invalid, or if the provided labels contain more
|
||||
// than 64 runes in total.
|
||||
type ExemplarAdder interface {
|
||||
AddWithExemplar(value float64, exemplar Labels)
|
||||
}
|
||||
|
||||
// CounterOpts is an alias for Opts. See there for doc comments.
|
||||
type CounterOpts Opts
|
||||
|
||||
// NewCounter creates a new Counter based on the provided CounterOpts.
|
||||
//
|
||||
// The returned implementation also implements ExemplarAdder. It is safe to
|
||||
// perform the corresponding type assertion.
|
||||
//
|
||||
// The returned implementation tracks the counter value in two separate
|
||||
// variables, a float64 and a uint64. The latter is used to track calls of the
|
||||
// Inc method and calls of the Add method with a value that can be represented
|
||||
@ -61,7 +78,7 @@ func NewCounter(opts CounterOpts) Counter {
|
||||
nil,
|
||||
opts.ConstLabels,
|
||||
)
|
||||
result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
|
||||
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}
|
||||
@ -78,6 +95,9 @@ type counter struct {
|
||||
desc *Desc
|
||||
|
||||
labelPairs []*dto.LabelPair
|
||||
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
|
||||
|
||||
now func() time.Time // To mock out time.Now() for testing.
|
||||
}
|
||||
|
||||
func (c *counter) Desc() *Desc {
|
||||
@ -88,6 +108,7 @@ func (c *counter) Add(v float64) {
|
||||
if v < 0 {
|
||||
panic(errors.New("counter cannot decrease in value"))
|
||||
}
|
||||
|
||||
ival := uint64(v)
|
||||
if float64(ival) == v {
|
||||
atomic.AddUint64(&c.valInt, ival)
|
||||
@ -103,6 +124,11 @@ func (c *counter) Add(v float64) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *counter) AddWithExemplar(v float64, e Labels) {
|
||||
c.Add(v)
|
||||
c.updateExemplar(v, e)
|
||||
}
|
||||
|
||||
func (c *counter) Inc() {
|
||||
atomic.AddUint64(&c.valInt, 1)
|
||||
}
|
||||
@ -112,7 +138,23 @@ func (c *counter) Write(out *dto.Metric) error {
|
||||
ival := atomic.LoadUint64(&c.valInt)
|
||||
val := fval + float64(ival)
|
||||
|
||||
return populateMetric(CounterValue, val, c.labelPairs, out)
|
||||
var exemplar *dto.Exemplar
|
||||
if e := c.exemplar.Load(); e != nil {
|
||||
exemplar = e.(*dto.Exemplar)
|
||||
}
|
||||
|
||||
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
|
||||
}
|
||||
|
||||
func (c *counter) updateExemplar(v float64, l Labels) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
e, err := newExemplar(v, c.now(), l)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
c.exemplar.Store(e)
|
||||
}
|
||||
|
||||
// CounterVec is a Collector that bundles a set of Counters that all share the
|
||||
@ -138,7 +180,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||
if len(lvs) != len(desc.variableLabels) {
|
||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
|
||||
}
|
||||
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
|
||||
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now}
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}),
|
||||
@ -267,6 +309,8 @@ type CounterFunc interface {
|
||||
// provided function must be concurrency-safe. The function should also honor
|
||||
// the contract for a Counter (values only go up, not down), but compliance will
|
||||
// not be checked.
|
||||
//
|
||||
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
|
||||
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
|
||||
return newValueFunc(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
|
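The counter returned by prometheus.NewCounter now also implements the ExemplarAdder interface introduced above; a hedged sketch of attaching an exemplar (the metric name and the trace_id label are invented for illustration):

// Attach an exemplar to a counter increment via the new ExemplarAdder interface.
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total",
		Help: "Example counter used to demonstrate exemplars.",
	})

	// Per the NewCounter documentation in this diff, the concrete type is
	// guaranteed to implement ExemplarAdder, so the assertion is safe.
	if ea, ok := c.(prometheus.ExemplarAdder); ok {
		ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
	}
}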
1
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/cespare/xxhash/v2"
|
||||
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
37
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
@ -84,25 +84,21 @@
|
||||
// of those four metric types can be found in the Prometheus docs:
|
||||
// https://prometheus.io/docs/concepts/metric_types/
|
||||
//
|
||||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
||||
// Prometheus server not to assume anything about its type.
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped, a very important part of the Prometheus data model is
|
||||
// the partitioning of samples along dimensions called labels, which results in
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary, and
|
||||
// Histogram, a very important part of the Prometheus data model is the
|
||||
// partitioning of samples along dimensions called labels, which results in
|
||||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
||||
// HistogramVec, and UntypedVec.
|
||||
// and HistogramVec.
|
||||
//
|
||||
// While only the fundamental metric types implement the Metric interface, both
|
||||
// the metrics and their vector versions implement the Collector interface. A
|
||||
// Collector manages the collection of a number of Metrics, but for convenience,
|
||||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
|
||||
// SummaryVec, HistogramVec, and UntypedVec are not.
|
||||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
|
||||
// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
|
||||
// and HistogramVec are not.
|
||||
//
|
||||
// To create instances of Metrics and their vector versions, you need a suitable
|
||||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
|
||||
// UntypedOpts.
|
||||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
|
||||
//
|
||||
// Custom Collectors and constant Metrics
|
||||
//
|
||||
@ -118,13 +114,16 @@
|
||||
// existing numbers into Prometheus Metrics during collection. An own
|
||||
// implementation of the Collector interface is perfect for that. You can create
|
||||
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
|
||||
// NewConstSummary (and their respective Must… versions). That will happen in
|
||||
// the Collect method. The Describe method has to return separate Desc
|
||||
// instances, representative of the “throw-away” metrics to be created later.
|
||||
// NewDesc comes in handy to create those Desc instances. Alternatively, you
|
||||
// could return no Desc at all, which will mark the Collector “unchecked”. No
|
||||
// checks are performed at registration time, but metric consistency will still
|
||||
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
|
||||
// NewConstSummary (and their respective Must… versions). NewConstMetric is used
|
||||
// for all metric types with just a float64 as their value: Counter, Gauge, and
|
||||
// a special “type” called Untyped. Use the latter if you are not sure if the
|
||||
// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
|
||||
// happens in the Collect method. The Describe method has to return separate
|
||||
// Desc instances, representative of the “throw-away” metrics to be created
|
||||
// later. NewDesc comes in handy to create those Desc instances. Alternatively,
|
||||
// you could return no Desc at all, which will mark the Collector “unchecked”.
|
||||
// No checks are performed at registration time, but metric consistency will
|
||||
// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
|
||||
// errors. Thus, with unchecked Collectors, the responsibility to not collect
|
||||
// metrics that lead to inconsistencies in the total scrape result lies with the
|
||||
// implementer of the Collector. While this is not a desirable state, it is
|
||||
|
11
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
@ -123,7 +123,7 @@ func (g *gauge) Sub(val float64) {
|
||||
|
||||
func (g *gauge) Write(out *dto.Metric) error {
|
||||
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
|
||||
return populateMetric(GaugeValue, val, g.labelPairs, out)
|
||||
return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
|
||||
}
|
||||
|
||||
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
|
||||
@ -273,9 +273,12 @@ type GaugeFunc interface {
|
||||
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
|
||||
// value reported is determined by calling the given function from within the
|
||||
// Write method. Take into account that metric collection may happen
|
||||
// concurrently. If that results in concurrent calls to Write, like in the case
|
||||
// where a GaugeFunc is directly registered with Prometheus, the provided
|
||||
// function must be concurrency-safe.
|
||||
// concurrently. Therefore, it must be safe to call the provided function
|
||||
// concurrently.
|
||||
//
|
||||
// NewGaugeFunc is a good way to create an “info” style metric with a constant
|
||||
// value of 1. Example:
|
||||
// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
|
||||
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
|
||||
return newValueFunc(NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
|
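As the revised NewGaugeFunc comment suggests, a common pattern is an “info”-style metric with a constant value of 1; a small sketch (the metric name and version label are invented):

// Build an "info" metric with NewGaugeFunc; the value is always 1 and the
// interesting data lives in the constant labels.
package main

import "github.com/prometheus/client_golang/prometheus"

func newBuildInfo(version string) prometheus.GaugeFunc {
	return prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name:        "example_build_info",
		Help:        "Build information about the running binary.",
		ConstLabels: prometheus.Labels{"version": version},
	}, func() float64 { return 1 })
}

func main() {
	prometheus.MustRegister(newBuildInfo("v1.0.0"))
}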
2
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
@ -73,7 +73,7 @@ func NewGoCollector() Collector {
|
||||
nil, nil),
|
||||
gcDesc: NewDesc(
|
||||
"go_gc_duration_seconds",
|
||||
"A summary of the GC invocation durations.",
|
||||
"A summary of the pause duration of garbage collection cycles.",
|
||||
nil, nil),
|
||||
goInfoDesc: NewDesc(
|
||||
"go_info",
|
||||
|
115
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
@ -20,7 +20,9 @@ import (
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
@ -138,7 +140,7 @@ type HistogramOpts struct {
|
||||
// better covered by target labels set by the scraping Prometheus
|
||||
// server, or by one specific metric (e.g. a build_info or a
|
||||
// machine_role metric). See also
|
||||
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
|
||||
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
|
||||
ConstLabels Labels
|
||||
|
||||
// Buckets defines the buckets into which observations are counted. Each
|
||||
@ -151,6 +153,10 @@ type HistogramOpts struct {
|
||||
|
||||
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
|
||||
// panics if the buckets in HistogramOpts are not in strictly increasing order.
|
||||
//
|
||||
// The returned implementation also implements ExemplarObserver. It is safe to
|
||||
// perform the corresponding type assertion. Exemplars are tracked separately
|
||||
// for each bucket.
|
||||
func NewHistogram(opts HistogramOpts) Histogram {
|
||||
return newHistogram(
|
||||
NewDesc(
|
||||
@ -188,6 +194,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
||||
upperBounds: opts.Buckets,
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
counts: [2]*histogramCounts{{}, {}},
|
||||
now: time.Now,
|
||||
}
|
||||
for i, upperBound := range h.upperBounds {
|
||||
if i < len(h.upperBounds)-1 {
|
||||
@ -205,9 +212,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
||||
}
|
||||
}
|
||||
// Finally we know the final length of h.upperBounds and can make buckets
|
||||
// for both counts:
|
||||
// for both counts as well as exemplars:
|
||||
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
|
||||
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
|
||||
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
|
||||
|
||||
h.init(h) // Init self-collection.
|
||||
return h
|
||||
@ -254,6 +262,9 @@ type histogram struct {
|
||||
|
||||
upperBounds []float64
|
||||
labelPairs []*dto.LabelPair
|
||||
exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
|
||||
|
||||
now func() time.Time // To mock out time.Now() for testing.
|
||||
}
|
||||
|
||||
func (h *histogram) Desc() *Desc {
|
||||
@ -261,36 +272,13 @@ func (h *histogram) Desc() *Desc {
|
||||
}
|
||||
|
||||
func (h *histogram) Observe(v float64) {
|
||||
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
|
||||
// slightly faster than the binary search. If we really care, we could
|
||||
// switch from one search strategy to the other depending on the number
|
||||
// of buckets.
|
||||
//
|
||||
// Microbenchmarks (BenchmarkHistogramNoLabels):
|
||||
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
|
||||
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
|
||||
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
||||
i := sort.SearchFloat64s(h.upperBounds, v)
|
||||
h.observe(v, h.findBucket(v))
|
||||
}
|
||||
|
||||
// We increment h.countAndHotIdx so that the counter in the lower
|
||||
// 63 bits gets incremented. At the same time, we get the new value
|
||||
// back, which we can use to find the currently-hot counts.
|
||||
n := atomic.AddUint64(&h.countAndHotIdx, 1)
|
||||
hotCounts := h.counts[n>>63]
|
||||
|
||||
if i < len(h.upperBounds) {
|
||||
atomic.AddUint64(&hotCounts.buckets[i], 1)
|
||||
}
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Increment count last as we take it as a signal that the observation
|
||||
// is complete.
|
||||
atomic.AddUint64(&hotCounts.count, 1)
|
||||
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
|
||||
i := h.findBucket(v)
|
||||
h.observe(v, i)
|
||||
h.updateExemplar(v, i, e)
|
||||
}
|
||||
|
||||
func (h *histogram) Write(out *dto.Metric) error {
|
||||
@ -329,6 +317,18 @@ func (h *histogram) Write(out *dto.Metric) error {
|
||||
CumulativeCount: proto.Uint64(cumCount),
|
||||
UpperBound: proto.Float64(upperBound),
|
||||
}
|
||||
if e := h.exemplars[i].Load(); e != nil {
|
||||
his.Bucket[i].Exemplar = e.(*dto.Exemplar)
|
||||
}
|
||||
}
|
||||
// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
|
||||
if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
|
||||
b := &dto.Bucket{
|
||||
CumulativeCount: proto.Uint64(count),
|
||||
UpperBound: proto.Float64(math.Inf(1)),
|
||||
Exemplar: e.(*dto.Exemplar),
|
||||
}
|
||||
his.Bucket = append(his.Bucket, b)
|
||||
}
|
||||
|
||||
out.Histogram = his
|
||||
@ -352,6 +352,57 @@ func (h *histogram) Write(out *dto.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// findBucket returns the index of the bucket for the provided value, or
|
||||
// len(h.upperBounds) for the +Inf bucket.
|
||||
func (h *histogram) findBucket(v float64) int {
|
||||
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
|
||||
// slightly faster than the binary search. If we really care, we could
|
||||
// switch from one search strategy to the other depending on the number
|
||||
// of buckets.
|
||||
//
|
||||
// Microbenchmarks (BenchmarkHistogramNoLabels):
|
||||
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
|
||||
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
|
||||
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
||||
return sort.SearchFloat64s(h.upperBounds, v)
|
||||
}
|
||||
|
||||
// observe is the implementation for Observe without the findBucket part.
|
||||
func (h *histogram) observe(v float64, bucket int) {
|
||||
// We increment h.countAndHotIdx so that the counter in the lower
|
||||
// 63 bits gets incremented. At the same time, we get the new value
|
||||
// back, which we can use to find the currently-hot counts.
|
||||
n := atomic.AddUint64(&h.countAndHotIdx, 1)
|
||||
hotCounts := h.counts[n>>63]
|
||||
|
||||
if bucket < len(h.upperBounds) {
|
||||
atomic.AddUint64(&hotCounts.buckets[bucket], 1)
|
||||
}
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Increment count last as we take it as a signal that the observation
|
||||
// is complete.
|
||||
atomic.AddUint64(&hotCounts.count, 1)
|
||||
}
|
||||
|
||||
// updateExemplar replaces the exemplar for the provided bucket. With empty
|
||||
// labels, it's a no-op. It panics if any of the labels is invalid.
|
||||
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
e, err := newExemplar(v, h.now(), l)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
h.exemplars[bucket].Store(e)
|
||||
}
|
||||
|
||||
// HistogramVec is a Collector that bundles a set of Histograms that all share the
|
||||
// same Desc, but have different values for their variable labels. This is used
|
||||
// if you want to count the same thing partitioned by various dimensions
|
||||
@ -556,7 +607,7 @@ func NewConstHistogram(
|
||||
}
|
||||
|
||||
// MustNewConstHistogram is a version of NewConstHistogram that panics where
|
||||
// NewConstMetric would have returned an error.
|
||||
// NewConstHistogram would have returned an error.
|
||||
func MustNewConstHistogram(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
|
1
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
@ -17,6 +17,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
|
12
vendor/github.com/prometheus/client_golang/prometheus/observer.go
generated
vendored
@ -50,3 +50,15 @@ type ObserverVec interface {
|
||||
|
||||
Collector
|
||||
}
|
||||
|
||||
// ExemplarObserver is implemented by Observers that offer the option of
|
||||
// observing a value together with an exemplar. Its ObserveWithExemplar method
|
||||
// works like the Observe method of an Observer but also replaces the currently
|
||||
// saved exemplar (if any) with a new one, created from the provided value, the
|
||||
// current time as timestamp, and the provided Labels. Empty Labels will lead to
|
||||
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
|
||||
// left in place. ObserveWithExemplar panics if any of the provided labels are
|
||||
// invalid or if the provided labels contain more than 64 runes in total.
|
||||
type ExemplarObserver interface {
|
||||
ObserveWithExemplar(value float64, exemplar Labels)
|
||||
}
|
||||
|
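Histograms get the matching ExemplarObserver behaviour (see ObserveWithExemplar in histogram.go above); a sketch with an invented metric name, the default buckets, and a made-up trace_id label:

// Record an observation together with an exemplar on a histogram.
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds",
		Help:    "Example histogram used to demonstrate exemplars.",
		Buckets: prometheus.DefBuckets,
	})

	// NewHistogram's doc comment in this diff states the returned value also
	// implements ExemplarObserver, so this assertion is safe.
	if eo, ok := h.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
	}
}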
24
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
generated
vendored
@ -33,18 +33,22 @@ var (
|
||||
)
|
||||
|
||||
type processMemoryCounters struct {
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
|
||||
// System interface description
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
|
||||
|
||||
// Refer to the Golang internal implementation
|
||||
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
|
||||
_ uint32
|
||||
PageFaultCount uint32
|
||||
PeakWorkingSetSize uint64
|
||||
WorkingSetSize uint64
|
||||
QuotaPeakPagedPoolUsage uint64
|
||||
QuotaPagedPoolUsage uint64
|
||||
QuotaPeakNonPagedPoolUsage uint64
|
||||
QuotaNonPagedPoolUsage uint64
|
||||
PagefileUsage uint64
|
||||
PeakPagefileUsage uint64
|
||||
PrivateUsage uint64
|
||||
PeakWorkingSetSize uintptr
|
||||
WorkingSetSize uintptr
|
||||
QuotaPeakPagedPoolUsage uintptr
|
||||
QuotaPagedPoolUsage uintptr
|
||||
QuotaPeakNonPagedPoolUsage uintptr
|
||||
QuotaNonPagedPoolUsage uintptr
|
||||
PagefileUsage uintptr
|
||||
PeakPagefileUsage uintptr
|
||||
PrivateUsage uintptr
|
||||
}
|
||||
|
||||
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
|
||||
|
303
vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
generated
vendored
@ -11,11 +11,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package promauto provides constructors for the usual Prometheus metrics that
|
||||
// return them already registered with the global registry
|
||||
// (prometheus.DefaultRegisterer). This allows very compact code, avoiding any
|
||||
// references to the registry altogether, but all the constructors in this
|
||||
// package will panic if the registration fails.
|
||||
// Package promauto provides alternative constructors for the fundamental
|
||||
// Prometheus metric types and their …Vec and …Func variants. The difference to
|
||||
// their counterparts in the prometheus package is that the promauto
|
||||
// constructors return Collectors that are already registered with a
|
||||
// registry. There are two sets of constructors. The constructors in the first
|
||||
// set are top-level functions, while the constructors in the other set are
|
||||
// methods of the Factory type. The top-level functions return Collectors
|
||||
// registered with the global registry (prometheus.DefaultRegisterer), while the
|
||||
// methods return Collectors registered with the registry the Factory was
|
||||
// constructed with. All constructors panic if the registration fails.
|
||||
//
|
||||
// The following example is a complete program to create a histogram of normally
|
||||
// distributed random numbers from the math/rand package:
|
||||
@ -79,51 +84,79 @@
|
||||
// http.ListenAndServe(":1971", nil)
|
||||
// }
|
||||
//
|
||||
// A Factory is created with the With(prometheus.Registerer) function, which
|
||||
// enables two usage patterns. With(prometheus.Registerer) can be called once per
|
||||
// line:
|
||||
//
|
||||
// var (
|
||||
// reg = prometheus.NewRegistry()
|
||||
// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
|
||||
// Name: "random_numbers",
|
||||
// Help: "A histogram of normally distributed random numbers.",
|
||||
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
|
||||
// })
|
||||
// requestCount = promauto.With(reg).NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Name: "http_requests_total",
|
||||
// Help: "Total number of HTTP requests by status code and method.",
|
||||
// },
|
||||
// []string{"code", "method"},
|
||||
// )
|
||||
// )
|
||||
//
|
||||
// Or it can be used to create a Factory once to be used multiple times:
|
||||
//
|
||||
// var (
|
||||
// reg = prometheus.NewRegistry()
|
||||
// factory = promauto.With(reg)
|
||||
// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{
|
||||
// Name: "random_numbers",
|
||||
// Help: "A histogram of normally distributed random numbers.",
|
||||
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
|
||||
// })
|
||||
// requestCount = factory.NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Name: "http_requests_total",
|
||||
// Help: "Total number of HTTP requests by status code and method.",
|
||||
// },
|
||||
// []string{"code", "method"},
|
||||
// )
|
||||
// )
|
||||
//
|
||||
// This appears very handy. So why are these constructors locked away in a
|
||||
// separate package? There are two caveats:
|
||||
// separate package?
|
||||
//
|
||||
// First, in more complex programs, global state is often quite problematic.
|
||||
// That's the reason why the metrics constructors in the prometheus package do
|
||||
// not interact with the global prometheus.DefaultRegisterer on their own. You
|
||||
// are free to use the Register or MustRegister functions to register them with
|
||||
// the global prometheus.DefaultRegisterer, but you could as well choose a local
|
||||
// Registerer (usually created with prometheus.NewRegistry, but there are other
|
||||
// scenarios, e.g. testing).
|
||||
// The main problem is that registration may fail, e.g. if a metric inconsistent
|
||||
// with or equal to the newly to be registered one is already registered.
|
||||
// Therefore, the Register method in the prometheus.Registerer interface returns
|
||||
// an error, and the same is the case for the top-level prometheus.Register
|
||||
// function that registers with the global registry. The prometheus package also
|
||||
// provides MustRegister versions for both. They panic if the registration
|
||||
// fails, and they clearly call this out by using the Must… idiom. Panicking is
|
||||
// problematic in this case because it doesn't just happen on input provided by
|
||||
// the caller that is invalid on its own. Things are a bit more subtle here:
|
||||
// Metric creation and registration tend to be spread widely over the
|
||||
// codebase. It can easily happen that an incompatible metric is added to an
|
||||
// unrelated part of the code, and suddenly code that used to work perfectly
|
||||
// fine starts to panic (provided that the registration of the newly added
|
||||
// metric happens before the registration of the previously existing
|
||||
// metric). This may come as an even bigger surprise with the global registry,
|
||||
// where simply importing another package can trigger a panic (if the newly
|
||||
// imported package registers metrics in its init function). At least, in the
|
||||
// prometheus package, creation of metrics and other collectors is separate from
|
||||
// registration. You first create the metric, and then you decide explicitly if
|
||||
// you want to register it with a local or the global registry, and if you want
|
||||
// to handle the error or risk a panic. With the constructors in the promauto
|
||||
// package, registration is automatic, and if it fails, it will always
|
||||
// panic. Furthermore, the constructors will often be called in the var section
|
||||
// of a file, which means that panicking will happen as a side effect of merely
|
||||
// importing a package.
|
||||
//
|
||||
// The second issue is that registration may fail, e.g. if a metric inconsistent
|
||||
// with the newly to be registered one is already registered. But how to signal
|
||||
// and handle a panic in the automatic registration with the default registry?
|
||||
// The only way is panicking. While panicking on invalid input provided by the
|
||||
// programmer is certainly fine, things are a bit more subtle in this case: You
|
||||
// might just add another package to the program, and that package (in its init
|
||||
// function) happens to register a metric with the same name as your code. Now,
|
||||
// all of a sudden, either your code or the code of the newly imported package
|
||||
// panics, depending on initialization order, without any opportunity to handle
|
||||
// the case gracefully. Even worse is a scenario where registration happens
|
||||
// later during the runtime (e.g. upon loading some kind of plugin), where the
|
||||
// panic could be triggered long after the code has been deployed to
|
||||
// production. A possibility to panic should be explicitly called out by the
|
||||
// Must… idiom, cf. prometheus.MustRegister. But adding a separate set of
|
||||
// constructors in the prometheus package called MustRegisterNewCounterVec or
|
||||
// similar would be quite unwieldy. Adding an extra MustRegister method to each
|
||||
// metric, returning the registered metric, would result in nice code for those
|
||||
// using the method, but would pollute every single metric interface for
|
||||
// everybody avoiding the global registry.
|
||||
// A separate package allows conservative users to entirely ignore it. And
|
||||
// whoever wants to use it, will do so explicitly, with an opportunity to read
|
||||
// this warning.
|
||||
//
|
||||
// To address both issues, the problematic auto-registering and possibly
|
||||
// panicking constructors are all in this package with a clear warning
|
||||
// ahead. And whoever cares about avoiding global state and possibly panicking
|
||||
// function calls can simply ignore the existence of the promauto package
|
||||
// altogether.
|
||||
//
|
||||
// A final note: There is a similar case in the net/http package of the standard
|
||||
// library. It has DefaultServeMux as a global instance of ServeMux, and the
|
||||
// Handle function acts on it, panicking if a handler for the same pattern has
|
||||
// already been registered. However, one might argue that the whole HTTP routing
|
||||
// is usually set up closely together in the same package or file, while
|
||||
// Prometheus metrics tend to be spread widely over the codebase, increasing the
|
||||
// chance of surprising registration failures. Furthermore, the use of global
|
||||
// state in net/http has been criticized widely, and some avoid it altogether.
|
||||
// Enjoy promauto responsibly!
|
||||
package promauto
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
@ -132,9 +165,7 @@ import "github.com/prometheus/client_golang/prometheus"
|
||||
// but it automatically registers the Counter with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics.
|
||||
func NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
|
||||
c := prometheus.NewCounter(opts)
|
||||
prometheus.MustRegister(c)
|
||||
return c
|
||||
return With(prometheus.DefaultRegisterer).NewCounter(opts)
|
||||
}
|
||||
|
||||
// NewCounterVec works like the function of the same name in the prometheus
|
||||
@ -142,9 +173,7 @@ func NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec
|
||||
// panics.
|
||||
func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
|
||||
c := prometheus.NewCounterVec(opts, labelNames)
|
||||
prometheus.MustRegister(c)
|
||||
return c
|
||||
return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames)
|
||||
}
|
||||
|
||||
// NewCounterFunc works like the function of the same name in the prometheus
|
||||
@ -152,45 +181,35 @@ func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc
|
||||
// panics.
|
||||
func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
|
||||
g := prometheus.NewCounterFunc(opts, function)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function)
|
||||
}
|
||||
|
||||
// NewGauge works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Gauge with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics.
|
||||
func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
|
||||
g := prometheus.NewGauge(opts)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
return With(prometheus.DefaultRegisterer).NewGauge(opts)
|
||||
}
|
||||
|
||||
// NewGaugeVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the GaugeVec with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics.
|
||||
func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
|
||||
g := prometheus.NewGaugeVec(opts, labelNames)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames)
|
||||
}
|
||||
|
||||
// NewGaugeFunc works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the GaugeFunc with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics.
|
||||
func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
|
||||
g := prometheus.NewGaugeFunc(opts, function)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function)
|
||||
}
|
||||
|
||||
// NewSummary works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Summary with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics.
|
||||
func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
|
||||
s := prometheus.NewSummary(opts)
|
||||
prometheus.MustRegister(s)
|
||||
return s
|
||||
return With(prometheus.DefaultRegisterer).NewSummary(opts)
|
||||
}
|
||||
|
||||
// NewSummaryVec works like the function of the same name in the prometheus
|
||||
@ -198,18 +217,14 @@ func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec
|
||||
// panics.
|
||||
func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
|
||||
s := prometheus.NewSummaryVec(opts, labelNames)
|
||||
prometheus.MustRegister(s)
|
||||
return s
|
||||
return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames)
|
||||
}
|
||||
|
||||
// NewHistogram works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the Histogram with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics.
|
||||
func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
|
||||
h := prometheus.NewHistogram(opts)
|
||||
prometheus.MustRegister(h)
|
||||
return h
|
||||
return With(prometheus.DefaultRegisterer).NewHistogram(opts)
|
||||
}
|
||||
|
||||
// NewHistogramVec works like the function of the same name in the prometheus
|
||||
@ -217,7 +232,145 @@ func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec
|
||||
// panics.
|
||||
func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
|
||||
h := prometheus.NewHistogramVec(opts, labelNames)
|
||||
prometheus.MustRegister(h)
|
||||
return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames)
|
||||
}
|
||||
|
||||
// NewUntypedFunc works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the UntypedFunc with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc
|
||||
// panics.
|
||||
func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc {
|
||||
return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function)
|
||||
}
|
||||
|
||||
// Factory provides factory methods to create Collectors that are automatically
|
||||
// registered with a Registerer. Create a Factory with the With function,
|
||||
// providing a Registerer to auto-register created Collectors with. The zero
|
||||
// value of a Factory creates Collectors that are not registered with any
|
||||
// Registerer. All methods of the Factory panic if the registration fails.
|
||||
type Factory struct {
|
||||
r prometheus.Registerer
|
||||
}
|
||||
|
||||
// With creates a Factory using the provided Registerer for registration of the
|
||||
// created Collectors. If the provided Registerer is nil, the returned Factory
|
||||
// creates Collectors that are not registered with any Registerer.
|
||||
func With(r prometheus.Registerer) Factory { return Factory{r} }
|
||||
|
||||
// NewCounter works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Counter with the Factory's Registerer.
|
||||
func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
|
||||
c := prometheus.NewCounter(opts)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// NewCounterVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the CounterVec with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
|
||||
c := prometheus.NewCounterVec(opts, labelNames)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// NewCounterFunc works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the CounterFunc with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
|
||||
c := prometheus.NewCounterFunc(opts, function)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// NewGauge works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Gauge with the Factory's Registerer.
|
||||
func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
|
||||
g := prometheus.NewGauge(opts)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(g)
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// NewGaugeVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the GaugeVec with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
|
||||
g := prometheus.NewGaugeVec(opts, labelNames)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(g)
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// NewGaugeFunc works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the GaugeFunc with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
|
||||
g := prometheus.NewGaugeFunc(opts, function)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(g)
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// NewSummary works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Summary with the Factory's Registerer.
|
||||
func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
|
||||
s := prometheus.NewSummary(opts)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// NewSummaryVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the SummaryVec with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
|
||||
s := prometheus.NewSummaryVec(opts, labelNames)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// NewHistogram works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the Histogram with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
|
||||
h := prometheus.NewHistogram(opts)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// NewHistogramVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the HistogramVec with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
|
||||
h := prometheus.NewHistogramVec(opts, labelNames)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// NewUntypedFunc works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the UntypedFunc with the Factory's
|
||||
// Registerer.
|
||||
func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc {
|
||||
u := prometheus.NewUntypedFunc(opts, function)
|
||||
if f.r != nil {
|
||||
f.r.MustRegister(u)
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
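A short usage sketch of the new promauto.With/Factory API, e.g. with a throwaway registry in a test (metric names are illustrative):

// Register collectors against a local registry via a promauto Factory.
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()
	factory := promauto.With(reg)

	events := factory.NewCounter(prometheus.CounterOpts{
		Name: "example_events_total",
		Help: "Example counter registered with a local registry.",
	})
	events.Inc()

	// With a nil Registerer the Factory still builds the collector but skips
	// registration entirely, which can be handy in unit tests.
	unregistered := promauto.With(nil).NewCounter(prometheus.CounterOpts{
		Name: "example_unregistered_total",
		Help: "Built but never registered.",
	})
	unregistered.Inc()
}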
10
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
generated
vendored
@ -53,12 +53,16 @@ func (r *responseWriterDelegator) Written() int64 {
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||
if r.observeWriteHeader != nil && !r.wroteHeader {
|
||||
// Only call observeWriteHeader for the 1st time. It's a bug if
|
||||
// WriteHeader is called more than once, but we want to protect
|
||||
// against it here. Note that we still delegate the WriteHeader
|
||||
// to the original ResponseWriter to not mask the bug from it.
|
||||
r.observeWriteHeader(code)
|
||||
}
|
||||
r.status = code
|
||||
r.wroteHeader = true
|
||||
r.ResponseWriter.WriteHeader(code)
|
||||
if r.observeWriteHeader != nil {
|
||||
r.observeWriteHeader(code)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
|
||||
|
82
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
@ -144,7 +144,12 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
|
||||
}
|
||||
}
|
||||
|
||||
contentType := expfmt.Negotiate(req.Header)
|
||||
var contentType expfmt.Format
|
||||
if opts.EnableOpenMetrics {
|
||||
contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
|
||||
} else {
|
||||
contentType = expfmt.Negotiate(req.Header)
|
||||
}
|
||||
header := rsp.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
|
||||
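The new negotiation branch above is controlled by HandlerOpts.EnableOpenMetrics (documented further down in this file); a hedged sketch of exposing /metrics with it switched on (address and handler wiring are illustrative):

// Serve metrics with OpenMetrics content negotiation enabled, which is what
// allows exemplars to be transmitted to compatible scrapers.
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	http.Handle("/metrics", promhttp.HandlerFor(
		prometheus.DefaultGatherer,
		promhttp.HandlerOpts{EnableOpenMetrics: true},
	))
	log.Fatal(http.ListenAndServe(":8080", nil))
}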
@@ -162,28 +167,40 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {

		enc := expfmt.NewEncoder(w, contentType)

		var lastErr error
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				lastErr = err
				if opts.ErrorLog != nil {
					opts.ErrorLog.Println("error encoding and sending metric family:", err)
				}
				errCnt.WithLabelValues("encoding").Inc()
				switch opts.ErrorHandling {
				case PanicOnError:
					panic(err)
				case ContinueOnError:
					// Handled later.
				case HTTPErrorOnError:
					httpError(rsp, err)
					return
				}
		// handleError handles the error according to opts.ErrorHandling
		// and returns true if we have to abort after the handling.
		handleError := func(err error) bool {
			if err == nil {
				return false
			}
			if opts.ErrorLog != nil {
				opts.ErrorLog.Println("error encoding and sending metric family:", err)
			}
			errCnt.WithLabelValues("encoding").Inc()
			switch opts.ErrorHandling {
			case PanicOnError:
				panic(err)
			case HTTPErrorOnError:
				// We cannot really send an HTTP error at this
				// point because we most likely have written
				// something to rsp already. But at least we can
				// stop sending.
				return true
			}
			// Do nothing in all other cases, including ContinueOnError.
			return false
		}

		if lastErr != nil {
			httpError(rsp, lastErr)
		for _, mf := range mfs {
			if handleError(enc.Encode(mf)) {
				return
			}
		}
		if closer, ok := enc.(expfmt.Closer); ok {
			// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
			if handleError(closer.Close()) {
				return
			}
		}
	})
@@ -255,7 +272,12 @@ type HandlerErrorHandling int
// errors are encountered.
const (
	// Serve an HTTP status code 500 upon the first error
	// encountered. Report the error message in the body.
	// encountered. Report the error message in the body. Note that HTTP
	// errors cannot be served anymore once the beginning of a regular
	// payload has been sent. Thus, in the (unlikely) case that encoding the
	// payload into the negotiated wire format fails, serving the response
	// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
	// those errors.
	HTTPErrorOnError HandlerErrorHandling = iota
	// Ignore errors and try to serve as many metrics as possible. However,
	// if no metrics can be served, serve an HTTP status code 500 and the
@@ -318,6 +340,16 @@ type HandlerOpts struct {
	// away). Until the implementation is improved, it is recommended to
	// implement a separate timeout in potentially slow Collectors.
	Timeout time.Duration
	// If true, the experimental OpenMetrics encoding is added to the
	// possible options during content negotiation. Note that Prometheus
	// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
	// the only way to transmit exemplars. However, the move to OpenMetrics
	// is not completely transparent. Most notably, the values of "quantile"
	// labels of Summaries and "le" labels of Histograms are formatted with
	// a trailing ".0" if they would otherwise look like integer numbers
	// (which changes the identity of the resulting series on the Prometheus
	// server).
	EnableOpenMetrics bool
}

// gzipAccepted returns whether the client will accept gzip-encoded content.
@@ -334,11 +366,9 @@ func gzipAccepted(header http.Header) bool {
}

// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerErrer. Error contents is
// supposed to be uncompressed plain text. However, same as with a plain
// http.Error, any header settings will be void if the header has already been
// sent. The error message will still be written to the writer, but it will
// probably be of limited use.
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
// must not be called if the header or any payload has already been sent.
func httpError(rsp http.ResponseWriter, err error) {
	rsp.Header().Del(contentEncodingHeader)
	http.Error(
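A minimal sketch of how a consumer of promhttp wires up the new EnableOpenMetrics option (the registry and listen address below are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// With EnableOpenMetrics set, clients that send
	// "Accept: application/openmetrics-text" get the OpenMetrics encoding
	// (and therefore exemplars); everyone else still gets the classic
	// Prometheus text format.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		EnableOpenMetrics: true,
	}))
	_ = http.ListenAndServe(":8080", nil)
}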
1  vendor/github.com/prometheus/client_golang/prometheus/registry.go  generated  vendored
@@ -26,6 +26,7 @@ import (
	"unicode/utf8"

	"github.com/cespare/xxhash/v2"
	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"
	"github.com/prometheus/common/expfmt"
1  vendor/github.com/prometheus/client_golang/prometheus/summary.go  generated  vendored
@@ -23,6 +23,7 @@ import (
	"time"

	"github.com/beorn7/perks/quantile"
	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"

	dto "github.com/prometheus/client_model/go"
51  vendor/github.com/prometheus/client_golang/prometheus/value.go  generated  vendored
@@ -16,8 +16,12 @@ package prometheus
import (
	"fmt"
	"sort"
	"time"
	"unicode/utf8"

	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"

	dto "github.com/prometheus/client_model/go"
)
@@ -25,7 +29,8 @@ import (
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int

// Possible values for the ValueType enum.
// Possible values for the ValueType enum. Use UntypedValue to mark a metric
// with an unknown type.
const (
	_ ValueType = iota
	CounterValue
@@ -69,7 +74,7 @@ func (v *valueFunc) Desc() *Desc {
}

func (v *valueFunc) Write(out *dto.Metric) error {
	return populateMetric(v.valType, v.function(), v.labelPairs, out)
	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
}

// NewConstMetric returns a metric with one fixed value that cannot be
@@ -116,19 +121,20 @@ func (m *constMetric) Desc() *Desc {
}

func (m *constMetric) Write(out *dto.Metric) error {
	return populateMetric(m.valType, m.val, m.labelPairs, out)
	return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
}

func populateMetric(
	t ValueType,
	v float64,
	labelPairs []*dto.LabelPair,
	e *dto.Exemplar,
	m *dto.Metric,
) error {
	m.Label = labelPairs
	switch t {
	case CounterValue:
		m.Counter = &dto.Counter{Value: proto.Float64(v)}
		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
	case GaugeValue:
		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
	case UntypedValue:
@@ -160,3 +166,40 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
	sort.Sort(labelPairSorter(labelPairs))
	return labelPairs
}

// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
const ExemplarMaxRunes = 64

// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
// number of runes in the label names and values exceeds ExemplarMaxRunes.
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
	e := &dto.Exemplar{}
	e.Value = proto.Float64(value)
	tsProto, err := ptypes.TimestampProto(ts)
	if err != nil {
		return nil, err
	}
	e.Timestamp = tsProto
	labelPairs := make([]*dto.LabelPair, 0, len(l))
	var runes int
	for name, value := range l {
		if !checkLabelName(name) {
			return nil, fmt.Errorf("exemplar label name %q is invalid", name)
		}
		runes += utf8.RuneCountInString(name)
		if !utf8.ValidString(value) {
			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
		}
		runes += utf8.RuneCountInString(value)
		labelPairs = append(labelPairs, &dto.LabelPair{
			Name:  proto.String(name),
			Value: proto.String(value),
		})
	}
	if runes > ExemplarMaxRunes {
		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
	}
	e.Label = labelPairs
	return e, nil
}
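For reference, a hedged sketch of how the exemplar support introduced here is reached from user code (assuming client_golang v1.7.x, where the concrete counter type satisfies the ExemplarAdder interface; the trace_id value is invented):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_requests_total", // hypothetical metric name
		Help: "Handled requests.",
	})
	// The exemplar labels are validated by newExemplar above: at most 64 runes
	// in total, valid label names, and UTF-8 label values.
	if adder, ok := requests.(prometheus.ExemplarAdder); ok {
		adder.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
	}
	// Exemplars only reach scrapers via the OpenMetrics encoding
	// (see EnableOpenMetrics in promhttp above).
}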
14  vendor/github.com/prometheus/client_golang/prometheus/vec.go  generated  vendored
@@ -24,7 +24,7 @@ import (
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
// It also handles label currying. It uses basicMetricVec internally.
// It also handles label currying.
type metricVec struct {
	*metricMap

@@ -91,6 +91,18 @@ func (m *metricVec) Delete(labels Labels) bool {
	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}

// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.

// Describe implements Collector.
func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }

// Collect implements Collector.
func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }

// Reset deletes all metrics in this vector.
func (m *metricVec) Reset() { m.metricMap.Reset() }

func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
	var (
		newCurry []curriedLabelValue
14  vendor/github.com/prometheus/client_golang/prometheus/wrap.go  generated  vendored
@ -17,6 +17,7 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
//lint:ignore SA1019 Need to keep deprecated package for compatibility.
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
@ -27,7 +28,8 @@ import (
|
||||
// registered with the wrapped Registerer in a modified way. The modified
|
||||
// Collector adds the provided Labels to all Metrics it collects (as
|
||||
// ConstLabels). The Metrics collected by the unmodified Collector must not
|
||||
// duplicate any of those labels.
|
||||
// duplicate any of those labels. Wrapping a nil value is valid, resulting
|
||||
// in a no-op Registerer.
|
||||
//
|
||||
// WrapRegistererWith provides a way to add fixed labels to a subset of
|
||||
// Collectors. It should not be used to add fixed labels to all metrics exposed.
|
||||
@ -50,6 +52,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
|
||||
// Registerer. Collectors registered with the returned Registerer will be
|
||||
// registered with the wrapped Registerer in a modified way. The modified
|
||||
// Collector adds the provided prefix to the name of all Metrics it collects.
|
||||
// Wrapping a nil value is valid, resulting in a no-op Registerer.
|
||||
//
|
||||
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
|
||||
// a sub-system. To make this work, register metrics of the sub-system with the
|
||||
@ -80,6 +83,9 @@ type wrappingRegisterer struct {
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Register(c Collector) error {
|
||||
if r.wrappedRegisterer == nil {
|
||||
return nil
|
||||
}
|
||||
return r.wrappedRegisterer.Register(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
@ -88,6 +94,9 @@ func (r *wrappingRegisterer) Register(c Collector) error {
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
|
||||
if r.wrappedRegisterer == nil {
|
||||
return
|
||||
}
|
||||
for _, c := range cs {
|
||||
if err := r.Register(c); err != nil {
|
||||
panic(err)
|
||||
@ -96,6 +105,9 @@ func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Unregister(c Collector) bool {
|
||||
if r.wrappedRegisterer == nil {
|
||||
return false
|
||||
}
|
||||
return r.wrappedRegisterer.Unregister(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
|
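The wrap.go change above makes wrapping a nil Registerer a valid no-op. A brief hedged sketch of how these wrappers are used (the prefix, label, and metric name below are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()
	// Metrics registered through wrapped get a "gatus_" name prefix and a
	// fixed component label added as ConstLabels.
	wrapped := prometheus.WrapRegistererWithPrefix("gatus_",
		prometheus.WrapRegistererWith(prometheus.Labels{"component": "core"}, reg))
	wrapped.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "up", // exposed as gatus_up{component="core"}
		Help: "Whether the service is up.",
	}))
	// As of this version, passing a nil Registerer to either wrapper simply
	// makes registration a no-op instead of panicking.
}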
266  vendor/github.com/prometheus/client_model/go/metrics.pb.go  generated  vendored
@ -1,11 +1,14 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: metrics.proto
|
||||
|
||||
package io_prometheus_client // import "github.com/prometheus/client_model/go"
|
||||
package io_prometheus_client
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -16,7 +19,7 @@ var _ = math.Inf
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type MetricType int32
|
||||
|
||||
@ -35,6 +38,7 @@ var MetricType_name = map[int32]string{
|
||||
3: "UNTYPED",
|
||||
4: "HISTOGRAM",
|
||||
}
|
||||
|
||||
var MetricType_value = map[string]int32{
|
||||
"COUNTER": 0,
|
||||
"GAUGE": 1,
|
||||
@ -48,9 +52,11 @@ func (x MetricType) Enum() *MetricType {
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
func (x MetricType) String() string {
|
||||
return proto.EnumName(MetricType_name, int32(x))
|
||||
}
|
||||
|
||||
func (x *MetricType) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
|
||||
if err != nil {
|
||||
@ -59,8 +65,9 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
|
||||
*x = MetricType(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (MetricType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{0}
|
||||
}
|
||||
|
||||
type LabelPair struct {
|
||||
@ -75,16 +82,17 @@ func (m *LabelPair) Reset() { *m = LabelPair{} }
|
||||
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
|
||||
func (*LabelPair) ProtoMessage() {}
|
||||
func (*LabelPair) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{0}
|
||||
}
|
||||
|
||||
func (m *LabelPair) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_LabelPair.Unmarshal(m, b)
|
||||
}
|
||||
func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *LabelPair) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LabelPair.Merge(dst, src)
|
||||
func (m *LabelPair) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_LabelPair.Merge(m, src)
|
||||
}
|
||||
func (m *LabelPair) XXX_Size() int {
|
||||
return xxx_messageInfo_LabelPair.Size(m)
|
||||
@ -120,16 +128,17 @@ func (m *Gauge) Reset() { *m = Gauge{} }
|
||||
func (m *Gauge) String() string { return proto.CompactTextString(m) }
|
||||
func (*Gauge) ProtoMessage() {}
|
||||
func (*Gauge) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{1}
|
||||
}
|
||||
|
||||
func (m *Gauge) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Gauge.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Gauge) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Gauge.Merge(dst, src)
|
||||
func (m *Gauge) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Gauge.Merge(m, src)
|
||||
}
|
||||
func (m *Gauge) XXX_Size() int {
|
||||
return xxx_messageInfo_Gauge.Size(m)
|
||||
@ -148,26 +157,28 @@ func (m *Gauge) GetValue() float64 {
|
||||
}
|
||||
|
||||
type Counter struct {
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
|
||||
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Counter) Reset() { *m = Counter{} }
|
||||
func (m *Counter) String() string { return proto.CompactTextString(m) }
|
||||
func (*Counter) ProtoMessage() {}
|
||||
func (*Counter) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{2}
|
||||
}
|
||||
|
||||
func (m *Counter) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Counter.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Counter) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Counter.Merge(dst, src)
|
||||
func (m *Counter) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Counter.Merge(m, src)
|
||||
}
|
||||
func (m *Counter) XXX_Size() int {
|
||||
return xxx_messageInfo_Counter.Size(m)
|
||||
@ -185,6 +196,13 @@ func (m *Counter) GetValue() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Counter) GetExemplar() *Exemplar {
|
||||
if m != nil {
|
||||
return m.Exemplar
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Quantile struct {
|
||||
Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
|
||||
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
|
||||
@ -197,16 +215,17 @@ func (m *Quantile) Reset() { *m = Quantile{} }
|
||||
func (m *Quantile) String() string { return proto.CompactTextString(m) }
|
||||
func (*Quantile) ProtoMessage() {}
|
||||
func (*Quantile) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{3}
|
||||
}
|
||||
|
||||
func (m *Quantile) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Quantile.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Quantile) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Quantile.Merge(dst, src)
|
||||
func (m *Quantile) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Quantile.Merge(m, src)
|
||||
}
|
||||
func (m *Quantile) XXX_Size() int {
|
||||
return xxx_messageInfo_Quantile.Size(m)
|
||||
@ -244,16 +263,17 @@ func (m *Summary) Reset() { *m = Summary{} }
|
||||
func (m *Summary) String() string { return proto.CompactTextString(m) }
|
||||
func (*Summary) ProtoMessage() {}
|
||||
func (*Summary) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{4}
|
||||
}
|
||||
|
||||
func (m *Summary) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Summary.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Summary) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Summary.Merge(dst, src)
|
||||
func (m *Summary) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Summary.Merge(m, src)
|
||||
}
|
||||
func (m *Summary) XXX_Size() int {
|
||||
return xxx_messageInfo_Summary.Size(m)
|
||||
@ -296,16 +316,17 @@ func (m *Untyped) Reset() { *m = Untyped{} }
|
||||
func (m *Untyped) String() string { return proto.CompactTextString(m) }
|
||||
func (*Untyped) ProtoMessage() {}
|
||||
func (*Untyped) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{5}
|
||||
}
|
||||
|
||||
func (m *Untyped) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Untyped.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Untyped) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Untyped.Merge(dst, src)
|
||||
func (m *Untyped) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Untyped.Merge(m, src)
|
||||
}
|
||||
func (m *Untyped) XXX_Size() int {
|
||||
return xxx_messageInfo_Untyped.Size(m)
|
||||
@ -336,16 +357,17 @@ func (m *Histogram) Reset() { *m = Histogram{} }
|
||||
func (m *Histogram) String() string { return proto.CompactTextString(m) }
|
||||
func (*Histogram) ProtoMessage() {}
|
||||
func (*Histogram) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{6}
|
||||
}
|
||||
|
||||
func (m *Histogram) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Histogram.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Histogram) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Histogram.Merge(dst, src)
|
||||
func (m *Histogram) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Histogram.Merge(m, src)
|
||||
}
|
||||
func (m *Histogram) XXX_Size() int {
|
||||
return xxx_messageInfo_Histogram.Size(m)
|
||||
@ -378,27 +400,29 @@ func (m *Histogram) GetBucket() []*Bucket {
|
||||
}
|
||||
|
||||
type Bucket struct {
|
||||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
|
||||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
|
||||
UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
|
||||
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Bucket) Reset() { *m = Bucket{} }
|
||||
func (m *Bucket) String() string { return proto.CompactTextString(m) }
|
||||
func (*Bucket) ProtoMessage() {}
|
||||
func (*Bucket) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{7}
|
||||
}
|
||||
|
||||
func (m *Bucket) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Bucket.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Bucket) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Bucket.Merge(dst, src)
|
||||
func (m *Bucket) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Bucket.Merge(m, src)
|
||||
}
|
||||
func (m *Bucket) XXX_Size() int {
|
||||
return xxx_messageInfo_Bucket.Size(m)
|
||||
@ -423,6 +447,68 @@ func (m *Bucket) GetUpperBound() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Bucket) GetExemplar() *Exemplar {
|
||||
if m != nil {
|
||||
return m.Exemplar
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Exemplar struct {
|
||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
|
||||
Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
|
||||
Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Exemplar) Reset() { *m = Exemplar{} }
|
||||
func (m *Exemplar) String() string { return proto.CompactTextString(m) }
|
||||
func (*Exemplar) ProtoMessage() {}
|
||||
func (*Exemplar) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_6039342a2ba47b72, []int{8}
|
||||
}
|
||||
|
||||
func (m *Exemplar) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Exemplar.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Exemplar) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Exemplar.Merge(m, src)
|
||||
}
|
||||
func (m *Exemplar) XXX_Size() int {
|
||||
return xxx_messageInfo_Exemplar.Size(m)
|
||||
}
|
||||
func (m *Exemplar) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Exemplar.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Exemplar proto.InternalMessageInfo
|
||||
|
||||
func (m *Exemplar) GetLabel() []*LabelPair {
|
||||
if m != nil {
|
||||
return m.Label
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Exemplar) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Metric struct {
|
||||
Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
|
||||
Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
|
||||
@ -440,16 +526,17 @@ func (m *Metric) Reset() { *m = Metric{} }
|
||||
func (m *Metric) String() string { return proto.CompactTextString(m) }
|
||||
func (*Metric) ProtoMessage() {}
|
||||
func (*Metric) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{9}
|
||||
}
|
||||
|
||||
func (m *Metric) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Metric.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Metric) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Metric.Merge(dst, src)
|
||||
func (m *Metric) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Metric.Merge(m, src)
|
||||
}
|
||||
func (m *Metric) XXX_Size() int {
|
||||
return xxx_messageInfo_Metric.Size(m)
|
||||
@ -523,16 +610,17 @@ func (m *MetricFamily) Reset() { *m = MetricFamily{} }
|
||||
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*MetricFamily) ProtoMessage() {}
|
||||
func (*MetricFamily) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
|
||||
return fileDescriptor_6039342a2ba47b72, []int{10}
|
||||
}
|
||||
|
||||
func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *MetricFamily) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MetricFamily.Merge(dst, src)
|
||||
func (m *MetricFamily) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MetricFamily.Merge(m, src)
|
||||
}
|
||||
func (m *MetricFamily) XXX_Size() int {
|
||||
return xxx_messageInfo_MetricFamily.Size(m)
|
||||
@ -572,6 +660,7 @@ func (m *MetricFamily) GetMetric() []*Metric {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
|
||||
proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
|
||||
proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
|
||||
proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
|
||||
@ -580,50 +669,55 @@ func init() {
|
||||
proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
|
||||
proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
|
||||
proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
|
||||
proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
|
||||
proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
|
||||
proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
|
||||
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
|
||||
func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
|
||||
|
||||
var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
|
||||
// 591 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
|
||||
0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
|
||||
0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
|
||||
0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
|
||||
0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
|
||||
0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
|
||||
0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
|
||||
0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
|
||||
0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
|
||||
0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
|
||||
0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
|
||||
0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
|
||||
0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
|
||||
0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
|
||||
0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
|
||||
0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
|
||||
0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
|
||||
0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
|
||||
0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
|
||||
0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
|
||||
0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
|
||||
0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
|
||||
0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
|
||||
0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
|
||||
0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
|
||||
0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
|
||||
0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
|
||||
0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
|
||||
0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
|
||||
0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
|
||||
0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
|
||||
0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
|
||||
0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
|
||||
0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
|
||||
0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
|
||||
0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
|
||||
0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
|
||||
var fileDescriptor_6039342a2ba47b72 = []byte{
|
||||
// 665 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
|
||||
0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
|
||||
0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
|
||||
0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
|
||||
0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
|
||||
0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
|
||||
0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
|
||||
0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
|
||||
0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
|
||||
0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
|
||||
0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
|
||||
0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
|
||||
0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
|
||||
0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
|
||||
0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
|
||||
0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
|
||||
0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
|
||||
0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
|
||||
0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
|
||||
0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
|
||||
0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
|
||||
0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
|
||||
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
|
||||
0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
|
||||
0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
|
||||
0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
|
||||
0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
|
||||
0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
|
||||
0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
|
||||
0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
|
||||
0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
|
||||
0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
|
||||
0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
|
||||
0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
|
||||
0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
|
||||
0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
|
||||
0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
|
||||
0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
|
||||
0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
|
||||
0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
|
||||
0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
|
||||
0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
|
||||
}
|
||||
|
2  vendor/github.com/prometheus/common/expfmt/decode.go  generated  vendored
@@ -164,7 +164,7 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
}

// ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurrs during sample extraction, it continues to
// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
// error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
128  vendor/github.com/prometheus/common/expfmt/encode.go  generated  vendored
@ -30,17 +30,38 @@ type Encoder interface {
|
||||
Encode(*dto.MetricFamily) error
|
||||
}
|
||||
|
||||
type encoder func(*dto.MetricFamily) error
|
||||
|
||||
func (e encoder) Encode(v *dto.MetricFamily) error {
|
||||
return e(v)
|
||||
// Closer is implemented by Encoders that need to be closed to finalize
|
||||
// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
|
||||
//
|
||||
// Note that all Encoder implementations returned from this package implement
|
||||
// Closer, too, even if the Close call is a no-op. This happens in preparation
|
||||
// for adding a Close method to the Encoder interface directly in a (mildly
|
||||
// breaking) release in the future.
|
||||
type Closer interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Negotiate returns the Content-Type based on the given Accept header.
|
||||
// If no appropriate accepted type is found, FmtText is returned.
|
||||
type encoderCloser struct {
|
||||
encode func(*dto.MetricFamily) error
|
||||
close func() error
|
||||
}
|
||||
|
||||
func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
|
||||
return ec.encode(v)
|
||||
}
|
||||
|
||||
func (ec encoderCloser) Close() error {
|
||||
return ec.close()
|
||||
}
|
||||
|
||||
// Negotiate returns the Content-Type based on the given Accept header. If no
|
||||
// appropriate accepted type is found, FmtText is returned (which is the
|
||||
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
|
||||
// as the support is still experimental. To include the option to negotiate
|
||||
// FmtOpenMetrics, use NegotiateOpenMetrics.
|
||||
func Negotiate(h http.Header) Format {
|
||||
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
|
||||
// Check for protocol buffer
|
||||
ver := ac.Params["version"]
|
||||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||
switch ac.Params["encoding"] {
|
||||
case "delimited":
|
||||
@ -51,8 +72,6 @@ func Negotiate(h http.Header) Format {
|
||||
return FmtProtoCompact
|
||||
}
|
||||
}
|
||||
// Check for text format.
|
||||
ver := ac.Params["version"]
|
||||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||
return FmtText
|
||||
}
|
||||
@ -60,29 +79,84 @@ func Negotiate(h http.Header) Format {
|
||||
return FmtText
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder based on content type negotiation.
|
||||
// NegotiateIncludingOpenMetrics works like Negotiate but includes
|
||||
// FmtOpenMetrics as an option for the result. Note that this function is
|
||||
// temporary and will disappear once FmtOpenMetrics is fully supported and as
|
||||
// such may be negotiated by the normal Negotiate function.
|
||||
func NegotiateIncludingOpenMetrics(h http.Header) Format {
|
||||
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
|
||||
ver := ac.Params["version"]
|
||||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||
switch ac.Params["encoding"] {
|
||||
case "delimited":
|
||||
return FmtProtoDelim
|
||||
case "text":
|
||||
return FmtProtoText
|
||||
case "compact-text":
|
||||
return FmtProtoCompact
|
||||
}
|
||||
}
|
||||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||
return FmtText
|
||||
}
|
||||
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
|
||||
return FmtOpenMetrics
|
||||
}
|
||||
}
|
||||
return FmtText
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder based on content type negotiation. All
|
||||
// Encoder implementations returned by NewEncoder also implement Closer, and
|
||||
// callers should always call the Close method. It is currently only required
|
||||
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
|
||||
// to the Encoder interface directly. The current version of the Encoder
|
||||
// interface is kept for backwards compatibility.
|
||||
func NewEncoder(w io.Writer, format Format) Encoder {
|
||||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.WriteDelimited(w, v)
|
||||
return err
|
||||
})
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.WriteDelimited(w, v)
|
||||
return err
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
case FmtProtoCompact:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, v.String())
|
||||
return err
|
||||
})
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, v.String())
|
||||
return err
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
case FmtProtoText:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
|
||||
return err
|
||||
})
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
|
||||
return err
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
case FmtText:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := MetricFamilyToText(w, v)
|
||||
return err
|
||||
})
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := MetricFamilyToText(w, v)
|
||||
return err
|
||||
},
|
||||
close: func() error { return nil },
|
||||
}
|
||||
case FmtOpenMetrics:
|
||||
return encoderCloser{
|
||||
encode: func(v *dto.MetricFamily) error {
|
||||
_, err := MetricFamilyToOpenMetrics(w, v)
|
||||
return err
|
||||
},
|
||||
close: func() error {
|
||||
_, err := FinalizeOpenMetrics(w)
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
panic("expfmt.NewEncoder: unknown format")
|
||||
panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
|
||||
}
|
||||
|
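To illustrate the new Closer contract in expfmt (a hedged sketch; gathering from the default registry and writing to stdout are only for demonstration):

package main

import (
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mfs, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		panic(err)
	}
	enc := expfmt.NewEncoder(os.Stdout, expfmt.FmtOpenMetrics)
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			panic(err)
		}
	}
	// Every encoder returned by NewEncoder also implements Closer; for
	// OpenMetrics, Close writes the required trailing "# EOF" line.
	if closer, ok := enc.(expfmt.Closer); ok {
		if err := closer.Close(); err != nil {
			panic(err)
		}
	}
}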
11  vendor/github.com/prometheus/common/expfmt/expfmt.go  generated  vendored
@@ -19,10 +19,12 @@ type Format string

// Constants to assemble the Content-Type values for the different wire protocols.
const (
	TextVersion = "0.0.4"
	ProtoType = `application/vnd.google.protobuf`
	ProtoProtocol = `io.prometheus.client.MetricFamily`
	ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
	TextVersion        = "0.0.4"
	ProtoType          = `application/vnd.google.protobuf`
	ProtoProtocol      = `io.prometheus.client.MetricFamily`
	ProtoFmt           = ProtoType + "; proto=" + ProtoProtocol + ";"
	OpenMetricsType    = `application/openmetrics-text`
	OpenMetricsVersion = "0.0.1"

	// The Content-Type values for the different wire protocols.
	FmtUnknown Format = `<unknown>`
@@ -30,6 +32,7 @@ const (
	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
	FmtProtoText    Format = ProtoFmt + ` encoding=text`
	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
	FmtOpenMetrics  Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
)

const (
527  vendor/github.com/prometheus/common/expfmt/openmetrics_create.go  generated  vendored  Normal file
@ -0,0 +1,527 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
|
||||
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
|
||||
// the number of bytes written and any error encountered. The output will have
|
||||
// the same order as the input, no further sorting is performed. Furthermore,
|
||||
// this function assumes the input is already sanitized and does not perform any
|
||||
// sanity checks. If the input contains duplicate metrics or invalid metric or
|
||||
// label names, the conversion will result in invalid text format output.
|
||||
//
|
||||
// This function fulfills the type 'expfmt.encoder'.
|
||||
//
|
||||
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
|
||||
// on individual metric families, it is the responsibility of the caller to
|
||||
// append this line to 'out' once all metric families have been written.
|
||||
// Conveniently, this can be done by calling FinalizeOpenMetrics.
|
||||
//
|
||||
// The output should be fully OpenMetrics compliant. However, there are a few
|
||||
// missing features and peculiarities to avoid complications when switching from
|
||||
// Prometheus to OpenMetrics or vice versa:
|
||||
//
|
||||
// - Counters are expected to have the `_total` suffix in their metric name. In
|
||||
// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
|
||||
// line. A counter with a missing `_total` suffix is not an error. However,
|
||||
// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
|
||||
// output.
|
||||
//
|
||||
// - No support for the following (optional) features: `# UNIT` line, `_created`
|
||||
// line, info type, stateset type, gaugehistogram type.
|
||||
//
|
||||
// - The size of exemplar labels is not checked (i.e. it's possible to create
|
||||
// exemplars that are larger than allowed by the OpenMetrics specification).
|
||||
//
|
||||
// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
|
||||
// with a `NaN` value.)
|
||||
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
|
||||
name := in.GetName()
|
||||
if name == "" {
|
||||
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
|
||||
}
|
||||
|
||||
// Try the interface upgrade. If it doesn't work, we'll use a
|
||||
// bufio.Writer from the sync.Pool.
|
||||
w, ok := out.(enhancedWriter)
|
||||
if !ok {
|
||||
b := bufPool.Get().(*bufio.Writer)
|
||||
b.Reset(out)
|
||||
w = b
|
||||
defer func() {
|
||||
bErr := b.Flush()
|
||||
if err == nil {
|
||||
err = bErr
|
||||
}
|
||||
bufPool.Put(b)
|
||||
}()
|
||||
}
|
||||
|
||||
var (
|
||||
n int
|
||||
metricType = in.GetType()
|
||||
shortName = name
|
||||
)
|
||||
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
|
||||
shortName = name[:len(name)-6]
|
||||
}
|
||||
|
||||
// Comments, first HELP, then TYPE.
|
||||
if in.Help != nil {
|
||||
n, err = w.WriteString("# HELP ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(shortName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeEscapedString(w, *in.Help, true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = w.WriteString("# TYPE ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = w.WriteString(shortName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
if strings.HasSuffix(name, "_total") {
|
||||
n, err = w.WriteString(" counter\n")
|
||||
} else {
|
||||
n, err = w.WriteString(" unknown\n")
|
||||
}
|
||||
case dto.MetricType_GAUGE:
|
||||
n, err = w.WriteString(" gauge\n")
|
||||
case dto.MetricType_SUMMARY:
|
||||
n, err = w.WriteString(" summary\n")
|
||||
case dto.MetricType_UNTYPED:
|
||||
n, err = w.WriteString(" unknown\n")
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
n, err = w.WriteString(" histogram\n")
|
||||
default:
|
||||
return written, fmt.Errorf("unknown metric type %s", metricType.String())
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Finally the samples, one line for each.
|
||||
for _, metric := range in.Metric {
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
if metric.Counter == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected counter in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
// Note that we have ensured above that either the name
|
||||
// ends on `_total` or that the rendered type is
|
||||
// `unknown`. Therefore, no `_total` must be added here.
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Counter.GetValue(), 0, false,
|
||||
metric.Counter.Exemplar,
|
||||
)
|
||||
case dto.MetricType_GAUGE:
|
||||
if metric.Gauge == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected gauge in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Gauge.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_UNTYPED:
|
||||
if metric.Untyped == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected untyped in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric, "", 0,
|
||||
metric.Untyped.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_SUMMARY:
|
||||
if metric.Summary == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected summary in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
for _, q := range metric.Summary.Quantile {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "", metric,
|
||||
model.QuantileLabel, q.GetQuantile(),
|
||||
q.GetValue(), 0, false,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
metric.Summary.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
0, metric.Summary.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if metric.Histogram == nil {
|
||||
return written, fmt.Errorf(
|
||||
"expected histogram in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
infSeen := false
|
||||
for _, b := range metric.Histogram.Bucket {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
model.BucketLabel, b.GetUpperBound(),
|
||||
0, b.GetCumulativeCount(), true,
|
||||
b.Exemplar,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if math.IsInf(b.GetUpperBound(), +1) {
|
||||
infSeen = true
|
||||
}
|
||||
}
|
||||
if !infSeen {
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_bucket", metric,
|
||||
model.BucketLabel, math.Inf(+1),
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_sum", metric, "", 0,
|
||||
metric.Histogram.GetSampleSum(), 0, false,
|
||||
nil,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err = writeOpenMetricsSample(
|
||||
w, name, "_count", metric, "", 0,
|
||||
0, metric.Histogram.GetSampleCount(), true,
|
||||
nil,
|
||||
)
|
||||
default:
|
||||
return written, fmt.Errorf(
|
||||
"unexpected type in metric %s %s", name, metric,
|
||||
)
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
|
||||
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
|
||||
return w.Write([]byte("# EOF\n"))
|
||||
}
|
||||
|
||||
// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
|
||||
// w, given the metric name, the metric proto message itself, optionally an
|
||||
// additional label name with a float64 value (use empty string as label name if
|
||||
// not required), the value (optionally as float64 or uint64, determined by
|
||||
// useIntValue), and optionally an exemplar (use nil if not required). The
|
||||
// function returns the number of bytes written and any error encountered.
|
||||
func writeOpenMetricsSample(
|
||||
w enhancedWriter,
|
||||
name, suffix string,
|
||||
metric *dto.Metric,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
floatValue float64, intValue uint64, useIntValue bool,
|
||||
exemplar *dto.Exemplar,
|
||||
) (int, error) {
|
||||
var written int
|
||||
n, err := w.WriteString(name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if suffix != "" {
|
||||
n, err = w.WriteString(suffix)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = writeOpenMetricsLabelPairs(
|
||||
w, metric.Label, additionalLabelName, additionalLabelValue,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if useIntValue {
|
||||
n, err = writeUint(w, intValue)
|
||||
} else {
|
||||
n, err = writeOpenMetricsFloat(w, floatValue)
|
||||
}
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if metric.TimestampMs != nil {
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
// TODO(beorn7): Format this directly without converting to a float first.
|
||||
n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
if exemplar != nil {
|
||||
n, err = writeExemplar(w, exemplar)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
err = w.WriteByte('\n')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
|
||||
// in OpenMetrics style.
|
||||
func writeOpenMetricsLabelPairs(
|
||||
w enhancedWriter,
|
||||
in []*dto.LabelPair,
|
||||
additionalLabelName string, additionalLabelValue float64,
|
||||
) (int, error) {
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
var (
|
||||
written int
|
||||
separator byte = '{'
|
||||
)
|
||||
for _, lp := range in {
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(lp.GetName())
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = w.WriteString(`="`)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeEscapedString(w, lp.GetValue(), true)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
separator = ','
|
||||
}
|
||||
if additionalLabelName != "" {
|
||||
err := w.WriteByte(separator)
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err := w.WriteString(additionalLabelName)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = w.WriteString(`="`)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeOpenMetricsFloat(w, additionalLabelValue)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
err := w.WriteByte('}')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
|
||||
// function returns the number of bytes written and any error encountered.
|
||||
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
|
||||
written := 0
|
||||
n, err := w.WriteString(" # ")
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = writeOpenMetricsFloat(w, e.GetValue())
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if e.Timestamp != nil {
|
||||
err = w.WriteByte(' ')
|
||||
written++
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
ts, err := ptypes.Timestamp((*e).Timestamp)
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
// TODO(beorn7): Format this directly from components of ts to
|
||||
// avoid overflow/underflow and precision issues of the float
|
||||
// conversion.
|
||||
n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
return written, nil
|
||||
}
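The exemplar is appended after the sample's value (and optional timestamp) as `# {labels} value [timestamp]`. A line produced by this path might look like the following; the label and numbers are invented for illustration:

```
request_duration_seconds_bucket{le="0.25"} 17 # {trace_id="3a0c2d1f"} 0.18 1600210445.231
```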
|
||||
|
||||
// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
|
||||
// number would otherwise contain neither a "." nor an "e".
|
||||
func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
|
||||
switch {
|
||||
case f == 1:
|
||||
return w.WriteString("1.0")
|
||||
case f == 0:
|
||||
return w.WriteString("0.0")
|
||||
case f == -1:
|
||||
return w.WriteString("-1.0")
|
||||
case math.IsNaN(f):
|
||||
return w.WriteString("NaN")
|
||||
case math.IsInf(f, +1):
|
||||
return w.WriteString("+Inf")
|
||||
case math.IsInf(f, -1):
|
||||
return w.WriteString("-Inf")
|
||||
default:
|
||||
bp := numBufPool.Get().(*[]byte)
|
||||
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
|
||||
if !bytes.ContainsAny(*bp, "e.") {
|
||||
*bp = append(*bp, '.', '0')
|
||||
}
|
||||
written, err := w.Write(*bp)
|
||||
numBufPool.Put(bp)
|
||||
return written, err
|
||||
}
|
||||
}
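Roughly, the effect of this formatter on a few inputs, derived from the switch above (the middle column is the strconv output before the ".0" fix-up):

```
f       strconv 'g'   written
1       -             "1.0"    (special case)
42      "42"          "42.0"   (no '.' or 'e', so ".0" is appended)
0.35    "0.35"        "0.35"
1e22    "1e+22"       "1e+22"  (contains 'e', left as is)
+Inf    -             "+Inf"
```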
|
||||
|
||||
// writeUint is like writeInt just for uint64.
|
||||
func writeUint(w enhancedWriter, u uint64) (int, error) {
|
||||
bp := numBufPool.Get().(*[]byte)
|
||||
*bp = strconv.AppendUint((*bp)[:0], u, 10)
|
||||
written, err := w.Write(*bp)
|
||||
numBufPool.Put(bp)
|
||||
return written, err
|
||||
}
|
3
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
@ -423,9 +423,8 @@ var (
|
||||
func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
|
||||
if includeDoubleQuote {
|
||||
return quotedEscaper.WriteString(w, v)
|
||||
} else {
|
||||
return escaper.WriteString(w, v)
|
||||
}
|
||||
return escaper.WriteString(w, v)
|
||||
}
|
||||
|
||||
// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
|
||||
|
2
vendor/github.com/prometheus/common/model/fnv.go
generated
vendored
@ -20,7 +20,7 @@ const (
|
||||
prime64 = 1099511628211
|
||||
)
|
||||
|
||||
// hashNew initializies a new fnv64a hash value.
|
||||
// hashNew initializes a new fnv64a hash value.
|
||||
func hashNew() uint64 {
|
||||
return offset64
|
||||
}
|
||||
|
100
vendor/github.com/prometheus/common/model/time.go
generated
vendored
@ -181,73 +181,77 @@ func (d *Duration) Type() string {
|
||||
return "duration"
|
||||
}
|
||||
|
||||
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
|
||||
var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
|
||||
|
||||
// ParseDuration parses a string into a time.Duration, assuming that a year
|
||||
// always has 365d, a week always has 7d, and a day always has 24h.
|
||||
func ParseDuration(durationStr string) (Duration, error) {
|
||||
switch durationStr {
|
||||
case "0":
|
||||
// Allow 0 without a unit.
|
||||
return 0, nil
|
||||
case "":
|
||||
return 0, fmt.Errorf("empty duration string")
|
||||
}
|
||||
matches := durationRE.FindStringSubmatch(durationStr)
|
||||
if len(matches) != 3 {
|
||||
if matches == nil {
|
||||
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
|
||||
}
|
||||
var (
|
||||
n, _ = strconv.Atoi(matches[1])
|
||||
dur = time.Duration(n) * time.Millisecond
|
||||
)
|
||||
switch unit := matches[2]; unit {
|
||||
case "y":
|
||||
dur *= 1000 * 60 * 60 * 24 * 365
|
||||
case "w":
|
||||
dur *= 1000 * 60 * 60 * 24 * 7
|
||||
case "d":
|
||||
dur *= 1000 * 60 * 60 * 24
|
||||
case "h":
|
||||
dur *= 1000 * 60 * 60
|
||||
case "m":
|
||||
dur *= 1000 * 60
|
||||
case "s":
|
||||
dur *= 1000
|
||||
case "ms":
|
||||
// Value already correct
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
|
||||
var dur time.Duration
|
||||
|
||||
// Parse the match at pos `pos` in the regex and use `mult` to turn that
|
||||
// into ms, then add that value to the total parsed duration.
|
||||
m := func(pos int, mult time.Duration) {
|
||||
if matches[pos] == "" {
|
||||
return
|
||||
}
|
||||
n, _ := strconv.Atoi(matches[pos])
|
||||
d := time.Duration(n) * time.Millisecond
|
||||
dur += d * mult
|
||||
}
|
||||
|
||||
m(2, 1000*60*60*24*365) // y
|
||||
m(4, 1000*60*60*24*7) // w
|
||||
m(6, 1000*60*60*24) // d
|
||||
m(8, 1000*60*60) // h
|
||||
m(10, 1000*60) // m
|
||||
m(12, 1000) // s
|
||||
m(14, 1) // ms
|
||||
|
||||
return Duration(dur), nil
|
||||
}
|
||||
|
||||
func (d Duration) String() string {
|
||||
var (
|
||||
ms = int64(time.Duration(d) / time.Millisecond)
|
||||
unit = "ms"
|
||||
ms = int64(time.Duration(d) / time.Millisecond)
|
||||
r = ""
|
||||
)
|
||||
if ms == 0 {
|
||||
return "0s"
|
||||
}
|
||||
factors := map[string]int64{
|
||||
"y": 1000 * 60 * 60 * 24 * 365,
|
||||
"w": 1000 * 60 * 60 * 24 * 7,
|
||||
"d": 1000 * 60 * 60 * 24,
|
||||
"h": 1000 * 60 * 60,
|
||||
"m": 1000 * 60,
|
||||
"s": 1000,
|
||||
"ms": 1,
|
||||
|
||||
f := func(unit string, mult int64, exact bool) {
|
||||
if exact && ms%mult != 0 {
|
||||
return
|
||||
}
|
||||
if v := ms / mult; v > 0 {
|
||||
r += fmt.Sprintf("%d%s", v, unit)
|
||||
ms -= v * mult
|
||||
}
|
||||
}
|
||||
|
||||
switch int64(0) {
|
||||
case ms % factors["y"]:
|
||||
unit = "y"
|
||||
case ms % factors["w"]:
|
||||
unit = "w"
|
||||
case ms % factors["d"]:
|
||||
unit = "d"
|
||||
case ms % factors["h"]:
|
||||
unit = "h"
|
||||
case ms % factors["m"]:
|
||||
unit = "m"
|
||||
case ms % factors["s"]:
|
||||
unit = "s"
|
||||
}
|
||||
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
|
||||
// Only format years and weeks if the remainder is zero, as it is often
|
||||
// easier to read 90d than 12w6d.
|
||||
f("y", 1000*60*60*24*365, true)
|
||||
f("w", 1000*60*60*24*7, true)
|
||||
|
||||
f("d", 1000*60*60*24, false)
|
||||
f("h", 1000*60*60, false)
|
||||
f("m", 1000*60, false)
|
||||
f("s", 1000, false)
|
||||
f("ms", 1, false)
|
||||
|
||||
return r
|
||||
}
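A small sketch of the behaviour change in this file, assuming the `prometheus/common/model` package vendored by this commit: `ParseDuration` now accepts compound durations, and the reworked `String` renders multiple units, only using weeks or years when they divide the value exactly (so 90 days prints as `90d`, not `12w6d`):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// The old single-unit regexp would have rejected "1h30m".
	d, err := model.ParseDuration("1h30m")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(time.Duration(d))                    // 1h30m0s
	fmt.Println(d)                                   // 1h30m (reworked String)
	fmt.Println(model.Duration(90 * 24 * time.Hour)) // 90d, not 12w6d
}
```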
|
||||
|
||||
// MarshalYAML implements the yaml.Marshaler interface.
|
||||
|
6
vendor/github.com/prometheus/procfs/.golangci.yml
generated
vendored
@ -1,6 +1,4 @@
|
||||
# Run only staticcheck for now. Additional linters will be enabled one-by-one.
|
||||
---
|
||||
linters:
|
||||
enable:
|
||||
- staticcheck
|
||||
- govet
|
||||
disable-all: true
|
||||
- golint
|
||||
|
109
vendor/github.com/prometheus/procfs/CONTRIBUTING.md
generated
vendored
@ -2,17 +2,120 @@
|
||||
|
||||
Prometheus uses GitHub to manage reviews of pull requests.
|
||||
|
||||
* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
|
||||
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull request,
|
||||
addressing (with `@...`) the maintainer of this repository (see
|
||||
addressing (with `@...`) a suitable maintainer of this repository (see
|
||||
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
|
||||
|
||||
* If you plan to do something more involved, first discuss your ideas
|
||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
This will avoid unnecessary work and surely give you and us a good deal
|
||||
of inspiration.
|
||||
of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
|
||||
|
||||
* Relevant coding style guidelines are the [Go Code Review
|
||||
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
|
||||
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||
Practices for Production
|
||||
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
|
||||
Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
|
||||
|
||||
* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
|
||||
|
||||
## Steps to Contribute
|
||||
|
||||
Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
|
||||
|
||||
Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community).
|
||||
|
||||
For quickly compiling and testing your changes do:
|
||||
```
|
||||
make test # Make sure all the tests pass before you commit and push :)
|
||||
```
|
||||
|
||||
We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
|
||||
|
||||
## Pull Request Checklist
|
||||
|
||||
* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
|
||||
|
||||
* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
|
||||
|
||||
* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
|
||||
|
||||
* Add tests relevant to the fixed bug or new feature.
|
||||
|
||||
## Dependency management
|
||||
|
||||
The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
|
||||
|
||||
All dependencies are vendored in the `vendor/` directory.
|
||||
|
||||
To add or update a new dependency, use the `go get` command:
|
||||
|
||||
```bash
|
||||
# Pick the latest tagged release.
|
||||
go get example.com/some/module/pkg
|
||||
|
||||
# Pick a specific version.
|
||||
go get example.com/some/module/pkg@vX.Y.Z
|
||||
```
|
||||
|
||||
Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
|
||||
|
||||
|
||||
```bash
|
||||
# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
|
||||
GO111MODULE=on go mod tidy
|
||||
|
||||
GO111MODULE=on go mod vendor
|
||||
```
|
||||
|
||||
You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
|
||||
|
||||
|
||||
## API Implementation Guidelines
|
||||
|
||||
### Naming and Documentation
|
||||
|
||||
Public functions and structs should normally be named according to the file(s) being read and parsed. For example,
|
||||
the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function
|
||||
should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s).
|
||||
|
||||
### Reading vs. Parsing
|
||||
|
||||
Most functionality in this library consists of reading files and then parsing the text into structured data. In most
|
||||
cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and
|
||||
a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested
|
||||
directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types
|
||||
such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files
|
||||
in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
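A sketch of that split, with hypothetical `Thing`/`parseThing` names standing in for a real file; the `fs.proc.Path` and `util.ReadFileNoStat` calls mirror the patterns used elsewhere in this diff:

```go
package procfs

import (
	"bufio"
	"bytes"
	"io"

	"github.com/prometheus/procfs/internal/util"
)

// Thing models the contents of a hypothetical /proc/thing file.
type Thing struct {
	Lines int
}

// Thing reads /proc/thing and hands its contents to the parser.
func (fs FS) Thing() (Thing, error) {
	data, err := util.ReadFileNoStat(fs.proc.Path("thing"))
	if err != nil {
		return Thing{}, err
	}
	return parseThing(bytes.NewReader(data))
}

// parseThing works on any io.Reader, so tests can feed it fixture data
// without touching the filesystem.
func parseThing(r io.Reader) (Thing, error) {
	var t Thing
	s := bufio.NewScanner(r)
	for s.Scan() {
		t.Lines++
	}
	return t, s.Err()
}
```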
|
||||
|
||||
### /proc and /sys filesystem I/O
|
||||
|
||||
The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.
|
||||
Many of the files are changing continuously and the data being read can in some cases change between subsequent
|
||||
reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
|
||||
to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
|
||||
full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
|
||||
This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
|
||||
the file.
|
||||
|
||||
Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
|
||||
the full file, and then using a scanner on the `[]byte` or `string` containing the data.
|
||||
|
||||
```
|
||||
data, err := util.ReadFileNoStat("/proc/cpuinfo")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reader := bytes.NewReader(data)
|
||||
scanner := bufio.NewScanner(reader)
|
||||
```
|
||||
|
||||
The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
|
||||
can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
|
||||
not bother to check the size of the file before reading.
|
||||
```
|
||||
data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
|
||||
```
|
||||
|
||||
|
43
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
@ -69,12 +69,21 @@ else
|
||||
GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
|
||||
endif
|
||||
|
||||
PROMU_VERSION ?= 0.4.0
|
||||
GOTEST := $(GO) test
|
||||
GOTEST_DIR :=
|
||||
ifneq ($(CIRCLE_JOB),)
|
||||
ifneq ($(shell which gotestsum),)
|
||||
GOTEST_DIR := test-results
|
||||
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
|
||||
endif
|
||||
endif
|
||||
|
||||
PROMU_VERSION ?= 0.5.0
|
||||
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
|
||||
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.16.0
|
||||
GOLANGCI_LINT_VERSION ?= v1.18.0
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
@ -86,7 +95,8 @@ endif
|
||||
PREFIX ?= $(shell pwd)
|
||||
BIN_DIR ?= $(shell pwd)
|
||||
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
|
||||
DOCKERFILE_PATH ?= ./
|
||||
DOCKERFILE_PATH ?= ./Dockerfile
|
||||
DOCKERBUILD_CONTEXT ?= ./
|
||||
DOCKER_REPO ?= prom
|
||||
|
||||
DOCKER_ARCHS ?= amd64
|
||||
@ -140,15 +150,29 @@ else
|
||||
$(GO) get $(GOOPTS) -t ./...
|
||||
endif
|
||||
|
||||
.PHONY: update-go-deps
|
||||
update-go-deps:
|
||||
@echo ">> updating Go dependencies"
|
||||
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
|
||||
$(GO) get $$m; \
|
||||
done
|
||||
GO111MODULE=$(GO111MODULE) $(GO) mod tidy
|
||||
ifneq (,$(wildcard vendor))
|
||||
GO111MODULE=$(GO111MODULE) $(GO) mod vendor
|
||||
endif
|
||||
|
||||
.PHONY: common-test-short
|
||||
common-test-short:
|
||||
common-test-short: $(GOTEST_DIR)
|
||||
@echo ">> running short tests"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs)
|
||||
GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
|
||||
|
||||
.PHONY: common-test
|
||||
common-test:
|
||||
common-test: $(GOTEST_DIR)
|
||||
@echo ">> running all tests"
|
||||
GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs)
|
||||
GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
|
||||
|
||||
$(GOTEST_DIR):
|
||||
@mkdir -p $@
|
||||
|
||||
.PHONY: common-format
|
||||
common-format:
|
||||
@ -200,7 +224,7 @@ endif
|
||||
.PHONY: common-build
|
||||
common-build: promu
|
||||
@echo ">> building binaries"
|
||||
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX)
|
||||
GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
|
||||
|
||||
.PHONY: common-tarball
|
||||
common-tarball: promu
|
||||
@ -211,9 +235,10 @@ common-tarball: promu
|
||||
common-docker: $(BUILD_DOCKER_ARCHS)
|
||||
$(BUILD_DOCKER_ARCHS): common-docker-%:
|
||||
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
|
||||
-f $(DOCKERFILE_PATH) \
|
||||
--build-arg ARCH="$*" \
|
||||
--build-arg OS="linux" \
|
||||
$(DOCKERFILE_PATH)
|
||||
$(DOCKERBUILD_CONTEXT)
|
||||
|
||||
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
|
||||
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
|
||||
|
16
vendor/github.com/prometheus/procfs/README.md
generated
vendored
@ -1,6 +1,6 @@
|
||||
# procfs
|
||||
|
||||
This procfs package provides functions to retrieve system, kernel and process
|
||||
This package provides functions to retrieve system, kernel, and process
|
||||
metrics from the pseudo-filesystems /proc and /sys.
|
||||
|
||||
*WARNING*: This package is a work in progress. Its API may still break in
|
||||
@ -13,7 +13,8 @@ backwards-incompatible ways without warnings. Use it at your own risk.
|
||||
## Usage
|
||||
|
||||
The procfs library is organized by packages based on whether the gathered data is coming from
|
||||
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, /sys, or both. For example, current cpu statistics are gathered from
|
||||
/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc,
|
||||
/sys, or both. For example, cpu statistics are gathered from
|
||||
`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount
|
||||
point is initialized, and then the stat information is read.
|
||||
|
||||
@ -29,10 +30,17 @@ Some sub-packages such as `blockdevice`, require access to both the proc and sys
|
||||
stats, err := fs.ProcDiskstats()
|
||||
```
|
||||
|
||||
## Package Organization
|
||||
|
||||
The packages in this project are organized according to (1) whether the data comes from the `/proc` or
|
||||
`/sys` filesystem and (2) the type of information being retrieved. For example, most process information
|
||||
can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives
|
||||
is available in the `blockdevices` sub-package.
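For example, a minimal consumer of the root package, assuming the `procfs.NewFS` constructor and the `CPUInfo` accessor touched by this update:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// "/proc" is the usual mount point; it is configurable for tests.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	infos, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d logical CPUs\n", len(infos))
}
```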
|
||||
|
||||
## Building and Testing
|
||||
|
||||
The procfs library is normally built as part of another application. However, when making
|
||||
changes to the library, the `make test` command can be used to run the API test suite.
|
||||
The procfs library is intended to be built as part of another application, so there are no distributable binaries.
|
||||
However, most of the API includes unit tests which can be run with `make test`.
|
||||
|
||||
### Updating Test Fixtures
|
||||
|
||||
|
270
vendor/github.com/prometheus/procfs/cpuinfo.go
generated
vendored
@ -11,14 +11,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"errors"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
|
||||
@ -51,24 +56,41 @@ type CPUInfo struct {
|
||||
PowerManagement string
|
||||
}
|
||||
|
||||
var (
|
||||
cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`)
|
||||
cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
|
||||
)
|
||||
|
||||
// CPUInfo returns information about current system CPUs.
|
||||
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
|
||||
func (fs FS) CPUInfo() ([]CPUInfo, error) {
|
||||
data, err := ioutil.ReadFile(fs.proc.Path("cpuinfo"))
|
||||
data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return parseCPUInfo(data)
|
||||
}
|
||||
|
||||
// parseCPUInfo parses data from /proc/cpuinfo
|
||||
func parseCPUInfo(info []byte) ([]CPUInfo, error) {
|
||||
cpuinfo := []CPUInfo{}
|
||||
i := -1
|
||||
func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(info))
|
||||
|
||||
// find the first "processor" line
|
||||
firstLine := firstNonEmptyLine(scanner)
|
||||
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
|
||||
return nil, errors.New("invalid cpuinfo file: " + firstLine)
|
||||
}
|
||||
field := strings.SplitN(firstLine, ": ", 2)
|
||||
v, err := strconv.ParseUint(field[1], 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
firstcpu := CPUInfo{Processor: uint(v)}
|
||||
cpuinfo := []CPUInfo{firstcpu}
|
||||
i := 0
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.TrimSpace(line) == "" {
|
||||
if !strings.Contains(line, ":") {
|
||||
continue
|
||||
}
|
||||
field := strings.SplitN(line, ": ", 2)
|
||||
@ -81,7 +103,7 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
|
||||
return nil, err
|
||||
}
|
||||
cpuinfo[i].Processor = uint(v)
|
||||
case "vendor_id":
|
||||
case "vendor", "vendor_id":
|
||||
cpuinfo[i].VendorID = field[1]
|
||||
case "cpu family":
|
||||
cpuinfo[i].CPUFamily = field[1]
|
||||
@ -162,5 +184,237 @@ func parseCPUInfo(info []byte) ([]CPUInfo, error) {
|
||||
}
|
||||
}
|
||||
return cpuinfo, nil
|
||||
}
|
||||
|
||||
func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(info))
|
||||
|
||||
firstLine := firstNonEmptyLine(scanner)
|
||||
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
|
||||
if !match || !strings.Contains(firstLine, ":") {
|
||||
return nil, errors.New("invalid cpuinfo file: " + firstLine)
|
||||
}
|
||||
field := strings.SplitN(firstLine, ": ", 2)
|
||||
cpuinfo := []CPUInfo{}
|
||||
featuresLine := ""
|
||||
commonCPUInfo := CPUInfo{}
|
||||
i := 0
|
||||
if strings.TrimSpace(field[0]) == "Processor" {
|
||||
commonCPUInfo = CPUInfo{ModelName: field[1]}
|
||||
i = -1
|
||||
} else {
|
||||
v, err := strconv.ParseUint(field[1], 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
firstcpu := CPUInfo{Processor: uint(v)}
|
||||
cpuinfo = []CPUInfo{firstcpu}
|
||||
}
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.Contains(line, ":") {
|
||||
continue
|
||||
}
|
||||
field := strings.SplitN(line, ": ", 2)
|
||||
switch strings.TrimSpace(field[0]) {
|
||||
case "processor":
|
||||
cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
|
||||
i++
|
||||
v, err := strconv.ParseUint(field[1], 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuinfo[i].Processor = uint(v)
|
||||
case "BogoMIPS":
|
||||
if i == -1 {
|
||||
cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
|
||||
i++
|
||||
cpuinfo[i].Processor = 0
|
||||
}
|
||||
v, err := strconv.ParseFloat(field[1], 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuinfo[i].BogoMips = v
|
||||
case "Features":
|
||||
featuresLine = line
|
||||
case "model name":
|
||||
cpuinfo[i].ModelName = field[1]
|
||||
}
|
||||
}
|
||||
fields := strings.SplitN(featuresLine, ": ", 2)
|
||||
for i := range cpuinfo {
|
||||
cpuinfo[i].Flags = strings.Fields(fields[1])
|
||||
}
|
||||
return cpuinfo, nil
|
||||
|
||||
}
|
||||
|
||||
func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(info))
|
||||
|
||||
firstLine := firstNonEmptyLine(scanner)
|
||||
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
|
||||
return nil, errors.New("invalid cpuinfo file: " + firstLine)
|
||||
}
|
||||
field := strings.SplitN(firstLine, ": ", 2)
|
||||
cpuinfo := []CPUInfo{}
|
||||
commonCPUInfo := CPUInfo{VendorID: field[1]}
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.Contains(line, ":") {
|
||||
continue
|
||||
}
|
||||
field := strings.SplitN(line, ": ", 2)
|
||||
switch strings.TrimSpace(field[0]) {
|
||||
case "bogomips per cpu":
|
||||
v, err := strconv.ParseFloat(field[1], 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
commonCPUInfo.BogoMips = v
|
||||
case "features":
|
||||
commonCPUInfo.Flags = strings.Fields(field[1])
|
||||
}
|
||||
if strings.HasPrefix(line, "processor") {
|
||||
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
|
||||
if len(match) < 2 {
|
||||
return nil, errors.New("Invalid line found in cpuinfo: " + line)
|
||||
}
|
||||
cpu := commonCPUInfo
|
||||
v, err := strconv.ParseUint(match[1], 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpu.Processor = uint(v)
|
||||
cpuinfo = append(cpuinfo, cpu)
|
||||
}
|
||||
if strings.HasPrefix(line, "cpu number") {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
i := 0
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.Contains(line, ":") {
|
||||
continue
|
||||
}
|
||||
field := strings.SplitN(line, ": ", 2)
|
||||
switch strings.TrimSpace(field[0]) {
|
||||
case "cpu number":
|
||||
i++
|
||||
case "cpu MHz dynamic":
|
||||
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
|
||||
v, err := strconv.ParseFloat(clock, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuinfo[i].CPUMHz = v
|
||||
}
|
||||
}
|
||||
|
||||
return cpuinfo, nil
|
||||
}
|
||||
|
||||
func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(info))
|
||||
|
||||
// find the first "processor" line
|
||||
firstLine := firstNonEmptyLine(scanner)
|
||||
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
|
||||
return nil, errors.New("invalid cpuinfo file: " + firstLine)
|
||||
}
|
||||
field := strings.SplitN(firstLine, ": ", 2)
|
||||
cpuinfo := []CPUInfo{}
|
||||
systemType := field[1]
|
||||
|
||||
i := 0
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.Contains(line, ":") {
|
||||
continue
|
||||
}
|
||||
field := strings.SplitN(line, ": ", 2)
|
||||
switch strings.TrimSpace(field[0]) {
|
||||
case "processor":
|
||||
v, err := strconv.ParseUint(field[1], 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i = int(v)
|
||||
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
|
||||
cpuinfo[i].Processor = uint(v)
|
||||
cpuinfo[i].VendorID = systemType
|
||||
case "cpu model":
|
||||
cpuinfo[i].ModelName = field[1]
|
||||
case "BogoMIPS":
|
||||
v, err := strconv.ParseFloat(field[1], 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuinfo[i].BogoMips = v
|
||||
}
|
||||
}
|
||||
return cpuinfo, nil
|
||||
}
|
||||
|
||||
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(info))
|
||||
|
||||
firstLine := firstNonEmptyLine(scanner)
|
||||
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
|
||||
return nil, errors.New("invalid cpuinfo file: " + firstLine)
|
||||
}
|
||||
field := strings.SplitN(firstLine, ": ", 2)
|
||||
v, err := strconv.ParseUint(field[1], 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
firstcpu := CPUInfo{Processor: uint(v)}
|
||||
cpuinfo := []CPUInfo{firstcpu}
|
||||
i := 0
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.Contains(line, ":") {
|
||||
continue
|
||||
}
|
||||
field := strings.SplitN(line, ": ", 2)
|
||||
switch strings.TrimSpace(field[0]) {
|
||||
case "processor":
|
||||
cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
|
||||
i++
|
||||
v, err := strconv.ParseUint(field[1], 0, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuinfo[i].Processor = uint(v)
|
||||
case "cpu":
|
||||
cpuinfo[i].VendorID = field[1]
|
||||
case "clock":
|
||||
clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
|
||||
v, err := strconv.ParseFloat(clock, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cpuinfo[i].CPUMHz = v
|
||||
}
|
||||
}
|
||||
return cpuinfo, nil
|
||||
}
|
||||
|
||||
// firstNonEmptyLine advances the scanner to the first non-empty line
|
||||
// and returns the contents of that line
|
||||
func firstNonEmptyLine(scanner *bufio.Scanner) string {
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.TrimSpace(line) != "" {
|
||||
return line
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
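The per-architecture `cpuinfo_*.go` files added below select one of these parsers at build time by assigning a package-level variable under build tags. A compressed sketch of the pattern with hypothetical names (in the real code the parsers live in this shared file and only the one-line assignment is per GOARCH):

```go
// +build linux
// +build arm64

package example

// parseFunc is the architecture-neutral signature every parser satisfies.
type parseFunc func(data []byte) (int, error)

// parse is assigned per architecture; shared code only ever calls parse,
// never a concrete parser.
var parse parseFunc = parseARM

// parseARM stands in for an architecture-specific parser.
func parseARM(data []byte) (int, error) {
	return len(data), nil
}
```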
|
||||
|
18
vendor/github.com/prometheus/procfs/cpuinfo_arm.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoARM
|
19
vendor/github.com/prometheus/procfs/cpuinfo_arm64.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
// +build arm64
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoARM
|
19
vendor/github.com/prometheus/procfs/cpuinfo_default.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
// +build 386 amd64
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoX86
|
18
vendor/github.com/prometheus/procfs/cpuinfo_mips.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoMips
|
18
vendor/github.com/prometheus/procfs/cpuinfo_mips64.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoMips
|
18
vendor/github.com/prometheus/procfs/cpuinfo_mips64le.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoMips
|
18
vendor/github.com/prometheus/procfs/cpuinfo_mipsle.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoMips
|
18
vendor/github.com/prometheus/procfs/cpuinfo_ppc64.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoPPC
|
18
vendor/github.com/prometheus/procfs/cpuinfo_ppc64le.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoPPC
|
18
vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package procfs
|
||||
|
||||
var parseCPUInfo = parseCPUInfoS390X
|
158
vendor/github.com/prometheus/procfs/crypto.go
generated
vendored
@ -14,10 +14,10 @@
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
@ -52,80 +52,102 @@ type Crypto struct {
|
||||
// structs containing the relevant info. More information available here:
|
||||
// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
|
||||
func (fs FS) Crypto() ([]Crypto, error) {
|
||||
data, err := ioutil.ReadFile(fs.proc.Path("crypto"))
|
||||
path := fs.proc.Path("crypto")
|
||||
b, err := util.ReadFileNoStat(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
|
||||
return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
|
||||
}
|
||||
crypto, err := parseCrypto(data)
|
||||
|
||||
crypto, err := parseCrypto(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err)
|
||||
return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
|
||||
}
|
||||
|
||||
return crypto, nil
|
||||
}
|
||||
|
||||
func parseCrypto(cryptoData []byte) ([]Crypto, error) {
|
||||
crypto := []Crypto{}
|
||||
// parseCrypto parses a /proc/crypto stream into Crypto elements.
|
||||
func parseCrypto(r io.Reader) ([]Crypto, error) {
|
||||
var out []Crypto
|
||||
|
||||
cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n"))
|
||||
|
||||
for _, block := range cryptoBlocks {
|
||||
var newCryptoElem Crypto
|
||||
|
||||
lines := strings.Split(string(block), "\n")
|
||||
for _, line := range lines {
|
||||
if strings.TrimSpace(line) == "" || line[0] == ' ' {
|
||||
continue
|
||||
}
|
||||
fields := strings.Split(line, ":")
|
||||
key := strings.TrimSpace(fields[0])
|
||||
value := strings.TrimSpace(fields[1])
|
||||
vp := util.NewValueParser(value)
|
||||
|
||||
switch strings.TrimSpace(key) {
|
||||
case "async":
|
||||
b, err := strconv.ParseBool(value)
|
||||
if err == nil {
|
||||
newCryptoElem.Async = b
|
||||
}
|
||||
case "blocksize":
|
||||
newCryptoElem.Blocksize = vp.PUInt64()
|
||||
case "chunksize":
|
||||
newCryptoElem.Chunksize = vp.PUInt64()
|
||||
case "digestsize":
|
||||
newCryptoElem.Digestsize = vp.PUInt64()
|
||||
case "driver":
|
||||
newCryptoElem.Driver = value
|
||||
case "geniv":
|
||||
newCryptoElem.Geniv = value
|
||||
case "internal":
|
||||
newCryptoElem.Internal = value
|
||||
case "ivsize":
|
||||
newCryptoElem.Ivsize = vp.PUInt64()
|
||||
case "maxauthsize":
|
||||
newCryptoElem.Maxauthsize = vp.PUInt64()
|
||||
case "max keysize":
|
||||
newCryptoElem.MaxKeysize = vp.PUInt64()
|
||||
case "min keysize":
|
||||
newCryptoElem.MinKeysize = vp.PUInt64()
|
||||
case "module":
|
||||
newCryptoElem.Module = value
|
||||
case "name":
|
||||
newCryptoElem.Name = value
|
||||
case "priority":
|
||||
newCryptoElem.Priority = vp.PInt64()
|
||||
case "refcnt":
|
||||
newCryptoElem.Refcnt = vp.PInt64()
|
||||
case "seedsize":
|
||||
newCryptoElem.Seedsize = vp.PUInt64()
|
||||
case "selftest":
|
||||
newCryptoElem.Selftest = value
|
||||
case "type":
|
||||
newCryptoElem.Type = value
|
||||
case "walksize":
|
||||
newCryptoElem.Walksize = vp.PUInt64()
|
||||
}
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
text := s.Text()
|
||||
switch {
|
||||
case strings.HasPrefix(text, "name"):
|
||||
// Each crypto element begins with its name.
|
||||
out = append(out, Crypto{})
|
||||
case text == "":
|
||||
continue
|
||||
}
|
||||
|
||||
kv := strings.Split(text, ":")
|
||||
if len(kv) != 2 {
|
||||
return nil, fmt.Errorf("malformed crypto line: %q", text)
|
||||
}
|
||||
|
||||
k := strings.TrimSpace(kv[0])
|
||||
v := strings.TrimSpace(kv[1])
|
||||
|
||||
// Parse the key/value pair into the currently focused element.
|
||||
c := &out[len(out)-1]
|
||||
if err := c.parseKV(k, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crypto = append(crypto, newCryptoElem)
|
||||
}
|
||||
return crypto, nil
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// parseKV parses a key/value pair into the appropriate field of c.
|
||||
func (c *Crypto) parseKV(k, v string) error {
|
||||
vp := util.NewValueParser(v)
|
||||
|
||||
switch k {
|
||||
case "async":
|
||||
// Interpret literal yes as true.
|
||||
c.Async = v == "yes"
|
||||
case "blocksize":
|
||||
c.Blocksize = vp.PUInt64()
|
||||
case "chunksize":
|
||||
c.Chunksize = vp.PUInt64()
|
||||
case "digestsize":
|
||||
c.Digestsize = vp.PUInt64()
|
||||
case "driver":
|
||||
c.Driver = v
|
||||
case "geniv":
|
||||
c.Geniv = v
|
||||
case "internal":
|
||||
c.Internal = v
|
||||
case "ivsize":
|
||||
c.Ivsize = vp.PUInt64()
|
||||
case "maxauthsize":
|
||||
c.Maxauthsize = vp.PUInt64()
|
||||
case "max keysize":
|
||||
c.MaxKeysize = vp.PUInt64()
|
||||
case "min keysize":
|
||||
c.MinKeysize = vp.PUInt64()
|
||||
case "module":
|
||||
c.Module = v
|
||||
case "name":
|
||||
c.Name = v
|
||||
case "priority":
|
||||
c.Priority = vp.PInt64()
|
||||
case "refcnt":
|
||||
c.Refcnt = vp.PInt64()
|
||||
case "seedsize":
|
||||
c.Seedsize = vp.PUInt64()
|
||||
case "selftest":
|
||||
c.Selftest = v
|
||||
case "type":
|
||||
c.Type = v
|
||||
case "walksize":
|
||||
c.Walksize = vp.PUInt64()
|
||||
}
|
||||
|
||||
return vp.Err()
|
||||
}
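Usage of the reworked accessor is unchanged; a short sketch, assuming `procfs.NewFS` (note the new parser returns an error for malformed `key: value` lines instead of skipping them):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range entries {
		fmt.Printf("%s: driver=%s async=%v\n", c.Name, c.Driver, c.Async)
	}
}
```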
|
||||
|
1574
vendor/github.com/prometheus/procfs/fixtures.ttar
generated
vendored
File diff suppressed because it is too large
422
vendor/github.com/prometheus/procfs/fscache.go
generated
vendored
Normal file
@ -0,0 +1,422 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// Fscacheinfo represents fscache statistics.
|
||||
type Fscacheinfo struct {
|
||||
// Number of index cookies allocated
|
||||
IndexCookiesAllocated uint64
|
||||
// data storage cookies allocated
|
||||
DataStorageCookiesAllocated uint64
|
||||
// Number of special cookies allocated
|
||||
SpecialCookiesAllocated uint64
|
||||
// Number of objects allocated
|
||||
ObjectsAllocated uint64
|
||||
// Number of object allocation failures
|
||||
ObjectAllocationsFailure uint64
|
||||
// Number of objects that reached the available state
|
||||
ObjectsAvailable uint64
|
||||
// Number of objects that reached the dead state
|
||||
ObjectsDead uint64
|
||||
// Number of objects that didn't have a coherency check
|
||||
ObjectsWithoutCoherencyCheck uint64
|
||||
// Number of objects that passed a coherency check
|
||||
ObjectsWithCoherencyCheck uint64
|
||||
// Number of objects that needed a coherency data update
|
||||
ObjectsNeedCoherencyCheckUpdate uint64
|
||||
// Number of objects that were declared obsolete
|
||||
ObjectsDeclaredObsolete uint64
|
||||
// Number of pages marked as being cached
|
||||
PagesMarkedAsBeingCached uint64
|
||||
// Number of uncache page requests seen
|
||||
UncachePagesRequestSeen uint64
|
||||
// Number of acquire cookie requests seen
|
||||
AcquireCookiesRequestSeen uint64
|
||||
// Number of acq reqs given a NULL parent
|
||||
AcquireRequestsWithNullParent uint64
|
||||
// Number of acq reqs rejected due to no cache available
|
||||
AcquireRequestsRejectedNoCacheAvailable uint64
|
||||
// Number of acq reqs succeeded
|
||||
AcquireRequestsSucceeded uint64
|
||||
// Number of acq reqs rejected due to error
|
||||
AcquireRequestsRejectedDueToError uint64
|
||||
// Number of acq reqs failed on ENOMEM
|
||||
AcquireRequestsFailedDueToEnomem uint64
|
||||
// Number of lookup calls made on cache backends
|
||||
LookupsNumber uint64
|
||||
// Number of negative lookups made
|
||||
LookupsNegative uint64
|
||||
// Number of positive lookups made
|
||||
LookupsPositive uint64
|
||||
// Number of objects created by lookup
|
||||
ObjectsCreatedByLookup uint64
|
||||
// Number of lookups timed out and requeued
|
||||
LookupsTimedOutAndRequed uint64
|
||||
InvalidationsNumber uint64
|
||||
InvalidationsRunning uint64
|
||||
// Number of update cookie requests seen
|
||||
UpdateCookieRequestSeen uint64
|
||||
// Number of upd reqs given a NULL parent
|
||||
UpdateRequestsWithNullParent uint64
|
||||
// Number of upd reqs granted CPU time
|
||||
UpdateRequestsRunning uint64
|
||||
// Number of relinquish cookie requests seen
|
||||
RelinquishCookiesRequestSeen uint64
|
||||
// Number of rlq reqs given a NULL parent
|
||||
RelinquishCookiesWithNullParent uint64
|
||||
// Number of rlq reqs waited on completion of creation
|
||||
RelinquishRequestsWaitingCompleteCreation uint64
|
||||
// Relinqs rtr
|
||||
RelinquishRetries uint64
|
||||
// Number of attribute changed requests seen
|
||||
AttributeChangedRequestsSeen uint64
|
||||
// Number of attr changed requests queued
|
||||
AttributeChangedRequestsQueued uint64
|
||||
// Number of attr changed rejected -ENOBUFS
|
||||
AttributeChangedRejectDueToEnobufs uint64
|
||||
// Number of attr changed failed -ENOMEM
|
||||
AttributeChangedFailedDueToEnomem uint64
|
||||
// Number of attr changed ops given CPU time
|
||||
AttributeChangedOps uint64
|
||||
// Number of allocation requests seen
|
||||
AllocationRequestsSeen uint64
|
||||
// Number of successful alloc reqs
|
||||
AllocationOkRequests uint64
|
||||
// Number of alloc reqs that waited on lookup completion
|
||||
AllocationWaitingOnLookup uint64
|
||||
// Number of alloc reqs rejected -ENOBUFS
|
||||
AllocationsRejectedDueToEnobufs uint64
|
||||
// Number of alloc reqs aborted -ERESTARTSYS
|
||||
AllocationsAbortedDueToErestartsys uint64
|
||||
// Number of alloc reqs submitted
|
||||
AllocationOperationsSubmitted uint64
|
||||
// Number of alloc reqs waited for CPU time
|
||||
AllocationsWaitedForCPU uint64
|
||||
// Number of alloc reqs aborted due to object death
|
||||
AllocationsAbortedDueToObjectDeath uint64
|
||||
// Number of retrieval (read) requests seen
|
||||
RetrievalsReadRequests uint64
|
||||
// Number of successful retr reqs
|
||||
RetrievalsOk uint64
|
||||
// Number of retr reqs that waited on lookup completion
|
||||
RetrievalsWaitingLookupCompletion uint64
|
||||
// Number of retr reqs returned -ENODATA
|
||||
RetrievalsReturnedEnodata uint64
|
||||
// Number of retr reqs rejected -ENOBUFS
|
||||
RetrievalsRejectedDueToEnobufs uint64
|
||||
// Number of retr reqs aborted -ERESTARTSYS
|
||||
RetrievalsAbortedDueToErestartsys uint64
|
||||
// Number of retr reqs failed -ENOMEM
|
||||
RetrievalsFailedDueToEnomem uint64
|
||||
// Number of retr reqs submitted
|
||||
RetrievalsRequests uint64
|
||||
// Number of retr reqs waited for CPU time
|
||||
RetrievalsWaitingCPU uint64
|
||||
// Number of retr reqs aborted due to object death
|
||||
RetrievalsAbortedDueToObjectDeath uint64
|
||||
// Number of storage (write) requests seen
|
||||
StoreWriteRequests uint64
|
||||
// Number of successful store reqs
|
||||
StoreSuccessfulRequests uint64
|
||||
// Number of store reqs on a page already pending storage
|
||||
StoreRequestsOnPendingStorage uint64
|
||||
// Number of store reqs rejected -ENOBUFS
|
||||
StoreRequestsRejectedDueToEnobufs uint64
|
||||
// Number of store reqs failed -ENOMEM
|
||||
StoreRequestsFailedDueToEnomem uint64
|
||||
// Number of store reqs submitted
|
||||
StoreRequestsSubmitted uint64
|
||||
// Number of store reqs granted CPU time
|
||||
StoreRequestsRunning uint64
|
||||
// Number of pages given store req processing time
|
||||
StorePagesWithRequestsProcessing uint64
|
||||
// Number of store reqs deleted from tracking tree
|
||||
StoreRequestsDeleted uint64
|
||||
// Number of store reqs over store limit
|
||||
StoreRequestsOverStoreLimit uint64
|
||||
// Number of release reqs against pages with no pending store
|
||||
ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
|
||||
// Number of release reqs against pages stored by time lock granted
|
||||
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
|
||||
// Number of release reqs ignored due to in-progress store
|
||||
ReleaseRequestsIgnoredDueToInProgressStore uint64
|
||||
// Number of page stores cancelled due to release req
|
||||
PageStoresCancelledByReleaseRequests uint64
|
||||
VmscanWaiting uint64
|
||||
// Number of times async ops added to pending queues
|
||||
OpsPending uint64
|
||||
// Number of times async ops given CPU time
|
||||
OpsRunning uint64
|
||||
// Number of times async ops queued for processing
|
||||
OpsEnqueued uint64
|
||||
// Number of async ops cancelled
|
||||
OpsCancelled uint64
|
||||
// Number of async ops rejected due to object lookup/create failure
|
||||
OpsRejected uint64
|
||||
// Number of async ops initialised
|
||||
OpsInitialised uint64
|
||||
// Number of async ops queued for deferred release
|
||||
OpsDeferred uint64
|
||||
// Number of async ops released (should equal ini=N when idle)
|
||||
OpsReleased uint64
|
||||
// Number of deferred-release async ops garbage collected
|
||||
OpsGarbageCollected uint64
|
||||
// Number of in-progress alloc_object() cache ops
|
||||
CacheopAllocationsinProgress uint64
|
||||
// Number of in-progress lookup_object() cache ops
|
||||
CacheopLookupObjectInProgress uint64
|
||||
// Number of in-progress lookup_complete() cache ops
|
||||
CacheopLookupCompleteInPorgress uint64
|
||||
// Number of in-progress grab_object() cache ops
|
||||
CacheopGrabObjectInProgress uint64
|
||||
CacheopInvalidations uint64
|
||||
// Number of in-progress update_object() cache ops
|
||||
CacheopUpdateObjectInProgress uint64
|
||||
// Number of in-progress drop_object() cache ops
|
||||
CacheopDropObjectInProgress uint64
|
||||
// Number of in-progress put_object() cache ops
|
||||
CacheopPutObjectInProgress uint64
|
||||
// Number of in-progress attr_changed() cache ops
|
||||
CacheopAttributeChangeInProgress uint64
|
||||
// Number of in-progress sync_cache() cache ops
|
||||
CacheopSyncCacheInProgress uint64
|
||||
// Number of in-progress read_or_alloc_page() cache ops
|
||||
CacheopReadOrAllocPageInProgress uint64
|
||||
// Number of in-progress read_or_alloc_pages() cache ops
|
||||
CacheopReadOrAllocPagesInProgress uint64
|
||||
// Number of in-progress allocate_page() cache ops
|
||||
CacheopAllocatePageInProgress uint64
|
||||
// Number of in-progress allocate_pages() cache ops
|
||||
CacheopAllocatePagesInProgress uint64
|
||||
// Number of in-progress write_page() cache ops
|
||||
CacheopWritePagesInProgress uint64
|
||||
// Number of in-progress uncache_page() cache ops
|
||||
CacheopUncachePagesInProgress uint64
|
||||
// Number of in-progress dissociate_pages() cache ops
|
||||
CacheopDissociatePagesInProgress uint64
|
||||
// Number of object lookups/creations rejected due to lack of space
|
||||
CacheevLookupsAndCreationsRejectedLackSpace uint64
|
||||
// Number of stale objects deleted
|
||||
CacheevStaleObjectsDeleted uint64
|
||||
// Number of objects retired when relinquished
|
||||
CacheevRetiredWhenReliquished uint64
|
||||
// Number of objects culled
|
||||
CacheevObjectsCulled uint64
|
||||
}
|
||||
|
||||
// Fscacheinfo returns information about current fscache statistics.
|
||||
// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
|
||||
func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
|
||||
b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
|
||||
if err != nil {
|
||||
return Fscacheinfo{}, err
|
||||
}
|
||||
|
||||
m, err := parseFscacheinfo(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
|
||||
}
|
||||
|
||||
return *m, nil
|
||||
}
|
||||
|
||||
func setFSCacheFields(fields []string, setFields ...*uint64) error {
|
||||
var err error
|
||||
if len(fields) < len(setFields) {
|
||||
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
|
||||
}
|
||||
|
||||
for i := range setFields {
|
||||
*setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
|
||||
var m Fscacheinfo
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
|
||||
}
|
||||
|
||||
switch fields[0] {
|
||||
case "Cookies:":
|
||||
err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
|
||||
&m.SpecialCookiesAllocated)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Objects:":
|
||||
err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
|
||||
&m.ObjectsAvailable, &m.ObjectsDead)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "ChkAux":
|
||||
err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
|
||||
&m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Pages":
|
||||
err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Acquire:":
|
||||
err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
|
||||
&m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
|
||||
&m.AcquireRequestsFailedDueToEnomem)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Lookups:":
|
||||
err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
|
||||
&m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Invals":
|
||||
err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Updates:":
|
||||
err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
|
||||
&m.UpdateRequestsRunning)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Relinqs:":
|
||||
err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
|
||||
&m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "AttrChg:":
|
||||
err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
|
||||
&m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Allocs":
|
||||
if strings.Split(fields[2], "=")[0] == "n" {
|
||||
err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
|
||||
&m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
|
||||
&m.AllocationsAbortedDueToObjectDeath)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "Retrvls:":
|
||||
if strings.Split(fields[1], "=")[0] == "n" {
|
||||
err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
|
||||
&m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
|
||||
&m.RetrievalsFailedDueToEnomem)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "Stores":
|
||||
if strings.Split(fields[2], "=")[0] == "n" {
|
||||
err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
|
||||
&m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
|
||||
&m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "VmScan":
|
||||
err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
|
||||
&m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
|
||||
&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
case "Ops":
|
||||
if strings.Split(fields[2], "=")[0] == "pend" {
|
||||
err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "CacheOp:":
|
||||
if strings.Split(fields[1], "=")[0] == "alo" {
|
||||
err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
|
||||
&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else if strings.Split(fields[1], "=")[0] == "inv" {
|
||||
err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
|
||||
&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
|
||||
&m.CacheopSyncCacheInProgress)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
} else {
|
||||
err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
|
||||
&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
|
||||
&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
case "CacheEv:":
|
||||
err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
|
||||
&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
|
||||
if err != nil {
|
||||
return &m, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &m, nil
|
||||
}
|
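A minimal usage sketch for the new Fscacheinfo reader above, assuming the package's public procfs.NewFS and procfs.DefaultMountPoint entry points (defined outside this hunk); the field names come from the struct added in this file:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Open the proc filesystem abstraction at the default /proc location.
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// Read and parse /proc/fs/fscache/stats in one call.
	info, err := fs.Fscacheinfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("lookups:", info.LookupsNumber, "stores:", info.StoreWriteRequests)
}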
7
vendor/github.com/prometheus/procfs/go.mod
generated
vendored
@ -1,6 +1,9 @@
|
||||
module github.com/prometheus/procfs
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/google/go-cmp v0.3.0
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4
|
||||
github.com/google/go-cmp v0.3.1
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
|
||||
)
|
||||
|
10
vendor/github.com/prometheus/procfs/go.sum
generated
vendored
@ -1,4 +1,6 @@
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
2
vendor/github.com/prometheus/procfs/internal/fs/fs.go
generated
vendored
@ -26,7 +26,7 @@ const (
|
||||
// DefaultSysMountPoint is the common mount point of the sys filesystem.
|
||||
DefaultSysMountPoint = "/sys"
|
||||
|
||||
// DefaultConfigfsMountPoint is the commont mount point of the configfs
|
||||
// DefaultConfigfsMountPoint is the common mount point of the configfs
|
||||
DefaultConfigfsMountPoint = "/sys/kernel/config"
|
||||
)
|
||||
|
||||
|
9
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
@ -73,6 +73,15 @@ func ReadUintFromFile(path string) (uint64, error) {
|
||||
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
||||
// ReadIntFromFile reads a file and attempts to parse an int64 from it.
|
||||
func ReadIntFromFile(path string) (int64, error) {
|
||||
data, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
||||
// ParseBool parses a string into a boolean pointer.
|
||||
func ParseBool(b string) *bool {
|
||||
var truth bool
|
||||
|
38
vendor/github.com/prometheus/procfs/internal/util/readfile.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
|
||||
// This is similar to ioutil.ReadFile but without the call to os.Stat, because
|
||||
// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
|
||||
// Reads a max file size of 512kB. For files larger than this, a scanner
|
||||
// should be used.
|
||||
func ReadFileNoStat(filename string) ([]byte, error) {
|
||||
const maxBufferSize = 1024 * 512
|
||||
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
reader := io.LimitReader(f, maxBufferSize)
|
||||
return ioutil.ReadAll(reader)
|
||||
}
|
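ReadFileNoStat sits in procfs's internal/util package and cannot be imported from outside the module; the sketch below only re-creates the same size-capped read pattern for illustration (the 512kB cap mirrors maxBufferSize above):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
)

// readCapped mirrors ReadFileNoStat: open the file, cap the read with
// io.LimitReader, and read the rest, avoiding the misleading sizes
// (0 or 4096) that many /proc and /sys files report to os.Stat.
func readCapped(name string) ([]byte, error) {
	const maxBufferSize = 1024 * 512

	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return ioutil.ReadAll(io.LimitReader(f, maxBufferSize))
}

func main() {
	b, err := readCapped("/proc/loadavg")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", b)
}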
5
vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
generated
vendored
@ -23,6 +23,8 @@ import (
|
||||
|
||||
// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
|
||||
// https://github.com/prometheus/node_exporter/pull/728/files
|
||||
//
|
||||
// Note that this function will not read files larger than 128 bytes.
|
||||
func SysReadFile(file string) (string, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
@ -35,7 +37,8 @@ func SysReadFile(file string) (string, error) {
|
||||
//
|
||||
// Since we either want to read data or bail immediately, do the simplest
|
||||
// possible read using syscall directly.
|
||||
b := make([]byte, 128)
|
||||
const sysFileBufferSize = 128
|
||||
b := make([]byte, sysFileBufferSize)
|
||||
n, err := syscall.Read(int(f.Fd()), b)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
18
vendor/github.com/prometheus/procfs/internal/util/valueparser.go
generated
vendored
@ -33,6 +33,9 @@ func NewValueParser(v string) *ValueParser {
|
||||
return &ValueParser{v: v}
|
||||
}
|
||||
|
||||
// Int interprets the underlying value as an int and returns that value.
|
||||
func (vp *ValueParser) Int() int { return int(vp.int64()) }
|
||||
|
||||
// PInt64 interprets the underlying value as an int64 and returns a pointer to
|
||||
// that value.
|
||||
func (vp *ValueParser) PInt64() *int64 {
|
||||
@ -40,16 +43,27 @@ func (vp *ValueParser) PInt64() *int64 {
|
||||
return nil
|
||||
}
|
||||
|
||||
v := vp.int64()
|
||||
return &v
|
||||
}
|
||||
|
||||
// int64 interprets the underlying value as an int64 and returns that value.
|
||||
// TODO: export if/when necessary.
|
||||
func (vp *ValueParser) int64() int64 {
|
||||
if vp.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
// A base value of zero makes ParseInt infer the correct base using the
|
||||
// string's prefix, if any.
|
||||
const base = 0
|
||||
v, err := strconv.ParseInt(vp.v, base, 64)
|
||||
if err != nil {
|
||||
vp.err = err
|
||||
return nil
|
||||
return 0
|
||||
}
|
||||
|
||||
return &v
|
||||
return v
|
||||
}
|
||||
|
||||
// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
|
||||
|
12
vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
@ -15,6 +15,7 @@ package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -24,6 +25,8 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
|
||||
@ -64,17 +67,16 @@ type IPVSBackendStatus struct {
|
||||
|
||||
// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
|
||||
func (fs FS) IPVSStats() (IPVSStats, error) {
|
||||
file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
|
||||
data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return parseIPVSStats(file)
|
||||
return parseIPVSStats(bytes.NewReader(data))
|
||||
}
|
||||
|
||||
// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
|
||||
func parseIPVSStats(file io.Reader) (IPVSStats, error) {
|
||||
func parseIPVSStats(r io.Reader) (IPVSStats, error) {
|
||||
var (
|
||||
statContent []byte
|
||||
statLines []string
|
||||
@ -82,7 +84,7 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
|
||||
stats IPVSStats
|
||||
)
|
||||
|
||||
statContent, err := ioutil.ReadAll(file)
|
||||
statContent, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
|
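A short usage sketch for the IPVSStats reader touched above, again assuming procfs.NewFS and procfs.DefaultMountPoint from the public API; the result is printed with %+v since the IPVSStats fields themselves are not shown in this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// Reads and parses /proc/net/ip_vs_stats in one call.
	stats, err := fs.IPVSStats()
	if err != nil {
		log.Fatal(err) // typically a not-exist error when IPVS is not enabled
	}
	fmt.Printf("%+v\n", stats)
}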
62
vendor/github.com/prometheus/procfs/kernel_random.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// KernelRandom contains information about the kernel's random number generator.
|
||||
type KernelRandom struct {
|
||||
// EntropyAvaliable gives the available entropy, in bits.
|
||||
EntropyAvaliable *uint64
|
||||
// PoolSize gives the size of the entropy pool, in bytes.
|
||||
PoolSize *uint64
|
||||
// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
|
||||
URandomMinReseedSeconds *uint64
|
||||
// WriteWakeupThreshold is the number of bits of entropy below which we wake up processes
|
||||
// that do a select(2) or poll(2) for write access to /dev/random.
|
||||
WriteWakeupThreshold *uint64
|
||||
// ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
|
||||
// waiting for entropy from /dev/random.
|
||||
ReadWakeupThreshold *uint64
|
||||
}
|
||||
|
||||
// KernelRandom returns values from /proc/sys/kernel/random.
|
||||
func (fs FS) KernelRandom() (KernelRandom, error) {
|
||||
random := KernelRandom{}
|
||||
|
||||
for file, p := range map[string]**uint64{
|
||||
"entropy_avail": &random.EntropyAvaliable,
|
||||
"poolsize": &random.PoolSize,
|
||||
"urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
|
||||
"write_wakeup_threshold": &random.WriteWakeupThreshold,
|
||||
"read_wakeup_threshold": &random.ReadWakeupThreshold,
|
||||
} {
|
||||
val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return random, err
|
||||
}
|
||||
*p = &val
|
||||
}
|
||||
|
||||
return random, nil
|
||||
}
|
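A usage sketch for the new KernelRandom API, assuming procfs.NewFS/DefaultMountPoint from the public API; the nil checks reflect that each field is a pointer and may be absent on some kernels:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	kr, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}
	// Fields are pointers because individual files under
	// /proc/sys/kernel/random may not exist on every kernel.
	if kr.EntropyAvaliable != nil {
		fmt.Println("entropy available (bits):", *kr.EntropyAvaliable)
	}
	if kr.PoolSize != nil {
		fmt.Println("pool size:", *kr.PoolSize)
	}
}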
62
vendor/github.com/prometheus/procfs/loadavg.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// LoadAvg represents an entry in /proc/loadavg
|
||||
type LoadAvg struct {
|
||||
Load1 float64
|
||||
Load5 float64
|
||||
Load15 float64
|
||||
}
|
||||
|
||||
// LoadAvg returns loadavg from /proc.
|
||||
func (fs FS) LoadAvg() (*LoadAvg, error) {
|
||||
path := fs.proc.Path("loadavg")
|
||||
|
||||
data, err := util.ReadFileNoStat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return parseLoad(data)
|
||||
}
|
||||
|
||||
// Parse /proc/loadavg and return the 1m, 5m and 15m load averages.
|
||||
func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
|
||||
loads := make([]float64, 3)
|
||||
parts := strings.Fields(string(loadavgBytes))
|
||||
if len(parts) < 3 {
|
||||
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
|
||||
}
|
||||
|
||||
var err error
|
||||
for i, load := range parts[0:3] {
|
||||
loads[i], err = strconv.ParseFloat(load, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
|
||||
}
|
||||
}
|
||||
return &LoadAvg{
|
||||
Load1: loads[0],
|
||||
Load5: loads[1],
|
||||
Load15: loads[2],
|
||||
}, nil
|
||||
}
|
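A usage sketch for the new LoadAvg API (same procfs.NewFS assumption as in the earlier sketches):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	la, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	// 1, 5 and 15 minute load averages parsed from /proc/loadavg.
	fmt.Printf("load average: %.2f %.2f %.2f\n", la.Load1, la.Load5, la.Load15)
}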
2
vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
@ -52,7 +52,7 @@ type MDStat struct {
|
||||
func (fs FS) MDStat() ([]MDStat, error) {
|
||||
data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
|
||||
return nil, err
|
||||
}
|
||||
mdstat, err := parseMDStat(data)
|
||||
if err != nil {
|
||||
|
277
vendor/github.com/prometheus/procfs/meminfo.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// Meminfo represents memory statistics.
|
||||
type Meminfo struct {
|
||||
// Total usable ram (i.e. physical ram minus a few reserved
|
||||
// bits and the kernel binary code)
|
||||
MemTotal uint64
|
||||
// The sum of LowFree+HighFree
|
||||
MemFree uint64
|
||||
// An estimate of how much memory is available for starting
|
||||
// new applications, without swapping. Calculated from
|
||||
// MemFree, SReclaimable, the size of the file LRU lists, and
|
||||
// the low watermarks in each zone. The estimate takes into
|
||||
// account that the system needs some page cache to function
|
||||
// well, and that not all reclaimable slab will be
|
||||
// reclaimable, due to items being in use. The impact of those
|
||||
// factors will vary from system to system.
|
||||
MemAvailable uint64
|
||||
// Relatively temporary storage for raw disk blocks shouldn't
|
||||
// get tremendously large (20MB or so)
|
||||
Buffers uint64
|
||||
Cached uint64
|
||||
// Memory that once was swapped out, is swapped back in but
|
||||
// still also is in the swapfile (if memory is needed it
|
||||
// doesn't need to be swapped out AGAIN because it is already
|
||||
// in the swapfile. This saves I/O)
|
||||
SwapCached uint64
|
||||
// Memory that has been used more recently and usually not
|
||||
// reclaimed unless absolutely necessary.
|
||||
Active uint64
|
||||
// Memory which has been less recently used. It is more
|
||||
// eligible to be reclaimed for other purposes
|
||||
Inactive uint64
|
||||
ActiveAnon uint64
|
||||
InactiveAnon uint64
|
||||
ActiveFile uint64
|
||||
InactiveFile uint64
|
||||
Unevictable uint64
|
||||
Mlocked uint64
|
||||
// total amount of swap space available
|
||||
SwapTotal uint64
|
||||
// Memory which has been evicted from RAM, and is temporarily
|
||||
// on the disk
|
||||
SwapFree uint64
|
||||
// Memory which is waiting to get written back to the disk
|
||||
Dirty uint64
|
||||
// Memory which is actively being written back to the disk
|
||||
Writeback uint64
|
||||
// Non-file backed pages mapped into userspace page tables
|
||||
AnonPages uint64
|
||||
// files which have been mapped, such as libraries
|
||||
Mapped uint64
|
||||
Shmem uint64
|
||||
// in-kernel data structures cache
|
||||
Slab uint64
|
||||
// Part of Slab, that might be reclaimed, such as caches
|
||||
SReclaimable uint64
|
||||
// Part of Slab, that cannot be reclaimed on memory pressure
|
||||
SUnreclaim uint64
|
||||
KernelStack uint64
|
||||
// amount of memory dedicated to the lowest level of page
|
||||
// tables.
|
||||
PageTables uint64
|
||||
// NFS pages sent to the server, but not yet committed to
|
||||
// stable storage
|
||||
NFSUnstable uint64
|
||||
// Memory used for block device "bounce buffers"
|
||||
Bounce uint64
|
||||
// Memory used by FUSE for temporary writeback buffers
|
||||
WritebackTmp uint64
|
||||
// Based on the overcommit ratio ('vm.overcommit_ratio'),
|
||||
// this is the total amount of memory currently available to
|
||||
// be allocated on the system. This limit is only adhered to
|
||||
// if strict overcommit accounting is enabled (mode 2 in
|
||||
// 'vm.overcommit_memory').
|
||||
// The CommitLimit is calculated with the following formula:
|
||||
// CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
|
||||
// overcommit_ratio / 100 + [total swap pages]
|
||||
// For example, on a system with 1G of physical RAM and 7G
|
||||
// of swap with a `vm.overcommit_ratio` of 30 it would
|
||||
// yield a CommitLimit of 7.3G.
|
||||
// For more details, see the memory overcommit documentation
|
||||
// in vm/overcommit-accounting.
|
||||
CommitLimit uint64
|
||||
// The amount of memory presently allocated on the system.
|
||||
// The committed memory is a sum of all of the memory which
|
||||
// has been allocated by processes, even if it has not been
|
||||
// "used" by them as of yet. A process which malloc()'s 1G
|
||||
// of memory, but only touches 300M of it will show up as
|
||||
// using 1G. This 1G is memory which has been "committed" to
|
||||
// by the VM and can be used at any time by the allocating
|
||||
// application. With strict overcommit enabled on the system
|
||||
// (mode 2 in 'vm.overcommit_memory'), allocations which would
|
||||
// exceed the CommitLimit (detailed above) will not be permitted.
|
||||
// This is useful if one needs to guarantee that processes will
|
||||
// not fail due to lack of memory once that memory has been
|
||||
// successfully allocated.
|
||||
CommittedAS uint64
|
||||
// total size of vmalloc memory area
|
||||
VmallocTotal uint64
|
||||
// amount of vmalloc area which is used
|
||||
VmallocUsed uint64
|
||||
// largest contiguous block of vmalloc area which is free
|
||||
VmallocChunk uint64
|
||||
HardwareCorrupted uint64
|
||||
AnonHugePages uint64
|
||||
ShmemHugePages uint64
|
||||
ShmemPmdMapped uint64
|
||||
CmaTotal uint64
|
||||
CmaFree uint64
|
||||
HugePagesTotal uint64
|
||||
HugePagesFree uint64
|
||||
HugePagesRsvd uint64
|
||||
HugePagesSurp uint64
|
||||
Hugepagesize uint64
|
||||
DirectMap4k uint64
|
||||
DirectMap2M uint64
|
||||
DirectMap1G uint64
|
||||
}
|
||||
|
||||
// Meminfo returns information about current kernel/system memory statistics.
|
||||
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
|
||||
func (fs FS) Meminfo() (Meminfo, error) {
|
||||
b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
|
||||
if err != nil {
|
||||
return Meminfo{}, err
|
||||
}
|
||||
|
||||
m, err := parseMemInfo(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
|
||||
}
|
||||
|
||||
return *m, nil
|
||||
}
|
||||
|
||||
func parseMemInfo(r io.Reader) (*Meminfo, error) {
|
||||
var m Meminfo
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Each line has at least a name and value; we ignore the unit.
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
|
||||
}
|
||||
|
||||
v, err := strconv.ParseUint(fields[1], 0, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch fields[0] {
|
||||
case "MemTotal:":
|
||||
m.MemTotal = v
|
||||
case "MemFree:":
|
||||
m.MemFree = v
|
||||
case "MemAvailable:":
|
||||
m.MemAvailable = v
|
||||
case "Buffers:":
|
||||
m.Buffers = v
|
||||
case "Cached:":
|
||||
m.Cached = v
|
||||
case "SwapCached:":
|
||||
m.SwapCached = v
|
||||
case "Active:":
|
||||
m.Active = v
|
||||
case "Inactive:":
|
||||
m.Inactive = v
|
||||
case "Active(anon):":
|
||||
m.ActiveAnon = v
|
||||
case "Inactive(anon):":
|
||||
m.InactiveAnon = v
|
||||
case "Active(file):":
|
||||
m.ActiveFile = v
|
||||
case "Inactive(file):":
|
||||
m.InactiveFile = v
|
||||
case "Unevictable:":
|
||||
m.Unevictable = v
|
||||
case "Mlocked:":
|
||||
m.Mlocked = v
|
||||
case "SwapTotal:":
|
||||
m.SwapTotal = v
|
||||
case "SwapFree:":
|
||||
m.SwapFree = v
|
||||
case "Dirty:":
|
||||
m.Dirty = v
|
||||
case "Writeback:":
|
||||
m.Writeback = v
|
||||
case "AnonPages:":
|
||||
m.AnonPages = v
|
||||
case "Mapped:":
|
||||
m.Mapped = v
|
||||
case "Shmem:":
|
||||
m.Shmem = v
|
||||
case "Slab:":
|
||||
m.Slab = v
|
||||
case "SReclaimable:":
|
||||
m.SReclaimable = v
|
||||
case "SUnreclaim:":
|
||||
m.SUnreclaim = v
|
||||
case "KernelStack:":
|
||||
m.KernelStack = v
|
||||
case "PageTables:":
|
||||
m.PageTables = v
|
||||
case "NFS_Unstable:":
|
||||
m.NFSUnstable = v
|
||||
case "Bounce:":
|
||||
m.Bounce = v
|
||||
case "WritebackTmp:":
|
||||
m.WritebackTmp = v
|
||||
case "CommitLimit:":
|
||||
m.CommitLimit = v
|
||||
case "Committed_AS:":
|
||||
m.CommittedAS = v
|
||||
case "VmallocTotal:":
|
||||
m.VmallocTotal = v
|
||||
case "VmallocUsed:":
|
||||
m.VmallocUsed = v
|
||||
case "VmallocChunk:":
|
||||
m.VmallocChunk = v
|
||||
case "HardwareCorrupted:":
|
||||
m.HardwareCorrupted = v
|
||||
case "AnonHugePages:":
|
||||
m.AnonHugePages = v
|
||||
case "ShmemHugePages:":
|
||||
m.ShmemHugePages = v
|
||||
case "ShmemPmdMapped:":
|
||||
m.ShmemPmdMapped = v
|
||||
case "CmaTotal:":
|
||||
m.CmaTotal = v
|
||||
case "CmaFree:":
|
||||
m.CmaFree = v
|
||||
case "HugePages_Total:":
|
||||
m.HugePagesTotal = v
|
||||
case "HugePages_Free:":
|
||||
m.HugePagesFree = v
|
||||
case "HugePages_Rsvd:":
|
||||
m.HugePagesRsvd = v
|
||||
case "HugePages_Surp:":
|
||||
m.HugePagesSurp = v
|
||||
case "Hugepagesize:":
|
||||
m.Hugepagesize = v
|
||||
case "DirectMap4k:":
|
||||
m.DirectMap4k = v
|
||||
case "DirectMap2M:":
|
||||
m.DirectMap2M = v
|
||||
case "DirectMap1G:":
|
||||
m.DirectMap1G = v
|
||||
}
|
||||
}
|
||||
|
||||
return &m, nil
|
||||
}
|
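A usage sketch for the new Meminfo API (same procfs.NewFS assumption); values are in kB as reported by /proc/meminfo, since the unit column is ignored by the parser above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	// Values are in kB, as reported by /proc/meminfo.
	fmt.Printf("MemTotal=%d kB MemFree=%d kB MemAvailable=%d kB\n",
		mi.MemTotal, mi.MemFree, mi.MemAvailable)
}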
128
vendor/github.com/prometheus/procfs/mountinfo.go
generated
vendored
@ -15,19 +15,13 @@ package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var validOptionalFields = map[string]bool{
|
||||
"shared": true,
|
||||
"master": true,
|
||||
"propagate_from": true,
|
||||
"unbindable": true,
|
||||
}
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// A MountInfo is a type that describes the details, options
|
||||
// for each mount, parsed from /proc/self/mountinfo.
|
||||
@ -35,10 +29,10 @@ var validOptionalFields = map[string]bool{
|
||||
// is described in the following man page.
|
||||
// http://man7.org/linux/man-pages/man5/proc.5.html
|
||||
type MountInfo struct {
|
||||
// Unique Id for the mount
|
||||
MountId int
|
||||
// The Id of the parent mount
|
||||
ParentId int
|
||||
// Unique ID for the mount
|
||||
MountID int
|
||||
// The ID of the parent mount
|
||||
ParentID int
|
||||
// The value of `st_dev` for the files on this FS
|
||||
MajorMinorVer string
|
||||
// The pathname of the directory in the FS that forms
|
||||
@ -58,18 +52,10 @@ type MountInfo struct {
|
||||
SuperOptions map[string]string
|
||||
}
|
||||
|
||||
// Returns part of the mountinfo line, if it exists, else an empty string.
|
||||
func getStringSliceElement(parts []string, idx int, defaultValue string) string {
|
||||
if idx >= len(parts) {
|
||||
return defaultValue
|
||||
}
|
||||
return parts[idx]
|
||||
}
|
||||
|
||||
// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
|
||||
func parseMountInfo(r io.Reader) ([]*MountInfo, error) {
|
||||
func parseMountInfo(info []byte) ([]*MountInfo, error) {
|
||||
mounts := []*MountInfo{}
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner := bufio.NewScanner(bytes.NewReader(info))
|
||||
for scanner.Scan() {
|
||||
mountString := scanner.Text()
|
||||
parsedMounts, err := parseMountInfoString(mountString)
|
||||
@ -89,58 +75,76 @@ func parseMountInfo(r io.Reader) ([]*MountInfo, error) {
|
||||
func parseMountInfoString(mountString string) (*MountInfo, error) {
|
||||
var err error
|
||||
|
||||
// OptionalFields can be zero, hence these checks to ensure we do not populate the wrong values in the wrong spots
|
||||
separatorIndex := strings.Index(mountString, "-")
|
||||
if separatorIndex == -1 {
|
||||
return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString)
|
||||
mountInfo := strings.Split(mountString, " ")
|
||||
mountInfoLength := len(mountInfo)
|
||||
if mountInfoLength < 10 {
|
||||
return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
|
||||
}
|
||||
beforeFields := strings.Fields(mountString[:separatorIndex])
|
||||
afterFields := strings.Fields(mountString[separatorIndex+1:])
|
||||
if (len(beforeFields) + len(afterFields)) < 7 {
|
||||
return nil, fmt.Errorf("too few fields")
|
||||
|
||||
if mountInfo[mountInfoLength-4] != "-" {
|
||||
return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
|
||||
}
|
||||
|
||||
mount := &MountInfo{
|
||||
MajorMinorVer: getStringSliceElement(beforeFields, 2, ""),
|
||||
Root: getStringSliceElement(beforeFields, 3, ""),
|
||||
MountPoint: getStringSliceElement(beforeFields, 4, ""),
|
||||
Options: mountOptionsParser(getStringSliceElement(beforeFields, 5, "")),
|
||||
MajorMinorVer: mountInfo[2],
|
||||
Root: mountInfo[3],
|
||||
MountPoint: mountInfo[4],
|
||||
Options: mountOptionsParser(mountInfo[5]),
|
||||
OptionalFields: nil,
|
||||
FSType: getStringSliceElement(afterFields, 0, ""),
|
||||
Source: getStringSliceElement(afterFields, 1, ""),
|
||||
SuperOptions: mountOptionsParser(getStringSliceElement(afterFields, 2, "")),
|
||||
FSType: mountInfo[mountInfoLength-3],
|
||||
Source: mountInfo[mountInfoLength-2],
|
||||
SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]),
|
||||
}
|
||||
|
||||
mount.MountId, err = strconv.Atoi(getStringSliceElement(beforeFields, 0, ""))
|
||||
mount.MountID, err = strconv.Atoi(mountInfo[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse mount ID")
|
||||
}
|
||||
mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, ""))
|
||||
mount.ParentID, err = strconv.Atoi(mountInfo[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse parent ID")
|
||||
}
|
||||
// Has optional fields, which is a space separated list of values.
|
||||
// Example: shared:2 master:7
|
||||
if len(beforeFields) > 6 {
|
||||
mount.OptionalFields = make(map[string]string)
|
||||
optionalFields := beforeFields[6:]
|
||||
for _, field := range optionalFields {
|
||||
optionSplit := strings.Split(field, ":")
|
||||
target, value := optionSplit[0], ""
|
||||
if len(optionSplit) == 2 {
|
||||
value = optionSplit[1]
|
||||
}
|
||||
// Checks if the 'keys' in the optional fields in the mountinfo line are acceptable.
|
||||
// Allowed 'keys' are shared, master, propagate_from, unbindable.
|
||||
if _, ok := validOptionalFields[target]; ok {
|
||||
mount.OptionalFields[target] = value
|
||||
}
|
||||
if mountInfo[6] != "" {
|
||||
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return mount, nil
|
||||
}
|
||||
|
||||
// Parses the mount options, superblock options.
|
||||
// mountOptionsIsValidField checks a string against a valid list of optional fields keys.
|
||||
func mountOptionsIsValidField(s string) bool {
|
||||
switch s {
|
||||
case
|
||||
"shared",
|
||||
"master",
|
||||
"propagate_from",
|
||||
"unbindable":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings.
|
||||
func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
|
||||
optionalFields := make(map[string]string)
|
||||
for _, field := range o {
|
||||
optionSplit := strings.SplitN(field, ":", 2)
|
||||
value := ""
|
||||
if len(optionSplit) == 2 {
|
||||
value = optionSplit[1]
|
||||
}
|
||||
if mountOptionsIsValidField(optionSplit[0]) {
|
||||
optionalFields[optionSplit[0]] = value
|
||||
}
|
||||
}
|
||||
return optionalFields, nil
|
||||
}
|
||||
|
||||
// mountOptionsParser parses the mount options, superblock options.
|
||||
func mountOptionsParser(mountOptions string) map[string]string {
|
||||
opts := make(map[string]string)
|
||||
options := strings.Split(mountOptions, ",")
|
||||
@ -157,22 +161,20 @@ func mountOptionsParser(mountOptions string) map[string]string {
|
||||
return opts
|
||||
}
|
||||
|
||||
// Retrieves mountinfo information from `/proc/self/mountinfo`.
|
||||
// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
|
||||
func GetMounts() ([]*MountInfo, error) {
|
||||
f, err := os.Open("/proc/self/mountinfo")
|
||||
data, err := util.ReadFileNoStat("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
return parseMountInfo(f)
|
||||
return parseMountInfo(data)
|
||||
}
|
||||
|
||||
// Retrieves mountinfo information from a processes' `/proc/<pid>/mountinfo`.
|
||||
// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
|
||||
func GetProcMounts(pid int) ([]*MountInfo, error) {
|
||||
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
|
||||
data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
return parseMountInfo(f)
|
||||
return parseMountInfo(data)
|
||||
}
|
||||
|
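A usage sketch for the exported GetMounts helper renamed above; it needs no FS handle because it always reads /proc/self/mountinfo, and the field names follow the MountID/ParentID rename in this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// GetMounts parses /proc/self/mountinfo for the current process.
	mounts, err := procfs.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		fmt.Printf("%d %s on %s type %s\n", m.MountID, m.Source, m.MountPoint, m.FSType)
	}
}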
20
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
@ -186,6 +186,8 @@ type NFSOperationStats struct {
|
||||
CumulativeTotalResponseMilliseconds uint64
|
||||
// Duration from when a request was enqueued to when it was completely handled.
|
||||
CumulativeTotalRequestMilliseconds uint64
|
||||
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
|
||||
Errors uint64
|
||||
}
|
||||
|
||||
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
|
||||
@ -494,8 +496,8 @@ func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
|
||||
// line is reached.
|
||||
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||
const (
|
||||
// Number of expected fields in each per-operation statistics set
|
||||
numFields = 9
|
||||
// Minimum number of expected fields in each per-operation statistics set
|
||||
minFields = 9
|
||||
)
|
||||
|
||||
var ops []NFSOperationStats
|
||||
@ -508,12 +510,12 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||
break
|
||||
}
|
||||
|
||||
if len(ss) != numFields {
|
||||
if len(ss) < minFields {
|
||||
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
|
||||
}
|
||||
|
||||
// Skip string operation name for integers
|
||||
ns := make([]uint64, 0, numFields-1)
|
||||
ns := make([]uint64, 0, minFields-1)
|
||||
for _, st := range ss[1:] {
|
||||
n, err := strconv.ParseUint(st, 10, 64)
|
||||
if err != nil {
|
||||
@ -523,7 +525,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
ops = append(ops, NFSOperationStats{
|
||||
opStats := NFSOperationStats{
|
||||
Operation: strings.TrimSuffix(ss[0], ":"),
|
||||
Requests: ns[0],
|
||||
Transmissions: ns[1],
|
||||
@ -533,7 +535,13 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||
CumulativeQueueMilliseconds: ns[5],
|
||||
CumulativeTotalResponseMilliseconds: ns[6],
|
||||
CumulativeTotalRequestMilliseconds: ns[7],
|
||||
})
|
||||
}
|
||||
|
||||
if len(ns) > 8 {
|
||||
opStats.Errors = ns[8]
|
||||
}
|
||||
|
||||
ops = append(ops, opStats)
|
||||
}
|
||||
|
||||
return ops, s.Err()
|
||||
|
153
vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
|
||||
// and contains netfilter conntrack statistics at one CPU core
|
||||
type ConntrackStatEntry struct {
|
||||
Entries uint64
|
||||
Found uint64
|
||||
Invalid uint64
|
||||
Ignore uint64
|
||||
Insert uint64
|
||||
InsertFailed uint64
|
||||
Drop uint64
|
||||
EarlyDrop uint64
|
||||
SearchRestart uint64
|
||||
}
|
||||
|
||||
// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
|
||||
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
|
||||
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
|
||||
}
|
||||
|
||||
// Parses a slice of ConntrackStatEntries from the given filepath
|
||||
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
||||
// This file is small and can be read with one syscall.
|
||||
b, err := util.ReadFileNoStat(path)
|
||||
if err != nil {
|
||||
// Do not wrap this error so the caller can detect os.IsNotExist and
|
||||
// similar conditions.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stat, err := parseConntrackStat(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
}
|
||||
|
||||
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
|
||||
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
||||
var entries []ConntrackStatEntry
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
scanner.Scan()
|
||||
for scanner.Scan() {
|
||||
fields := strings.Fields(scanner.Text())
|
||||
conntrackEntry, err := parseConntrackStatEntry(fields)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, *conntrackEntry)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Parses a ConntrackStatEntry from given array of fields
|
||||
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
||||
if len(fields) != 17 {
|
||||
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
|
||||
}
|
||||
entry := &ConntrackStatEntry{}
|
||||
|
||||
entries, err := parseConntrackStatField(fields[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Entries = entries
|
||||
|
||||
found, err := parseConntrackStatField(fields[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Found = found
|
||||
|
||||
invalid, err := parseConntrackStatField(fields[4])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Invalid = invalid
|
||||
|
||||
ignore, err := parseConntrackStatField(fields[5])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Ignore = ignore
|
||||
|
||||
insert, err := parseConntrackStatField(fields[8])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Insert = insert
|
||||
|
||||
insertFailed, err := parseConntrackStatField(fields[9])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.InsertFailed = insertFailed
|
||||
|
||||
drop, err := parseConntrackStatField(fields[10])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Drop = drop
|
||||
|
||||
earlyDrop, err := parseConntrackStatField(fields[11])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.EarlyDrop = earlyDrop
|
||||
|
||||
searchRestart, err := parseConntrackStatField(fields[16])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.SearchRestart = searchRestart
|
||||
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// Parses a uint64 from the given hexadecimal string
|
||||
func parseConntrackStatField(field string) (uint64, error) {
|
||||
val, err := strconv.ParseUint(field, 16, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
|
||||
}
|
||||
return val, err
|
||||
}
|
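A usage sketch for the new ConntrackStat API (same procfs.NewFS assumption); one entry is returned per CPU core, in the order of the lines in /proc/net/stat/nf_conntrack:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// One entry per CPU core, parsed from /proc/net/stat/nf_conntrack.
	entries, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err) // a not-exist error when conntrack is not loaded
	}
	for i, e := range entries {
		fmt.Printf("cpu%d: entries=%d found=%d drop=%d\n", i, e.Entries, e.Found, e.Drop)
	}
}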
1
vendor/github.com/prometheus/procfs/net_dev.go
generated
vendored
@ -183,7 +183,6 @@ func (netDev NetDev) Total() NetDevLine {
|
||||
names = append(names, ifc.Name)
|
||||
total.RxBytes += ifc.RxBytes
|
||||
total.RxPackets += ifc.RxPackets
|
||||
total.RxPackets += ifc.RxPackets
|
||||
total.RxErrors += ifc.RxErrors
|
||||
total.RxDropped += ifc.RxDropped
|
||||
total.RxFIFO += ifc.RxFIFO
|
||||
|
163
vendor/github.com/prometheus/procfs/net_sockstat.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/procfs/internal/util"
|
||||
)
|
||||
|
||||
// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
|
||||
// respectively.
|
||||
type NetSockstat struct {
|
||||
// Used is non-nil for IPv4 sockstat results, but nil for IPv6.
|
||||
Used *int
|
||||
Protocols []NetSockstatProtocol
|
||||
}
|
||||
|
||||
// A NetSockstatProtocol contains statistics about a given socket protocol.
|
||||
// Pointer fields indicate that the value may or may not be present on any
|
||||
// given protocol.
|
||||
type NetSockstatProtocol struct {
|
||||
Protocol string
|
||||
InUse int
|
||||
Orphan *int
|
||||
TW *int
|
||||
Alloc *int
|
||||
Mem *int
|
||||
Memory *int
|
||||
}
|
||||
|
||||
// NetSockstat retrieves IPv4 socket statistics.
|
||||
func (fs FS) NetSockstat() (*NetSockstat, error) {
|
||||
	return readSockstat(fs.proc.Path("net", "sockstat"))
}

// NetSockstat6 retrieves IPv6 socket statistics.
//
// If IPv6 is disabled on this kernel, the returned error can be checked with
// os.IsNotExist.
func (fs FS) NetSockstat6() (*NetSockstat, error) {
	return readSockstat(fs.proc.Path("net", "sockstat6"))
}

// readSockstat opens and parses a NetSockstat from the input file.
func readSockstat(name string) (*NetSockstat, error) {
	// This file is small and can be read with one syscall.
	b, err := util.ReadFileNoStat(name)
	if err != nil {
		// Do not wrap this error so the caller can detect os.IsNotExist and
		// similar conditions.
		return nil, err
	}

	stat, err := parseSockstat(bytes.NewReader(b))
	if err != nil {
		return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
	}

	return stat, nil
}

// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
func parseSockstat(r io.Reader) (*NetSockstat, error) {
	var stat NetSockstat
	s := bufio.NewScanner(r)
	for s.Scan() {
		// Expect a minimum of a protocol and one key/value pair.
		fields := strings.Split(s.Text(), " ")
		if len(fields) < 3 {
			return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
		}

		// The remaining fields are key/value pairs.
		kvs, err := parseSockstatKVs(fields[1:])
		if err != nil {
			return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
		}

		// The first field is the protocol. We must trim its colon suffix.
		proto := strings.TrimSuffix(fields[0], ":")
		switch proto {
		case "sockets":
			// Special case: IPv4 has a sockets "used" key/value pair that we
			// embed at the top level of the structure.
			used := kvs["used"]
			stat.Used = &used
		default:
			// Parse all other lines as individual protocols.
			nsp := parseSockstatProtocol(kvs)
			nsp.Protocol = proto
			stat.Protocols = append(stat.Protocols, nsp)
		}
	}

	if err := s.Err(); err != nil {
		return nil, err
	}

	return &stat, nil
}

// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
	if len(kvs)%2 != 0 {
		return nil, errors.New("odd number of fields in key/value pairs")
	}

	// Iterate two values at a time to gather key/value pairs.
	out := make(map[string]int, len(kvs)/2)
	for i := 0; i < len(kvs); i += 2 {
		vp := util.NewValueParser(kvs[i+1])
		out[kvs[i]] = vp.Int()

		if err := vp.Err(); err != nil {
			return nil, err
		}
	}

	return out, nil
}

// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
	var nsp NetSockstatProtocol
	for k, v := range kvs {
		// Capture the range variable to ensure we get unique pointers for
		// each of the optional fields.
		v := v
		switch k {
		case "inuse":
			nsp.InUse = v
		case "orphan":
			nsp.Orphan = &v
		case "tw":
			nsp.TW = &v
		case "alloc":
			nsp.Alloc = &v
		case "mem":
			nsp.Mem = &v
		case "memory":
			nsp.Memory = &v
		}
	}

	return nsp
}
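For illustration only, not part of this commit: a minimal sketch of how the NetSockstat API above could be consumed, assuming the package is imported as github.com/prometheus/procfs and /proc is mounted at procfs.DefaultMountPoint (both referenced elsewhere in this vendored code); the field names come from the parsing code above.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// NetSockstat reads /proc/net/sockstat; NetSockstat6 reads the IPv6 counterpart.
	stat, err := fs.NetSockstat()
	if err != nil {
		log.Fatal(err)
	}

	if stat.Used != nil {
		fmt.Println("sockets used:", *stat.Used)
	}
	for _, p := range stat.Protocols {
		fmt.Printf("%s: inuse=%d\n", p.Protocol, p.InUse)
	}
}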
115
vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
@ -14,78 +14,89 @@
package procfs

import (
	"bufio"
	"bytes"
	"fmt"
	"io/ioutil"
	"io"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.

// SoftnetEntry contains a single row of data from /proc/net/softnet_stat
type SoftnetEntry struct {
// SoftnetStat contains a single row of data from /proc/net/softnet_stat
type SoftnetStat struct {
	// Number of processed packets
	Processed uint
	Processed uint32
	// Number of dropped packets
	Dropped uint
	Dropped uint32
	// Number of times processing packets ran out of quota
	TimeSqueezed uint
	TimeSqueezed uint32
}

// GatherSoftnetStats reads /proc/net/softnet_stat, parse the relevant columns,
// and then return a slice of SoftnetEntry's.
func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat"))
var softNetProcFile = "net/softnet_stat"

// NetSoftnetStat reads data from /proc/net/softnet_stat.
func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
	b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
	if err != nil {
		return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err)
		return nil, err
	}

	return parseSoftnetEntries(data)
}

func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) {
	lines := strings.Split(string(data), "\n")
	entries := make([]SoftnetEntry, 0)
	var err error
	const (
		expectedColumns = 11
	)
	for _, line := range lines {
		columns := strings.Fields(line)
		width := len(columns)
		if width == 0 {
			continue
		}
		if width != expectedColumns {
			return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns)
		}
		var entry SoftnetEntry
		if entry, err = parseSoftnetEntry(columns); err != nil {
			return []SoftnetEntry{}, err
		}
		entries = append(entries, entry)
	entries, err := parseSoftnet(bytes.NewReader(b))
	if err != nil {
		return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
	}

	return entries, nil
}

func parseSoftnetEntry(columns []string) (SoftnetEntry, error) {
	var err error
	var processed, dropped, timeSqueezed uint64
	if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil {
		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err)
func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
	const minColumns = 9

	s := bufio.NewScanner(r)

	var stats []SoftnetStat
	for s.Scan() {
		columns := strings.Fields(s.Text())
		width := len(columns)

		if width < minColumns {
			return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
		}

		// We only parse the first three columns at the moment.
		us, err := parseHexUint32s(columns[0:3])
		if err != nil {
			return nil, err
		}

		stats = append(stats, SoftnetStat{
			Processed:    us[0],
			Dropped:      us[1],
			TimeSqueezed: us[2],
		})
	}
	if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil {
		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err)
	}
	if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil {
		return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err)
	}
	return SoftnetEntry{
		Processed:    uint(processed),
		Dropped:      uint(dropped),
		TimeSqueezed: uint(timeSqueezed),
	}, nil

	return stats, nil
}

func parseHexUint32s(ss []string) ([]uint32, error) {
	us := make([]uint32, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 16, 32)
		if err != nil {
			return nil, err
		}

		us = append(us, uint32(u))
	}

	return us, nil
}
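For illustration only, not part of this commit: a short sketch of the renamed NetSoftnetStat API shown above, under the same import-path and mount-point assumptions as the previous sketch.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// One SoftnetStat is returned per row (CPU) of /proc/net/softnet_stat.
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}

	for cpu, s := range stats {
		fmt.Printf("cpu%d processed=%d dropped=%d time_squeezed=%d\n", cpu, s.Processed, s.Dropped, s.TimeSqueezed)
	}
}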
229
vendor/github.com/prometheus/procfs/net_udp.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"encoding/hex"
	"fmt"
	"io"
	"net"
	"os"
	"strconv"
	"strings"
)

const (
	// readLimit is used by io.LimitReader while reading the content of the
	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
	// as each line represents a single used socket.
	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
	// With e.g. 150 Byte per line and the maximum number of 65535,
	// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
	readLimit = 4294967296 // Byte -> 4 GiB
)

type (
	// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
	NetUDP []*netUDPLine

	// NetUDPSummary provides already computed values like the total queue lengths or
	// the total number of used sockets. In contrast to NetUDP it does not collect
	// the parsed lines into a slice.
	NetUDPSummary struct {
		// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
		TxQueueLength uint64
		// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
		RxQueueLength uint64
		// UsedSockets shows the total number of parsed lines representing the
		// number of used sockets.
		UsedSockets uint64
	}

	// netUDPLine represents the fields parsed from a single line
	// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
	// For the proc file format details, see https://linux.die.net/man/5/proc.
	netUDPLine struct {
		Sl        uint64
		LocalAddr net.IP
		LocalPort uint64
		RemAddr   net.IP
		RemPort   uint64
		St        uint64
		TxQueue   uint64
		RxQueue   uint64
		UID       uint64
	}
)

// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp.
func (fs FS) NetUDP() (NetUDP, error) {
	return newNetUDP(fs.proc.Path("net/udp"))
}

// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
// read from /proc/net/udp6.
func (fs FS) NetUDP6() (NetUDP, error) {
	return newNetUDP(fs.proc.Path("net/udp6"))
}

// NetUDPSummary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp.
func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
	return newNetUDPSummary(fs.proc.Path("net/udp"))
}

// NetUDP6Summary returns already computed statistics like the total queue lengths
// for UDP datagrams read from /proc/net/udp6.
func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
	return newNetUDPSummary(fs.proc.Path("net/udp6"))
}

// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	netUDP := NetUDP{}

	lr := io.LimitReader(f, readLimit)
	s := bufio.NewScanner(lr)
	s.Scan() // skip first line with headers
	for s.Scan() {
		fields := strings.Fields(s.Text())
		line, err := parseNetUDPLine(fields)
		if err != nil {
			return nil, err
		}
		netUDP = append(netUDP, line)
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return netUDP, nil
}

// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	netUDPSummary := &NetUDPSummary{}

	lr := io.LimitReader(f, readLimit)
	s := bufio.NewScanner(lr)
	s.Scan() // skip first line with headers
	for s.Scan() {
		fields := strings.Fields(s.Text())
		line, err := parseNetUDPLine(fields)
		if err != nil {
			return nil, err
		}
		netUDPSummary.TxQueueLength += line.TxQueue
		netUDPSummary.RxQueueLength += line.RxQueue
		netUDPSummary.UsedSockets++
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return netUDPSummary, nil
}

// parseNetUDPLine parses a single line, represented by a list of fields.
func parseNetUDPLine(fields []string) (*netUDPLine, error) {
	line := &netUDPLine{}
	if len(fields) < 8 {
		return nil, fmt.Errorf(
			"cannot parse net udp socket line as it has less then 8 columns: %s",
			strings.Join(fields, " "),
		)
	}
	var err error // parse error

	// sl
	s := strings.Split(fields[0], ":")
	if len(s) != 2 {
		return nil, fmt.Errorf(
			"cannot parse sl field in udp socket line: %s", fields[0])
	}

	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
		return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
	}
	// local_address
	l := strings.Split(fields[1], ":")
	if len(l) != 2 {
		return nil, fmt.Errorf(
			"cannot parse local_address field in udp socket line: %s", fields[1])
	}
	if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
		return nil, fmt.Errorf(
			"cannot parse local_address value in udp socket line: %s", err)
	}
	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse local_address port value in udp socket line: %s", err)
	}

	// remote_address
	r := strings.Split(fields[2], ":")
	if len(r) != 2 {
		return nil, fmt.Errorf(
			"cannot parse rem_address field in udp socket line: %s", fields[1])
	}
	if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
		return nil, fmt.Errorf(
			"cannot parse rem_address value in udp socket line: %s", err)
	}
	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse rem_address port value in udp socket line: %s", err)
	}

	// st
	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse st value in udp socket line: %s", err)
	}

	// tx_queue and rx_queue
	q := strings.Split(fields[4], ":")
	if len(q) != 2 {
		return nil, fmt.Errorf(
			"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
			fields[4],
		)
	}
	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
		return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
	}
	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
		return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
	}

	// uid
	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
		return nil, fmt.Errorf(
			"cannot parse uid value in udp socket line: %s", err)
	}

	return line, nil
}
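For illustration only, not part of this commit: a sketch of the new NetUDPSummary helper from the file above, under the same import-path and mount-point assumptions as the previous sketches.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	// NetUDPSummary aggregates /proc/net/udp; NetUDP6Summary covers /proc/net/udp6.
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("udp sockets=%d tx_queue=%d rx_queue=%d\n", sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)
}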
228
vendor/github.com/prometheus/procfs/net_unix.go
generated
vendored
@ -15,7 +15,6 @@ package procfs

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"os"
@ -27,25 +26,15 @@ import (
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.

const (
	netUnixKernelPtrIdx = iota
	netUnixRefCountIdx
	_
	netUnixFlagsIdx
	netUnixTypeIdx
	netUnixStateIdx
	netUnixInodeIdx

	// Inode and Path are optional.
	netUnixStaticFieldsCnt = 6
)

// Constants for the various /proc/net/unix enumerations.
// TODO: match against x/sys/unix or similar?
const (
	netUnixTypeStream    = 1
	netUnixTypeDgram     = 2
	netUnixTypeSeqpacket = 5

	netUnixFlagListen = 1 << 16
	netUnixFlagDefault = 0
	netUnixFlagListen  = 1 << 16

	netUnixStateUnconnected = 1
	netUnixStateConnecting  = 2
@ -53,129 +42,127 @@ const (
	netUnixStateDisconnected = 4
)

var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format")
// NetUNIXType is the type of the type field.
type NetUNIXType uint64

// NetUnixType is the type of the type field.
type NetUnixType uint64
// NetUNIXFlags is the type of the flags field.
type NetUNIXFlags uint64

// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64
// NetUNIXState is the type of the state field.
type NetUNIXState uint64

// NetUnixState is the type of the state field.
type NetUnixState uint64

// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
// NetUNIXLine represents a line of /proc/net/unix.
type NetUNIXLine struct {
	KernelPtr string
	RefCount  uint64
	Protocol  uint64
	Flags     NetUnixFlags
	Type      NetUnixType
	State     NetUnixState
	Flags     NetUNIXFlags
	Type      NetUNIXType
	State     NetUNIXState
	Inode     uint64
	Path      string
}

// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
	Rows []*NetUnixLine
// NetUNIX holds the data read from /proc/net/unix.
type NetUNIX struct {
	Rows []*NetUNIXLine
}

// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return nil, err
	}

	return fs.NewNetUnix()
// NetUNIX returns data read from /proc/net/unix.
func (fs FS) NetUNIX() (*NetUNIX, error) {
	return readNetUNIX(fs.proc.Path("net/unix"))
}

// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
	return NewNetUnixByPath(fs.proc.Path("net/unix"))
}

// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
	f, err := os.Open(path)
// readNetUNIX reads data in /proc/net/unix format from the specified file.
func readNetUNIX(file string) (*NetUNIX, error) {
	// This file could be quite large and a streaming read is desirable versus
	// reading the entire contents at once.
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return NewNetUnixByReader(f)

	return parseNetUNIX(f)
}

// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might returns an error with partial parsed data, if an error occur after some data parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
	nu := &NetUnix{
		Rows: make([]*NetUnixLine, 0, 32),
	}
	scanner := bufio.NewScanner(reader)
	// Omit the header line.
	scanner.Scan()
	header := scanner.Text()
	// From the man page of proc(5), it does not contain an Inode field,
	// but in actually it exists.
	// This code works for both cases.
	hasInode := strings.Contains(header, "Inode")
// parseNetUNIX creates a NetUnix structure from the incoming stream.
func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
	// Begin scanning by checking for the existence of Inode.
	s := bufio.NewScanner(r)
	s.Scan()

	minFieldsCnt := netUnixStaticFieldsCnt
	// From the man page of proc(5), it does not contain an Inode field,
	// but in actually it exists. This code works for both cases.
	hasInode := strings.Contains(s.Text(), "Inode")

	// Expect a minimum number of fields, but Inode and Path are optional:
	// Num RefCount Protocol Flags Type St Inode Path
	minFields := 6
	if hasInode {
		minFieldsCnt++
		minFields++
	}
	for scanner.Scan() {
		line := scanner.Text()
		item, err := nu.parseLine(line, hasInode, minFieldsCnt)

	var nu NetUNIX
	for s.Scan() {
		line := s.Text()
		item, err := nu.parseLine(line, hasInode, minFields)
		if err != nil {
			return nu, err
			return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %v", line, err)
		}

		nu.Rows = append(nu.Rows, item)
	}

	return nu, scanner.Err()
	if err := s.Err(); err != nil {
		return nil, fmt.Errorf("failed to scan /proc/net/unix data: %v", err)
	}

	return &nu, nil
}

func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
	fields := strings.Fields(line)
	fieldsLen := len(fields)
	if fieldsLen < minFieldsCnt {
		return nil, fmt.Errorf(
			"Parse Unix domain failed: expect at least %d fields but got %d",
			minFieldsCnt, fieldsLen)

	l := len(fields)
	if l < min {
		return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
	}
	kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])

	// Field offsets are as follows:
	// Num RefCount Protocol Flags Type St Inode Path

	kernelPtr := strings.TrimSuffix(fields[0], ":")

	users, err := u.parseUsers(fields[1])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
		return nil, fmt.Errorf("failed to parse ref count(%s): %v", fields[1], err)
	}
	users, err := u.parseUsers(fields[netUnixRefCountIdx])

	flags, err := u.parseFlags(fields[3])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
		return nil, fmt.Errorf("failed to parse flags(%s): %v", fields[3], err)
	}
	flags, err := u.parseFlags(fields[netUnixFlagsIdx])

	typ, err := u.parseType(fields[4])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
		return nil, fmt.Errorf("failed to parse type(%s): %v", fields[4], err)
	}
	typ, err := u.parseType(fields[netUnixTypeIdx])

	state, err := u.parseState(fields[5])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
	}
	state, err := u.parseState(fields[netUnixStateIdx])
	if err != nil {
		return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
		return nil, fmt.Errorf("failed to parse state(%s): %v", fields[5], err)
	}

	var inode uint64
	if hasInode {
		inodeStr := fields[netUnixInodeIdx]
		inode, err = u.parseInode(inodeStr)
		inode, err = u.parseInode(fields[6])
		if err != nil {
			return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
			return nil, fmt.Errorf("failed to parse inode(%s): %v", fields[6], err)
		}
	}

	nuLine := &NetUnixLine{
	n := &NetUNIXLine{
		KernelPtr: kernelPtr,
		RefCount:  users,
		Type:      typ,
@ -185,61 +172,56 @@ func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetU
	}

	// Path field is optional.
	if fieldsLen > minFieldsCnt {
		pathIdx := netUnixInodeIdx + 1
	if l > min {
		// Path occurs at either index 6 or 7 depending on whether inode is
		// already present.
		pathIdx := 7
		if !hasInode {
			pathIdx--
		}
		nuLine.Path = fields[pathIdx]

		n.Path = fields[pathIdx]
	}

	return nuLine, nil
	return n, nil
}

func (u NetUnix) parseKernelPtr(str string) (string, error) {
	if !strings.HasSuffix(str, ":") {
		return "", errInvalidKernelPtrFmt
	}
	return str[:len(str)-1], nil
func (u NetUNIX) parseUsers(s string) (uint64, error) {
	return strconv.ParseUint(s, 16, 32)
}

func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
	return strconv.ParseUint(hexStr, 16, 32)
}

func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
	return strconv.ParseUint(hexStr, 16, 32)
}

func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
	typ, err := strconv.ParseUint(hexStr, 16, 16)
func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
	typ, err := strconv.ParseUint(s, 16, 16)
	if err != nil {
		return 0, err
	}
	return NetUnixType(typ), nil

	return NetUNIXType(typ), nil
}

func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
	flags, err := strconv.ParseUint(hexStr, 16, 32)
func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
	flags, err := strconv.ParseUint(s, 16, 32)
	if err != nil {
		return 0, err
	}
	return NetUnixFlags(flags), nil

	return NetUNIXFlags(flags), nil
}

func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
	st, err := strconv.ParseInt(hexStr, 16, 8)
func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
	st, err := strconv.ParseInt(s, 16, 8)
	if err != nil {
		return 0, err
	}
	return NetUnixState(st), nil

	return NetUNIXState(st), nil
}

func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
	return strconv.ParseUint(inodeStr, 10, 64)
func (u NetUNIX) parseInode(s string) (uint64, error) {
	return strconv.ParseUint(s, 10, 64)
}

func (t NetUnixType) String() string {
func (t NetUNIXType) String() string {
	switch t {
	case netUnixTypeStream:
		return "stream"
@ -251,7 +233,7 @@ func (t NetUnixType) String() string {
	return "unknown"
}

func (f NetUnixFlags) String() string {
func (f NetUNIXFlags) String() string {
	switch f {
	case netUnixFlagListen:
		return "listen"
@ -260,7 +242,7 @@ func (f NetUnixFlags) String() string {
	}
}

func (s NetUnixState) String() string {
func (s NetUNIXState) String() string {
	switch s {
	case netUnixStateUnconnected:
		return "unconnected"
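For illustration only, not part of this commit: a sketch of the renamed NetUNIX API from the (truncated) diff above, counting UNIX domain sockets by type; same import-path and mount-point assumptions as the sketches above.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}

	byType := make(map[string]int)
	for _, row := range nu.Rows {
		byType[row.Type.String()]++ // e.g. "stream", "dgram", "seqpacket"
	}
	fmt.Println(byType)
}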
Some files were not shown because too many files have changed in this diff.