Mirror of https://github.com/rclone/rclone.git, synced 2024-11-25 01:44:41 +01:00
vendor: update to latest versions of everything
This commit is contained in:
parent 4415aa5c2e
commit 467fe30a5e
50
go.mod
@@ -2,66 +2,64 @@ module github.com/ncw/rclone

require (
	bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
	cloud.google.com/go v0.23.0 // indirect
	github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 // indirect
	cloud.google.com/go v0.28.0 // indirect
	github.com/Azure/azure-pipeline-go v0.1.8 // indirect
	github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
	github.com/Unknwon/goconfig v0.0.0-20180308125533-ef1e4c783f8f
	github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6
	github.com/abbot/go-http-auth v0.4.0
	github.com/aws/aws-sdk-go v1.14.8
	github.com/aws/aws-sdk-go v1.15.39
	github.com/billziss-gh/cgofuse v1.1.0
	github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98
	github.com/cpuguy83/go-md2man v1.0.8 // indirect
	github.com/davecgh/go-spew v1.1.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/djherbis/times v1.0.1
	github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible
	github.com/go-ini/ini v1.37.0 // indirect
	github.com/go-ini/ini v1.38.2 // indirect
	github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
	github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1
	github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 // indirect
	github.com/golang/protobuf v1.1.0 // indirect
	github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect
	github.com/google/go-querystring v1.0.0 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/jlaffaye/ftp v0.0.0-20180404123514-2403248fa8cc
	github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
	github.com/jlaffaye/ftp v0.0.0-20180808211605-3f6433f7eae3
	github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
	github.com/jtolds/gls v4.2.1+incompatible // indirect
	github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
	github.com/kisielk/errcheck v1.1.0 // indirect
	github.com/kr/fs v0.1.0 // indirect
	github.com/mattn/go-runewidth v0.0.2 // indirect
	github.com/kr/pretty v0.1.0 // indirect
	github.com/mattn/go-runewidth v0.0.3 // indirect
	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
	github.com/ncw/swift v1.0.40
	github.com/nsf/termbox-go v0.0.0-20180613055208-5c94acc5e6eb
	github.com/ncw/swift v1.0.41
	github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e
	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/pengsrc/go-shared v0.2.0 // indirect
	github.com/pkg/errors v0.8.0
	github.com/pkg/sftp v1.8.0
	github.com/pkg/sftp v1.8.3
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/rfjakob/eme v0.0.0-20170305125520-01668ae55fe0
	github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
	github.com/russross/blackfriday v1.5.1 // indirect
	github.com/sevlyar/go-daemon v0.1.4
	github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c
	github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf // indirect
	github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect
	github.com/spf13/cobra v0.0.3
	github.com/spf13/pflag v1.0.1
	github.com/spf13/pflag v1.0.2
	github.com/stretchr/testify v1.2.2
	github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b
	github.com/xanzy/ssh-agent v0.2.0
	github.com/yunify/qingstor-sdk-go v2.2.14+incompatible
	golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6
	golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 // indirect
	golang.org/x/net v0.0.0-20180611182652-db08ff08e862
	golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a
	github.com/yunify/qingstor-sdk-go v2.2.15+incompatible
	golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b
	golang.org/x/net v0.0.0-20180921000356-2f5d2388922f
	golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
	golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
	golang.org/x/sys v0.0.0-20180616030259-6c888cc515d3
	golang.org/x/sys v0.0.0-20180920110915-d641721ec2de
	golang.org/x/text v0.3.0
	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
	golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48 // indirect
	google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9
	google.golang.org/appengine v1.1.0 // indirect
	google.golang.org/api v0.0.0-20180921000521-920bb1beccf7
	google.golang.org/appengine v1.2.0 // indirect
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
	gopkg.in/ini.v1 v1.38.2 // indirect
	gopkg.in/yaml.v2 v2.2.1 // indirect
)
110
go.sum
@@ -1,11 +1,9 @@
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669 h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.23.0 h1:w1svupRqvZnfjN9+KksMiggoIRQuMzWkVzpxcR96xDs=
|
||||
cloud.google.com/go v0.23.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876 h1:3c3mGlhASTJh6H6Ba9EHv2FDSmEUyJuJHR6UD7b+YuE=
|
||||
github.com/Azure/azure-pipeline-go v0.0.0-20180607212504-7571e8eb0876/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e h1:Ix5oKbq0MlolI+T4EPCL9sddfEw6LgRMpC+qx0Kz5/E=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20180712005634-eaae161d9d5e/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY=
|
||||
cloud.google.com/go v0.28.0 h1:KZ/88LWSw8NxMkjdQyX7LQSGR9PkHr4PaVuNm8zgFq0=
|
||||
cloud.google.com/go v0.28.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8 h1:KmVRa8oFMaargVesEuuEoiLCQ4zCCwQ8QX/xg++KS20=
|
||||
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7 h1:/dp3cg/uwEn3nCIvapCpvwMLMCW34Z9U/UOeTroS6p0=
|
||||
github.com/Azure/azure-storage-blob-go v0.0.0-20180906215025-bb46532f68b7/go.mod h1:x2mtS6O3mnMEZOJp7d7oldh8IvatBrMfReiyQ+cKgKY=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
@@ -16,56 +14,60 @@ github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6 h1:UCQe3W9LxwL2ff5r0PqQfS
|
||||
github.com/a8m/tree v0.0.0-20180321023834-3cf936ce15d6/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
|
||||
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
|
||||
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
|
||||
github.com/aws/aws-sdk-go v1.14.8 h1:vO7TW0IyYlGTjs2eWlHbv88uK9kjj8eiadyKEo4GUO4=
|
||||
github.com/aws/aws-sdk-go v1.14.8/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
|
||||
github.com/aws/aws-sdk-go v1.15.39 h1:MUxm375zGxxVhgL5NNCIDuwn4SkMQsX8CQWD+zLULcw=
|
||||
github.com/aws/aws-sdk-go v1.15.39/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
|
||||
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
|
||||
github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
|
||||
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98 h1:0gQU5Ebjs1V8Ow5bBzxZzr0peNjJILSkSb30IfZtshQ=
|
||||
github.com/coreos/bbolt v0.0.0-20180318001526-af9db2027c98/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
|
||||
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/djherbis/times v1.0.1 h1:nVRrVOTFd2r0C7wCQdIDz/fqt8yO0EEzr5f6aXfXiS0=
|
||||
github.com/djherbis/times v1.0.1/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible h1:ZFvUIiBbGhDY5zF8yjLoWhUAYs7uDodUpbvTS5oelDE=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
|
||||
github.com/go-ini/ini v1.37.0 h1:/FpMfveJbc7ExTTDgT5nL9Vw+aZdst/c2dOxC931U+M=
|
||||
github.com/go-ini/ini v1.37.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-ini/ini v1.38.2 h1:6Hl/z3p3iFkA0dlDfzYxuFuUGD+kaweypF6btsR2/Q4=
|
||||
github.com/go-ini/ini v1.38.2/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
|
||||
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
|
||||
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1 h1:WjgeEHEDLGx56ndxS6FYi6qFjZGajSVHPuEPdpJ60cI=
|
||||
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
|
||||
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0=
|
||||
github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c h1:16eHWuMGvCjSfgRJKqIzapE78onvvTbdi1rMkU00lZw=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jlaffaye/ftp v0.0.0-20180404123514-2403248fa8cc h1:lWFup/SOhwcpvRJIFqx/WQis5U4SrOSyWfSqvfdF09w=
|
||||
github.com/jlaffaye/ftp v0.0.0-20180404123514-2403248fa8cc/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
|
||||
github.com/jlaffaye/ftp v0.0.0-20180808211605-3f6433f7eae3 h1:UHMAxHSQqPxQi/55JSaFYbV0eqjoxlT9NYTS2AuGklQ=
|
||||
github.com/jlaffaye/ftp v0.0.0-20180808211605-3f6433f7eae3/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0 h1:ZqfnKyx9KGpRcW04j5nnPDgRgoXUeLh2YFBeFzphcA0=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
|
||||
github.com/ncw/swift v1.0.40 h1:0c+kzSF82qgP2TvDHwC534eoAMYTRS1jmr6KIMftTk0=
|
||||
github.com/ncw/swift v1.0.40/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/nsf/termbox-go v0.0.0-20180613055208-5c94acc5e6eb h1:YahEjAGkJtCrkqgVHhX6n8ZX+CZ3hDRL9fjLYugLfSs=
|
||||
github.com/nsf/termbox-go v0.0.0-20180613055208-5c94acc5e6eb/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
|
||||
github.com/ncw/swift v1.0.41 h1:kfoTVQKt1A4n0m1Q3YWku9OoXfpo06biqVfi73yseBs=
|
||||
github.com/ncw/swift v1.0.41/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e h1:fvw0uluMptljaRKSU8459cJ4bmi3qUYyMs5kzpic2fY=
|
||||
github.com/nsf/termbox-go v0.0.0-20180819125858-b66b20ab708e/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
|
||||
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
@@ -74,12 +76,12 @@ github.com/pengsrc/go-shared v0.2.0 h1:Ho86LhaXOYgv9FjBmIp5CO0LmaIj49H2HZhYh0+7u
|
||||
github.com/pengsrc/go-shared v0.2.0/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.8.0 h1:SJ2EX5aeifvl4zzbci3urQbr5p7Xc/A7/Ia9T8ahhNM=
|
||||
github.com/pkg/sftp v1.8.0/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
|
||||
github.com/pkg/sftp v1.8.3 h1:9jSe2SxTM8/3bXZjtqnkgTBW+lA8db0knZJyns7gpBA=
|
||||
github.com/pkg/sftp v1.8.3/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rfjakob/eme v0.0.0-20170305125520-01668ae55fe0 h1:A0JhyqmDea4+VnaZrjzuOxt2SEgy0Sil7a7FTjPUGrc=
|
||||
github.com/rfjakob/eme v0.0.0-20170305125520-01668ae55fe0/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
|
||||
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46 h1:w2CpS5muK+jyydnmlkqpAhzKmHmMBzBkfYUDjQNS1Dk=
|
||||
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
|
||||
github.com/russross/blackfriday v1.5.1 h1:B8ZN6pD4PVofmlDCDUdELeYrbsVIDM/bpjW3v3zgcRc=
|
||||
github.com/russross/blackfriday v1.5.1/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sevlyar/go-daemon v0.1.4 h1:Ayxp/9SNHwPBjV+kKbnHl2ch6rhxTu08jfkGkoxgULQ=
|
||||
@@ -92,41 +94,39 @@ github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbm
|
||||
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc=
|
||||
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b h1:Yt/fB2INfWb29Vcya4X0BNCLmObKmDdt0o0IndFzEY8=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20180817194457-854bf31d998b/go.mod h1:ObGZcW2yPzFXEsaTZVMgLKhdUSHMYM3aEDE/u7YnfU8=
|
||||
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
|
||||
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
|
||||
github.com/yunify/qingstor-sdk-go v2.2.14+incompatible h1:FKJYNihc8AXfaSm1R8RpcoqvCYR68Nml5Hku0VtF1cI=
|
||||
github.com/yunify/qingstor-sdk-go v2.2.14+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
|
||||
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6 h1:Y9MTpro8EV2sz/pZRxSgNsvSfMXLmIHhQO4BGv2My/Q=
|
||||
golang.org/x/crypto v0.0.0-20180617042118-027cca12c2d6/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 h1:00BeQWmeaGazuOrq8Q5K5d3/cHaGuFrZzpaHBXfrsUA=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/net v0.0.0-20180611182652-db08ff08e862 h1:JZi6BqOZ+iSgmLWe6llhGrNnEnK+YB/MRkStwnEfbqM=
|
||||
golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a h1:1Fy38jwe/QZhQfFQBy6dMj9F/WU1C+jo3/zLNr/WhW4=
|
||||
golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible h1:/Z0q3/eSMoPYAuRmhjWtuGSmVVciFC6hfm3yfCKuvz0=
|
||||
github.com/yunify/qingstor-sdk-go v2.2.15+incompatible/go.mod h1:w6wqLDQ5bBTzxGJ55581UrSwLrsTAsdo9N6yX/8d9RY=
|
||||
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ=
|
||||
golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180921000356-2f5d2388922f h1:QM2QVxvDoW9PFSPp/zy9FgxJLfaWTZlS61KEPtBwacM=
|
||||
golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180616030259-6c888cc515d3 h1:FCfAlbS73+IQQJktaKGHldMdL2bGDVpm+OrCEbVz1f4=
|
||||
golang.org/x/sys v0.0.0-20180616030259-6c888cc515d3/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180920110915-d641721ec2de h1:soC2mvPVpAV+Ld2qtpNn1eq25WTn76uIGNV23bofu6Q=
|
||||
golang.org/x/sys v0.0.0-20180920110915-d641721ec2de/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48 h1:PIz+xUHW4G/jqfFWeKhQ96ZV/t2HDsXfWj923rV0bZY=
|
||||
golang.org/x/tools v0.0.0-20180904205237-0aa4b8830f48/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9 h1:HcrS+AkAU4PJZAVkIazs99Rc+swHNe1NnV4Gm3RSDCo=
|
||||
google.golang.org/api v0.0.0-20180614000435-2eea9ba0a3d9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/api v0.0.0-20180921000521-920bb1beccf7 h1:XKT3Wlpn+o6Car1ot74Z4R+R9CeRfITCLZb0Q9/mpx4=
|
||||
google.golang.org/api v0.0.0-20180921000521-920bb1beccf7/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/appengine v1.2.0 h1:S0iUepdCWODXRvtE+gcRDd15L+k+k1AiHlMiMjefH24=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/ini.v1 v1.38.2 h1:dGcbywv4RufeGeiMycPT/plKB5FtmLKLnWKwBiLhUA4=
|
||||
gopkg.in/ini.v1 v1.38.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
|
456
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
@@ -64,7 +64,7 @@ var (
|
||||
)
|
||||
|
||||
var (
|
||||
metaClient = &http.Client{
|
||||
defaultClient = &Client{hc: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
@@ -72,15 +72,15 @@ var (
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 2 * time.Second,
|
||||
},
|
||||
}
|
||||
subscribeClient = &http.Client{
|
||||
}}
|
||||
subscribeClient = &Client{hc: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
}
|
||||
}}
|
||||
)
|
||||
|
||||
// NotDefinedError is returned when requested metadata is not defined.
|
||||
@@ -95,74 +95,16 @@ func (suffix NotDefinedError) Error() string {
|
||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||
}
|
||||
|
||||
// Get returns a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
//
|
||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||
// 169.254.169.254 will be used instead.
|
||||
//
|
||||
// If the requested metadata is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
func Get(suffix string) (string, error) {
|
||||
val, _, err := getETag(metaClient, suffix)
|
||||
return val, err
|
||||
}
|
||||
|
||||
// getETag returns a value from the metadata service as well as the associated
|
||||
// ETag using the provided client. This func is otherwise equivalent to Get.
|
||||
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
|
||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||
// a container, which is an important use-case for local testing of cloud
|
||||
// deployments. To enable spoofing of the metadata service, the environment
|
||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||
// requests shall go.
|
||||
host := os.Getenv(metadataHostEnv)
|
||||
if host == "" {
|
||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||
// binaries built with the "netgo" tag and without cgo won't
|
||||
// know the search suffix for "metadata" is
|
||||
// ".google.internal", and this IP address is documented as
|
||||
// being stable anyway.
|
||||
host = metadataIP
|
||||
}
|
||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return "", "", NotDefinedError(suffix)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
|
||||
}
|
||||
all, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string(all), res.Header.Get("Etag"), nil
|
||||
}
|
||||
|
||||
func getTrimmed(suffix string) (s string, err error) {
|
||||
s, err = Get(suffix)
|
||||
s = strings.TrimSpace(s)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *cachedValue) get() (v string, err error) {
|
||||
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
||||
defer c.mu.Unlock()
|
||||
c.mu.Lock()
|
||||
if c.v != "" {
|
||||
return c.v, nil
|
||||
}
|
||||
if c.trim {
|
||||
v, err = getTrimmed(c.k)
|
||||
v, err = cl.getTrimmed(c.k)
|
||||
} else {
|
||||
v, err = Get(c.k)
|
||||
v, err = cl.Get(c.k)
|
||||
}
|
||||
if err == nil {
|
||||
c.v = v
|
||||
@@ -201,7 +143,7 @@ func testOnGCE() bool {
|
||||
go func() {
|
||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := ctxhttp.Do(ctx, metaClient, req)
|
||||
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
|
||||
if err != nil {
|
||||
resc <- false
|
||||
return
|
||||
@@ -266,6 +208,255 @@ func systemInfoSuggestsGCE() bool {
|
||||
return name == "Google" || name == "Google Compute Engine"
|
||||
}
|
||||
|
||||
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
||||
// ResponseHeaderTimeout).
|
||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
return subscribeClient.Subscribe(suffix, fn)
|
||||
}
|
||||
|
||||
// Get calls Client.Get on the default client.
|
||||
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func Hostname() (string, error) { return defaultClient.Hostname() }
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func Zone() (string, error) { return defaultClient.Zone() }
|
||||
|
||||
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
||||
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
||||
|
||||
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
||||
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
||||
|
||||
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
||||
func InstanceAttributeValue(attr string) (string, error) {
|
||||
return defaultClient.InstanceAttributeValue(attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
||||
func ProjectAttributeValue(attr string) (string, error) {
|
||||
return defaultClient.ProjectAttributeValue(attr)
|
||||
}
|
||||
|
||||
// Scopes calls Client.Scopes on the default client.
|
||||
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
||||
|
||||
func strsContains(ss []string, s string) bool {
|
||||
for _, v := range ss {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// A Client provides metadata.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
}
|
||||
|
||||
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
|
||||
// will use the given http.Client instead of the default client.
|
||||
func NewClient(c *http.Client) *Client {
|
||||
return &Client{hc: c}
|
||||
}
|
||||
|
||||
// getETag returns a value from the metadata service as well as the associated ETag.
|
||||
// This func is otherwise equivalent to Get.
|
||||
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||
// a container, which is an important use-case for local testing of cloud
|
||||
// deployments. To enable spoofing of the metadata service, the environment
|
||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||
// requests shall go.
|
||||
host := os.Getenv(metadataHostEnv)
|
||||
if host == "" {
|
||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||
// binaries built with the "netgo" tag and without cgo won't
|
||||
// know the search suffix for "metadata" is
|
||||
// ".google.internal", and this IP address is documented as
|
||||
// being stable anyway.
|
||||
host = metadataIP
|
||||
}
|
||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return "", "", NotDefinedError(suffix)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
|
||||
}
|
||||
all, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string(all), res.Header.Get("Etag"), nil
|
||||
}
|
||||
|
||||
// Get returns a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
//
|
||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||
// 169.254.169.254 will be used instead.
|
||||
//
|
||||
// If the requested metadata is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
func (c *Client) Get(suffix string) (string, error) {
|
||||
val, _, err := c.getETag(suffix)
|
||||
return val, err
|
||||
}
|
||||
|
||||
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
||||
s, err = c.Get(suffix)
|
||||
s = strings.TrimSpace(s)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Client) lines(suffix string) ([]string, error) {
|
||||
j, err := c.Get(suffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||
for i := range s {
|
||||
s[i] = strings.TrimSpace(s[i])
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func (c *Client) InternalIP() (string, error) {
|
||||
return c.getTrimmed("instance/network-interfaces/0/ip")
|
||||
}
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func (c *Client) ExternalIP() (string, error) {
|
||||
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||
}
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func (c *Client) Hostname() (string, error) {
|
||||
return c.getTrimmed("instance/hostname")
|
||||
}
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func (c *Client) InstanceTags() ([]string, error) {
|
||||
var s []string
|
||||
j, err := c.Get("instance/tags")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func (c *Client) InstanceName() (string, error) {
|
||||
host, err := c.Hostname()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Split(host, ".")[0], nil
|
||||
}
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func (c *Client) Zone() (string, error) {
|
||||
zone, err := c.getTrimmed("instance/zone")
|
||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||
}
|
||||
|
||||
// InstanceAttributes returns the list of user-defined attributes,
|
||||
// assigned when initially creating a GCE VM instance. The value of an
|
||||
// attribute can be obtained with InstanceAttributeValue.
|
||||
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
|
||||
|
||||
// ProjectAttributes returns the list of user-defined attributes
|
||||
// applying to the project as a whole, not just this VM. The value of
|
||||
// an attribute can be obtained with ProjectAttributeValue.
|
||||
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
||||
|
||||
// InstanceAttributeValue returns the value of the provided VM
|
||||
// instance attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
|
||||
return c.Get("instance/attributes/" + attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue returns the value of the provided
|
||||
// project attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
|
||||
return c.Get("project/attributes/" + attr)
|
||||
}
|
||||
|
||||
// Scopes returns the service account scopes for the given account.
|
||||
// The account may be empty or the string "default" to use the instance's
|
||||
// main account.
|
||||
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
||||
if serviceAccount == "" {
|
||||
serviceAccount = "default"
|
||||
}
|
||||
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||
}
|
||||
|
||||
// Subscribe subscribes to a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
// The suffix may contain query parameters.
|
||||
@@ -275,11 +466,11 @@ func systemInfoSuggestsGCE() bool {
|
||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
||||
// is deleted. Subscribe returns the error value returned from the last call to
|
||||
// fn, which may be nil when ok == false.
|
||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
const failedSubscribeSleep = time.Second * 5
|
||||
|
||||
// First check to see if the metadata value exists at all.
|
||||
val, lastETag, err := getETag(subscribeClient, suffix)
|
||||
val, lastETag, err := c.getETag(suffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -295,7 +486,7 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
suffix += "?wait_for_change=true&last_etag="
|
||||
}
|
||||
for {
|
||||
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
|
||||
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
|
||||
if err != nil {
|
||||
if _, deleted := err.(NotDefinedError); !deleted {
|
||||
time.Sleep(failedSubscribeSleep)
|
||||
@@ -310,128 +501,3 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func ProjectID() (string, error) { return projID.get() }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func NumericProjectID() (string, error) { return projNum.get() }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func InternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/ip")
|
||||
}
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func ExternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||
}
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func Hostname() (string, error) {
|
||||
return getTrimmed("instance/hostname")
|
||||
}
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func InstanceTags() ([]string, error) {
|
||||
var s []string
|
||||
j, err := Get("instance/tags")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func InstanceID() (string, error) {
|
||||
return instID.get()
|
||||
}
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func InstanceName() (string, error) {
|
||||
host, err := Hostname()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Split(host, ".")[0], nil
|
||||
}
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func Zone() (string, error) {
|
||||
zone, err := getTrimmed("instance/zone")
|
||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||
}
|
||||
|
||||
// InstanceAttributes returns the list of user-defined attributes,
|
||||
// assigned when initially creating a GCE VM instance. The value of an
|
||||
// attribute can be obtained with InstanceAttributeValue.
|
||||
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
|
||||
|
||||
// ProjectAttributes returns the list of user-defined attributes
|
||||
// applying to the project as a whole, not just this VM. The value of
|
||||
// an attribute can be obtained with ProjectAttributeValue.
|
||||
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
|
||||
|
||||
func lines(suffix string) ([]string, error) {
|
||||
j, err := Get(suffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||
for i := range s {
|
||||
s[i] = strings.TrimSpace(s[i])
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceAttributeValue returns the value of the provided VM
|
||||
// instance attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func InstanceAttributeValue(attr string) (string, error) {
|
||||
return Get("instance/attributes/" + attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue returns the value of the provided
|
||||
// project attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func ProjectAttributeValue(attr string) (string, error) {
|
||||
return Get("project/attributes/" + attr)
|
||||
}
|
||||
|
||||
// Scopes returns the service account scopes for the given account.
|
||||
// The account may be empty or the string "default" to use the instance's
|
||||
// main account.
|
||||
func Scopes(serviceAccount string) ([]string, error) {
|
||||
if serviceAccount == "" {
|
||||
serviceAccount = "default"
|
||||
}
|
||||
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||
}
|
||||
|
||||
func strsContains(ss []string, s string) bool {
|
||||
for _, v := range ss {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
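The metadata.go changes above replace the package-level helpers with methods on a new Client type, while keeping the old functions as thin wrappers around defaultClient. A minimal sketch of how a caller might use the new constructor with its own http.Client; the 3-second timeout is only an illustration, not something this commit prescribes.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Package-level helpers still work and use the built-in default client.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}

	// The new Client type lets callers control the underlying HTTP client.
	c := metadata.NewClient(&http.Client{Timeout: 3 * time.Second})
	zone, err := c.Zone() // e.g. "us-central1-b"
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("zone:", zone)
}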
4
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
@@ -91,6 +91,6 @@ func (c *Client) AddDebugHandlers() {
		return
	}

	c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
	c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
}
88
vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
generated
vendored
@@ -44,12 +44,22 @@ func (reader *teeReaderCloser) Close() error {
	return reader.Source.Close()
}

// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
// to a service. Will include the HTTP request body if the LogLevel of the
// request matches LogDebugWithHTTPBody.
var LogHTTPRequestHandler = request.NamedHandler{
	Name: "awssdk.client.LogRequest",
	Fn:   logRequest,
}

func logRequest(r *request.Request) {
	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	bodySeekable := aws.IsReaderSeekable(r.Body)
	dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)

	b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
	if err != nil {
		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, err))
		return
	}

@@ -63,7 +73,28 @@ func logRequest(r *request.Request) {
		r.ResetBody()
	}

	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}

// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
// to a service. Will only log the HTTP request's headers. The request payload
// will not be read.
var LogHTTPRequestHeaderHandler = request.NamedHandler{
	Name: "awssdk.client.LogRequestHeader",
	Fn:   logRequestHeader,
}

func logRequestHeader(r *request.Request) {
	b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
	if err != nil {
		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, err))
		return
	}

	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}

const logRespMsg = `DEBUG: Response %s/%s Details:
@@ -76,27 +107,44 @@ const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
%s
-----------------------------------------------------`

// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
// received from a service. Will include the HTTP response body if the LogLevel
// of the request matches LogDebugWithHTTPBody.
var LogHTTPResponseHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponse",
	Fn:   logResponse,
}

func logResponse(r *request.Request) {
	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}

	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
	if logBody {
		r.HTTPResponse.Body = &teeReaderCloser{
			Reader: io.TeeReader(r.HTTPResponse.Body, lw),
			Source: r.HTTPResponse.Body,
		}
	}

	handlerFn := func(req *request.Request) {
		body, err := httputil.DumpResponse(req.HTTPResponse, false)
		b, err := httputil.DumpResponse(req.HTTPResponse, false)
		if err != nil {
			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
			lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
				req.ClientInfo.ServiceName, req.Operation.Name, err))
			return
		}

		lw.Logger.Log(fmt.Sprintf(logRespMsg,
			req.ClientInfo.ServiceName, req.Operation.Name, string(b)))

		if logBody {
			b, err := ioutil.ReadAll(lw.buf)
			if err != nil {
				lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
				lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
					req.ClientInfo.ServiceName, req.Operation.Name, err))
				return
			}
			lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
			if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {

			lw.Logger.Log(string(b))
		}
	}
@@ -110,3 +158,27 @@ func logResponse(r *request.Request) {
		Name: handlerName, Fn: handlerFn,
	})
}

// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
// response received from a service. Will only log the HTTP response's headers.
// The response payload will not be read.
var LogHTTPResponseHeaderHandler = request.NamedHandler{
	Name: "awssdk.client.LogResponseHeader",
	Fn:   logResponseHeader,
}

func logResponseHeader(r *request.Request) {
	if r.Config.Logger == nil {
		return
	}

	b, err := httputil.DumpResponse(r.HTTPResponse, false)
	if err != nil {
		r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
			r.ClientInfo.ServiceName, r.Operation.Name, err))
		return
	}

	r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
}
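The two header-only handlers introduced above (LogHTTPRequestHeaderHandler and LogHTTPResponseHeaderHandler) are exported but not wired in by default. A hedged sketch of how an application might swap them in for the body-capable defaults on a service client; the S3 client, log level, and the use of Handlers.Send.Remove are assumptions for illustration, not part of this commit.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func newHeaderLoggingS3() *s3.S3 {
	// Debug logging makes AddDebugHandlers attach the default loggers.
	sess := session.Must(session.NewSession(&aws.Config{
		LogLevel: aws.LogLevel(aws.LogDebug),
	}))
	svc := s3.New(sess)

	// Swap the default loggers for the new header-only variants.
	svc.Handlers.Send.Remove(client.LogHTTPRequestHandler)
	svc.Handlers.Send.Remove(client.LogHTTPResponseHandler)
	svc.Handlers.Send.PushFrontNamed(client.LogHTTPRequestHeaderHandler)
	svc.Handlers.Send.PushBackNamed(client.LogHTTPResponseHeaderHandler)

	return svc
}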
9
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
@@ -158,13 +158,14 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {

// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
	if e.CurrentTime == nil {
		e.CurrentTime = time.Now
	curTime := e.CurrentTime
	if curTime == nil {
		curTime = time.Now
	}
	return e.expiration.Before(e.CurrentTime())
	return e.expiration.Before(curTime())
}

// A Credentials provides synchronous safe retrieval of AWS credentials Value.
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
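The Expiry change above stops IsExpired from writing to e.CurrentTime, which is what makes the cached credentials safe to use concurrently. For context, a hedged sketch of the usual pattern: a custom provider embeds credentials.Expiry and gets IsExpired for free. The token source, 15-minute lifetime, and provider name here are illustrative assumptions, not SDK APIs.

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// tokenProvider is a hypothetical provider that fetches short-lived keys.
type tokenProvider struct {
	credentials.Expiry // supplies IsExpired via SetExpiration
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// fetchKeys stands in for a call to some external token service.
	id, secret, token := fetchKeys()

	// Valid for 15 minutes, with a 1 minute early-expiry window so the
	// chain refreshes the keys before they actually lapse.
	p.SetExpiration(time.Now().Add(15*time.Minute), time.Minute)

	return credentials.Value{
		AccessKeyID:     id,
		SecretAccessKey: secret,
		SessionToken:    token,
		ProviderName:    "TokenProvider",
	}, nil
}

func fetchKeys() (string, string, string) { return "AKID", "SECRET", "TOKEN" }

func main() {
	creds := credentials.NewCredentials(&tokenProvider{})
	_, _ = creds.Get()
}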
6
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
generated
vendored
@@ -4,7 +4,6 @@ import (
	"bufio"
	"encoding/json"
	"fmt"
	"path"
	"strings"
	"time"

@@ -12,6 +11,7 @@ import (
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/internal/sdkuri"
)

// ProviderName provides a name of EC2Role provider
@@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct {
	Message string
}

const iamSecurityCredsPath = "/iam/security-credentials"
const iamSecurityCredsPath = "iam/security-credentials/"

// requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request
@@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
// If the credentials cannot be found, or there is an error reading the response
// and error will be returned.
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
	resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
	if err != nil {
		return ec2RoleCredRespBody{},
			awserr.New("EC2RoleRequestError",
9
vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
generated
vendored
@@ -90,14 +90,13 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
}

func setError(m *metric, err awserr.Error) {
	msg := err.Message()
	msg := err.Error()
	code := err.Code()

	switch code {
	case "RequestError",
		"SerializationError",
		request.CanceledErrorCode:

		m.SDKException = &code
		m.SDKExceptionMessage = &msg
	default:
@@ -223,8 +222,10 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
	}

	apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
	handlers.Complete.PushFrontNamed(apiCallHandler)

	apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}

	handlers.Complete.PushFrontNamed(apiCallHandler)
	handlers.Complete.PushFrontNamed(apiCallAttemptHandler)

	handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
}
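The reporter change above registers the call-attempt handler on both the Complete and AfterRetry handler lists. For context, a hedged sketch of how client-side metrics are typically switched on and attached to a session; the client ID and listener address are placeholders, and the exact csm.Start signature is an assumption rather than something shown in this diff.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/csm"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Start the client-side metrics reporter (address is illustrative).
	r, err := csm.Start("rclone-example", "127.0.0.1:31000")
	if err != nil {
		log.Fatal(err)
	}

	sess := session.Must(session.NewSession())
	// Attach the metric handlers shown in the diff to this session.
	r.InjectHandlers(&sess.Handlers)
}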
17
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
@@ -92,12 +92,23 @@ func Handlers() request.Handlers {
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
	return credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
		Providers: []credentials.Provider{
		Providers:     CredProviders(cfg, handlers),
	})
}

// CredProviders returns the slice of providers used in
// the default credential chain.
//
// For applications that need to use some other provider (for example use
// different environment variables for legacy reasons) but still fall back
// on the default chain of providers. This allows that default chaint to be
// automatically updated
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
	return []credentials.Provider{
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
		RemoteCredProvider(*cfg, handlers),
		},
	})
}
}

const (
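The new CredProviders function above exposes the default provider slice so applications can extend the chain instead of re-creating it. A rough sketch under that assumption; legacyEnvProvider is a made-up extra provider used only for illustration, not part of the SDK.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

// legacyEnvProvider is a hypothetical provider reading non-standard env vars.
type legacyEnvProvider struct{}

func (legacyEnvProvider) Retrieve() (credentials.Value, error) {
	return credentials.Value{}, nil // real lookup omitted
}
func (legacyEnvProvider) IsExpired() bool { return false }

func main() {
	cfg := aws.NewConfig()
	handlers := defaults.Handlers()

	// Try the legacy provider first, then fall back on the default chain.
	providers := append(
		[]credentials.Provider{legacyEnvProvider{}},
		defaults.CredProviders(cfg, handlers)...,
	)
	creds := credentials.NewChainCredentials(providers)
	_ = creds
}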
8
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
@@ -4,12 +4,12 @@ import (
	"encoding/json"
	"fmt"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkuri"
)

// GetMetadata uses the path provided to request information from the EC2
@@ -19,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetMetadata",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "meta-data", p),
		HTTPPath:   sdkuri.PathJoin("/meta-data", p),
	}

	output := &metadataOutput{}
@@ -35,7 +35,7 @@ func (c *EC2Metadata) GetUserData() (string, error) {
	op := &request.Operation{
		Name:       "GetUserData",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "user-data"),
		HTTPPath:   "/user-data",
	}

	output := &metadataOutput{}
@@ -56,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
	op := &request.Operation{
		Name:       "GetDynamicData",
		HTTPMethod: "GET",
		HTTPPath:   path.Join("/", "dynamic", p),
		HTTPPath:   sdkuri.PathJoin("/dynamic", p),
	}

	output := &metadataOutput{}
22 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go generated vendored
@ -84,6 +84,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol
|
||||
custAddEC2Metadata(p)
|
||||
custAddS3DualStack(p)
|
||||
custRmIotDataService(p)
|
||||
custFixAppAutoscalingChina(p)
|
||||
}
|
||||
|
||||
return ps, nil
|
||||
@ -122,6 +123,27 @@ func custRmIotDataService(p *partition) {
|
||||
delete(p.Services, "data.iot")
|
||||
}
|
||||
|
||||
func custFixAppAutoscalingChina(p *partition) {
|
||||
if p.ID != "aws-cn" {
|
||||
return
|
||||
}
|
||||
|
||||
const serviceName = "application-autoscaling"
|
||||
s, ok := p.Services[serviceName]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
const expectHostname = `autoscaling.{region}.amazonaws.com`
|
||||
if e, a := s.Defaults.Hostname, expectHostname; e != a {
|
||||
fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
|
||||
return
|
||||
}
|
||||
|
||||
s.Defaults.Hostname = expectHostname + ".cn"
|
||||
p.Services[serviceName] = s
|
||||
}
|
||||
|
||||
type decodeModelError struct {
|
||||
awsError
|
||||
}
|
||||
|
212 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go generated vendored
@ -50,6 +50,7 @@ const (
|
||||
AcmPcaServiceID = "acm-pca" // AcmPca.
|
||||
ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
|
||||
ApiPricingServiceID = "api.pricing" // ApiPricing.
|
||||
ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
|
||||
ApigatewayServiceID = "apigateway" // Apigateway.
|
||||
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
|
||||
Appstream2ServiceID = "appstream2" // Appstream2.
|
||||
@ -112,6 +113,7 @@ const (
|
||||
ImportexportServiceID = "importexport" // Importexport.
|
||||
InspectorServiceID = "inspector" // Inspector.
|
||||
IotServiceID = "iot" // Iot.
|
||||
IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
|
||||
KinesisServiceID = "kinesis" // Kinesis.
|
||||
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
|
||||
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
|
||||
@ -146,7 +148,6 @@ const (
|
||||
RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
|
||||
RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
|
||||
S3ServiceID = "s3" // S3.
|
||||
SagemakerServiceID = "sagemaker" // Sagemaker.
|
||||
SdbServiceID = "sdb" // Sdb.
|
||||
SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
|
||||
ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
|
||||
@ -304,6 +305,7 @@ var awsPartition = partition{
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -330,6 +332,19 @@ var awsPartition = partition{
|
||||
"us-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"api.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"apigateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -394,10 +409,13 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -453,6 +471,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@ -571,6 +590,7 @@ var awsPartition = partition{
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@ -786,6 +806,7 @@ var awsPartition = partition{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@ -1029,6 +1050,12 @@ var awsPartition = partition{
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"fips": endpoint{
|
||||
Hostname: "elasticache-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@ -1059,7 +1086,9 @@ var awsPartition = partition{
|
||||
"elasticfilesystem": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
@ -1094,7 +1123,7 @@ var awsPartition = partition{
|
||||
"elasticmapreduce": service{
|
||||
Defaults: endpoint{
|
||||
SSLCommonName: "{region}.{service}.{dnsSuffix}",
|
||||
Protocols: []string{"http", "https"},
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
@ -1193,10 +1222,16 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@ -1208,6 +1243,7 @@ var awsPartition = partition{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@ -1246,6 +1282,7 @@ var awsPartition = partition{
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@ -1260,6 +1297,7 @@ var awsPartition = partition{
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
@ -1277,6 +1315,7 @@ var awsPartition = partition{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@ -1373,6 +1412,15 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"iotanalytics": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"kinesis": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -1396,6 +1444,7 @@ var awsPartition = partition{
|
||||
"kinesisanalytics": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@ -1526,9 +1575,12 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@ -1552,6 +1604,7 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
@ -1810,6 +1863,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@ -1852,6 +1906,9 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@ -1917,16 +1974,6 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"sdb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
@ -1957,11 +2004,36 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-1-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"serverlessrepo": service{
|
||||
@ -2036,9 +2108,15 @@ var awsPartition = partition{
|
||||
"servicediscovery": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
@ -2129,10 +2207,30 @@ var awsPartition = partition{
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"fips-us-east-1": endpoint{},
|
||||
"fips-us-east-2": endpoint{},
|
||||
"fips-us-west-1": endpoint{},
|
||||
"fips-us-west-2": endpoint{},
|
||||
"fips-us-east-1": endpoint{
|
||||
Hostname: "sqs-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"fips-us-east-2": endpoint{
|
||||
Hostname: "sqs-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"fips-us-west-1": endpoint{
|
||||
Hostname: "sqs-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"fips-us-west-2": endpoint{
|
||||
Hostname: "sqs-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{
|
||||
SSLCommonName: "queue.{dnsSuffix}",
|
||||
@ -2167,6 +2265,7 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
@ -2340,8 +2439,26 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "translate-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "translate-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "translate-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"waf": service{
|
||||
@ -2462,11 +2579,12 @@ var awscnPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
Defaults: endpoint{
|
||||
Hostname: "autoscaling.{region}.amazonaws.com",
|
||||
Hostname: "autoscaling.{region}.amazonaws.com.cn",
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Service: "application-autoscaling",
|
||||
@ -2527,6 +2645,13 @@ var awscnPartition = partition{
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"dynamodb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
@ -2595,7 +2720,7 @@ var awscnPartition = partition{
|
||||
},
|
||||
"elasticmapreduce": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
@ -2658,6 +2783,7 @@ var awscnPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"logs": service{
|
||||
@ -2819,6 +2945,12 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"autoscaling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -2923,6 +3055,12 @@ var awsusgovPartition = partition{
|
||||
"elasticache": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"fips": endpoint{
|
||||
Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
@ -2944,7 +3082,7 @@ var awsusgovPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -2981,6 +3119,22 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"inspector": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"iot": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
Service: "execute-api",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"kinesis": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -3097,6 +3251,12 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"states": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"storagegateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@ -3137,5 +3297,19 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"translate": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
"us-gov-west-1-fips": endpoint{
|
||||
Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
2 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go generated vendored
@@ -97,7 +97,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
	}

	if t, ok := err.(temporaryError); ok {
		return t.Temporary()
		return t.Temporary() || isErrConnectionReset(err)
	}

	return isErrConnectionReset(err)
2 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go generated vendored
@@ -128,7 +128,7 @@ read. The Session will be created from configuration values from the shared
credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).

Credentials are the values the SDK should use for authenticating requests with
AWS Services. They arfrom a configuration file will need to include both
AWS Services. They are from a configuration file will need to include both
aws_access_key_id and aws_secret_access_key must be provided together in the
same file to be considered valid. The values will be ignored if not a complete
group. aws_session_token is an optional field that can be provided if both of
8 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go generated vendored
@@ -739,7 +739,15 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
	start, _ := reader.Seek(0, sdkio.SeekCurrent)
	defer reader.Seek(start, sdkio.SeekStart)

	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
	size, err := aws.SeekerLen(reader)
	if err != nil {
		io.Copy(hash, reader)
	} else {
		io.CopyN(hash, reader, size)
	}

	return hash.Sum(nil)
}

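The same size-aware hashing pattern can be reproduced outside the signer. A minimal sketch, assuming only the exported aws.SeekerLen helper and the standard library; hashBody is a hypothetical stand-in for the unexported makeSha256Reader:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

// hashBody hashes a seekable body without disturbing its read position, and
// uses io.CopyN when the length is known to avoid io.Copy's 32KB scratch buffer.
func hashBody(body io.ReadSeeker) []byte {
	hash := sha256.New()
	start, _ := body.Seek(0, io.SeekCurrent)
	defer body.Seek(start, io.SeekStart)

	if size, err := aws.SeekerLen(body); err != nil {
		io.Copy(hash, body)
	} else {
		io.CopyN(hash, body, size)
	}
	return hash.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", hashBody(strings.NewReader("hello world")))
}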
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.14.8"
const SDKVersion = "1.15.39"
23 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go generated vendored Normal file
@@ -0,0 +1,23 @@
package sdkuri

import (
	"path"
	"strings"
)

// PathJoin will join the elements of the path delimited by the "/"
// character. Similar to path.Join with the exception the trailing "/"
// character is preserved if present.
func PathJoin(elems ...string) string {
	if len(elems) == 0 {
		return ""
	}

	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
	str := path.Join(elems...)
	if hasTrailing && str != "/" {
		str += "/"
	}

	return str
}
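Because sdkuri sits under internal/ it cannot be imported from outside the SDK module, so the sketch below copies the PathJoin logic shown above to illustrate the one behavioural difference from the standard library's path.Join: a trailing "/" on the last element is preserved.

package main

import (
	"fmt"
	"path"
	"strings"
)

// pathJoin mirrors sdkuri.PathJoin from the file above: path.Join semantics,
// except that a trailing "/" on the final element is kept.
func pathJoin(elems ...string) string {
	if len(elems) == 0 {
		return ""
	}
	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
	str := path.Join(elems...)
	if hasTrailing && str != "/" {
		str += "/"
	}
	return str
}

func main() {
	fmt.Println(path.Join("/meta-data", "iam/")) // "/meta-data/iam"  (slash dropped)
	fmt.Println(pathJoin("/meta-data", "iam/"))  // "/meta-data/iam/" (slash preserved)
}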
36 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go generated vendored
@ -94,6 +94,9 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) {
|
||||
switch typ {
|
||||
case EventMessageType:
|
||||
return r.unmarshalEventMessage(msg)
|
||||
case ExceptionMessageType:
|
||||
err = r.unmarshalEventException(msg)
|
||||
return nil, err
|
||||
case ErrorMessageType:
|
||||
return nil, r.unmarshalErrorMessage(msg)
|
||||
default:
|
||||
@ -122,6 +125,39 @@ func (r *EventReader) unmarshalEventMessage(
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
func (r *EventReader) unmarshalEventException(
|
||||
msg eventstream.Message,
|
||||
) (err error) {
|
||||
eventType, err := GetHeaderString(msg, ExceptionTypeHeader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ev, err := r.unmarshalerForEventType(eventType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var ok bool
|
||||
err, ok = ev.(error)
|
||||
if !ok {
|
||||
err = messageError{
|
||||
code: "SerializationError",
|
||||
msg: fmt.Sprintf(
|
||||
"event stream exception %s mapped to non-error %T, %v",
|
||||
eventType, ev, ev,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) {
|
||||
var msgErr messageError
|
||||
|
||||
|
2 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go generated vendored
@@ -464,7 +464,7 @@ func (v *TimestampValue) decode(r io.Reader) error {
func timeFromEpochMilli(t int64) time.Time {
	secs := t / 1e3
	msec := t % 1e3
	return time.Unix(secs, msec*int64(time.Millisecond))
	return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
}

// An UUIDValue provides eventstream encoding, and representation of a UUID
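The added .UTC() only changes the Location attached to the returned value, not the instant it represents. A tiny illustration of the conversion the decoder performs; the millisecond value is arbitrary:

package main

import (
	"fmt"
	"time"
)

func main() {
	var millis int64 = 1398796238123 // 2014-04-29T18:30:38.123Z

	secs := millis / 1e3
	msec := millis % 1e3
	t := time.Unix(secs, msec*int64(time.Millisecond)).UTC()

	fmt.Println(t.Format(time.RFC3339Nano)) // 2014-04-29T18:30:38.123Z regardless of local zone
}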
7 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go generated vendored
@@ -233,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta
		v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
	case time.Time:
		const ISO8601UTC = "2006-01-02T15:04:05Z"
		v.Set(name, value.UTC().Format(ISO8601UTC))
		format := tag.Get("timestampFormat")
		if len(format) == 0 {
			format = protocol.ISO8601TimeFormatName
		}

		v.Set(name, protocol.FormatTime(format, value))
	default:
		return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
	}
6 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go generated vendored
@ -23,7 +23,11 @@ func Unmarshal(r *request.Request) {
|
||||
decoder := xml.NewDecoder(r.HTTPResponse.Body)
|
||||
err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed decoding Query response", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
14 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go generated vendored
@ -28,7 +28,11 @@ func UnmarshalError(r *request.Request) {
|
||||
|
||||
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed to read from query HTTP response body", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
@ -61,6 +65,10 @@ func UnmarshalError(r *request.Request) {
|
||||
}
|
||||
|
||||
// Failed to retrieve any error message from the response body
|
||||
r.Error = awserr.New("SerializationError",
|
||||
"failed to decode query XML error response", decodeErr)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError",
|
||||
"failed to decode query XML error response", decodeErr),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
}
|
||||
|
14 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go generated vendored
@ -20,11 +20,6 @@ import (
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
// RFC1123GMT is a RFC1123 (RFC822) formated timestame. This format is not
|
||||
// using the standard library's time.RFC1123 due to the desire to always use
|
||||
// GMT as the timezone.
|
||||
const RFC1123GMT = "Mon, 2 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// Whether the byte value can be sent without escaping in AWS URLs
|
||||
var noEscape [256]bool
|
||||
|
||||
@ -272,7 +267,14 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
|
||||
case float64:
|
||||
str = strconv.FormatFloat(value, 'f', -1, 64)
|
||||
case time.Time:
|
||||
str = value.UTC().Format(RFC1123GMT)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.RFC822TimeFormatName
|
||||
if tag.Get("location") == "querystring" {
|
||||
format = protocol.ISO8601TimeFormatName
|
||||
}
|
||||
}
|
||||
str = protocol.FormatTime(format, value)
|
||||
case aws.JSONValue:
|
||||
if len(value) == 0 {
|
||||
return "", errValueNotSet
|
||||
|
6 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go generated vendored
@ -198,7 +198,11 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
|
||||
}
|
||||
v.Set(reflect.ValueOf(&f))
|
||||
case *time.Time:
|
||||
t, err := time.Parse(time.RFC1123, header)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.RFC822TimeFormatName
|
||||
}
|
||||
t, err := protocol.ParseTime(format, header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
12 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go generated vendored
@ -36,7 +36,11 @@ func Build(r *request.Request) {
|
||||
var buf bytes.Buffer
|
||||
err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed to encode rest XML request", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
r.SetBufferBody(buf.Bytes())
|
||||
@ -50,7 +54,11 @@ func Unmarshal(r *request.Request) {
|
||||
decoder := xml.NewDecoder(r.HTTPResponse.Body)
|
||||
err := xmlutil.UnmarshalXML(r.Data, decoder, "")
|
||||
if err != nil {
|
||||
r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New("SerializationError", "failed to decode REST XML response", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
|
72 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go generated vendored Normal file
@@ -0,0 +1,72 @@
package protocol

import (
	"strconv"
	"time"
)

// Names of time formats supported by the SDK
const (
	RFC822TimeFormatName  = "rfc822"
	ISO8601TimeFormatName = "iso8601"
	UnixTimeFormatName    = "unixTimestamp"
)

// Time formats supported by the SDK
const (
	// RFC 7231#section-7.1.1.1 timestamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
	RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"

	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
	ISO8601TimeFormat = "2006-01-02T15:04:05Z"
)

// IsKnownTimestampFormat returns if the timestamp format name
// is known to the SDK's protocols.
func IsKnownTimestampFormat(name string) bool {
	switch name {
	case RFC822TimeFormatName:
		fallthrough
	case ISO8601TimeFormatName:
		fallthrough
	case UnixTimeFormatName:
		return true
	default:
		return false
	}
}

// FormatTime returns a string value of the time.
func FormatTime(name string, t time.Time) string {
	t = t.UTC()

	switch name {
	case RFC822TimeFormatName:
		return t.Format(RFC822TimeFormat)
	case ISO8601TimeFormatName:
		return t.Format(ISO8601TimeFormat)
	case UnixTimeFormatName:
		return strconv.FormatInt(t.Unix(), 10)
	default:
		panic("unknown timestamp format name, " + name)
	}
}

// ParseTime attempts to parse the time given the format. Returns
// the time if it was able to be parsed, and fails otherwise.
func ParseTime(formatName, value string) (time.Time, error) {
	switch formatName {
	case RFC822TimeFormatName:
		return time.Parse(RFC822TimeFormat, value)
	case ISO8601TimeFormatName:
		return time.Parse(ISO8601TimeFormat, value)
	case UnixTimeFormatName:
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return time.Time{}, err
		}
		return time.Unix(int64(v), 0), nil
	default:
		panic("unknown timestamp format name, " + formatName)
	}
}
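These helpers are what the query, rest and xmlutil marshalers in this diff now call instead of hard-coding layouts. A short sketch of round-tripping a timestamp through them; unlike internal/, private/protocol is an ordinary importable package path in aws-sdk-go v1:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	// Render the same instant in each supported wire format.
	fmt.Println(protocol.FormatTime(protocol.RFC822TimeFormatName, t))  // Tue, 29 Apr 2014 18:30:38 GMT
	fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t)) // 2014-04-29T18:30:38Z
	fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t))    // 1398796238

	// ParseTime is the inverse for a known format name.
	parsed, err := protocol.ParseTime(protocol.ISO8601TimeFormatName, "2014-04-29T18:30:38Z")
	fmt.Println(parsed.Equal(t), err) // true <nil>
}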
30 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go generated vendored
@ -13,9 +13,13 @@ import (
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
// BuildXML will serialize params into an xml.Encoder.
|
||||
// Error will be returned if the serialization of any of the params or nested values fails.
|
||||
// BuildXML will serialize params into an xml.Encoder. Error will be returned
|
||||
// if the serialization of any of the params or nested values fails.
|
||||
func BuildXML(params interface{}, e *xml.Encoder) error {
|
||||
return buildXML(params, e, false)
|
||||
}
|
||||
|
||||
func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
|
||||
b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
|
||||
root := NewXMLElement(xml.Name{})
|
||||
if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
|
||||
@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error {
|
||||
}
|
||||
for _, c := range root.Children {
|
||||
for _, v := range c {
|
||||
return StructToXML(e, v, false)
|
||||
return StructToXML(e, v, sorted)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -90,8 +94,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
||||
return nil
|
||||
}
|
||||
|
||||
fieldAdded := false
|
||||
|
||||
// unwrap payloads
|
||||
if payload := tag.Get("payload"); payload != "" {
|
||||
field, _ := value.Type().FieldByName(payload)
|
||||
@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
||||
child.Attr = append(child.Attr, ns)
|
||||
}
|
||||
|
||||
var payloadFields, nonPayloadFields int
|
||||
|
||||
t := value.Type()
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
member := elemOf(value.Field(i))
|
||||
@ -133,8 +137,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
||||
|
||||
mTag := field.Tag
|
||||
if mTag.Get("location") != "" { // skip non-body members
|
||||
nonPayloadFields++
|
||||
continue
|
||||
}
|
||||
payloadFields++
|
||||
|
||||
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
|
||||
token := protocol.GetIdempotencyToken()
|
||||
@ -149,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
|
||||
if err := b.buildValue(member, child, mTag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fieldAdded = true
|
||||
}
|
||||
|
||||
if fieldAdded { // only append this child if we have one ore more valid members
|
||||
// Only case where the child shape is not added is if the shape only contains
|
||||
// non-payload fields, e.g headers/query.
|
||||
if !(payloadFields == 0 && nonPayloadFields > 0) {
|
||||
current.AddChild(child)
|
||||
}
|
||||
|
||||
@ -278,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl
|
||||
case float32:
|
||||
str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
|
||||
case time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
str = converted.UTC().Format(ISO8601UTC)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.ISO8601TimeFormatName
|
||||
}
|
||||
|
||||
str = protocol.FormatTime(format, converted)
|
||||
default:
|
||||
return fmt.Errorf("unsupported value for param %s: %v (%s)",
|
||||
tag.Get("locationName"), value.Interface(), value.Type().Name())
|
||||
|
10 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go generated vendored
@ -9,6 +9,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
)
|
||||
|
||||
// UnmarshalXML deserializes an xml.Decoder into the container v. V
|
||||
@ -253,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
|
||||
}
|
||||
r.Set(reflect.ValueOf(&v))
|
||||
case *time.Time:
|
||||
const ISO8601UTC = "2006-01-02T15:04:05Z"
|
||||
t, err := time.Parse(ISO8601UTC, node.Text)
|
||||
format := tag.Get("timestampFormat")
|
||||
if len(format) == 0 {
|
||||
format = protocol.ISO8601TimeFormatName
|
||||
}
|
||||
|
||||
t, err := protocol.ParseTime(format, node.Text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
1 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go generated vendored
@@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode {

// AddChild adds child to the XMLNode.
func (n *XMLNode) AddChild(child *XMLNode) {
	child.parent = n
	if _, ok := n.Children[child.Name.Local]; !ok {
		n.Children[child.Name.Local] = []*XMLNode{}
	}
426 vendor/github.com/aws/aws-sdk-go/service/s3/api.go generated vendored
@ -11,7 +11,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/eventstream"
|
||||
@ -1956,6 +1958,8 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
|
||||
//
|
||||
// Deprecated: GetBucketLifecycle has been deprecated
|
||||
func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
|
||||
@ -1986,6 +1990,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
|
||||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation GetBucketLifecycle for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
|
||||
//
|
||||
// Deprecated: GetBucketLifecycle has been deprecated
|
||||
func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
|
||||
req, out := c.GetBucketLifecycleRequest(input)
|
||||
return out, req.Send()
|
||||
@ -2000,6 +2006,8 @@ func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifec
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: GetBucketLifecycleWithContext has been deprecated
|
||||
func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) {
|
||||
req, out := c.GetBucketLifecycleRequest(input)
|
||||
req.SetContext(ctx)
|
||||
@ -2331,6 +2339,8 @@ const opGetBucketNotification = "GetBucketNotification"
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
|
||||
//
|
||||
// Deprecated: GetBucketNotification has been deprecated
|
||||
func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
|
||||
@ -2361,6 +2371,8 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
|
||||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation GetBucketNotification for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
|
||||
//
|
||||
// Deprecated: GetBucketNotification has been deprecated
|
||||
func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
|
||||
req, out := c.GetBucketNotificationRequest(input)
|
||||
return out, req.Send()
|
||||
@ -2375,6 +2387,8 @@ func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequ
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: GetBucketNotificationWithContext has been deprecated
|
||||
func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) {
|
||||
req, out := c.GetBucketNotificationRequest(input)
|
||||
req.SetContext(ctx)
|
||||
@ -4813,6 +4827,8 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
|
||||
//
|
||||
// Deprecated: PutBucketLifecycle has been deprecated
|
||||
func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
|
||||
@ -4845,6 +4861,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
|
||||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation PutBucketLifecycle for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
|
||||
//
|
||||
// Deprecated: PutBucketLifecycle has been deprecated
|
||||
func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
|
||||
req, out := c.PutBucketLifecycleRequest(input)
|
||||
return out, req.Send()
|
||||
@ -4859,6 +4877,8 @@ func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifec
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: PutBucketLifecycleWithContext has been deprecated
|
||||
func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
|
||||
req, out := c.PutBucketLifecycleRequest(input)
|
||||
req.SetContext(ctx)
|
||||
@ -5124,6 +5144,8 @@ const opPutBucketNotification = "PutBucketNotification"
|
||||
// }
|
||||
//
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
|
||||
//
|
||||
// Deprecated: PutBucketNotification has been deprecated
|
||||
func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
|
||||
if c.Client.Config.Logger != nil {
|
||||
c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
|
||||
@ -5156,6 +5178,8 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
|
||||
// See the AWS API reference guide for Amazon Simple Storage Service's
|
||||
// API operation PutBucketNotification for usage and error information.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
|
||||
//
|
||||
// Deprecated: PutBucketNotification has been deprecated
|
||||
func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
|
||||
req, out := c.PutBucketNotificationRequest(input)
|
||||
return out, req.Send()
|
||||
@ -5170,6 +5194,8 @@ func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucke
|
||||
// the context is nil a panic will occur. In the future the SDK may create
|
||||
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
||||
// for more information on using Contexts.
|
||||
//
|
||||
// Deprecated: PutBucketNotificationWithContext has been deprecated
|
||||
func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
|
||||
req, out := c.PutBucketNotificationRequest(input)
|
||||
req.SetContext(ctx)
|
||||
@ -5377,7 +5403,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
|
||||
// PutBucketReplication API operation for Amazon Simple Storage Service.
|
||||
//
|
||||
// Creates a new replication configuration (or replaces an existing one, if
|
||||
// present).
|
||||
// present). For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
|
||||
// in the Amazon S3 Developer Guide.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
@ -6062,6 +6089,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
|
||||
|
||||
output = &SelectObjectContentOutput{}
|
||||
req = c.newRequest(op, input, output)
|
||||
req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler)
|
||||
req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler)
|
||||
req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop)
|
||||
return
|
||||
@ -6817,7 +6845,7 @@ type Bucket struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date the bucket was created.
|
||||
CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
CreationDate *time.Time `type:"timestamp"`
|
||||
|
||||
// The name of the bucket.
|
||||
Name *string `type:"string"`
|
||||
@ -7064,6 +7092,11 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
|
||||
type CSVInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Specifies that CSV field values may contain quoted record delimiters and
|
||||
// such records should be allowed. Default value is FALSE. Setting this value
|
||||
// to TRUE may lower performance.
|
||||
AllowQuotedRecordDelimiter *bool `type:"boolean"`
|
||||
|
||||
// Single character used to indicate a row should be ignored when present at
|
||||
// the start of a row.
|
||||
Comments *string `type:"string"`
|
||||
@ -7095,6 +7128,12 @@ func (s CSVInput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
|
||||
func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
|
||||
s.AllowQuotedRecordDelimiter = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetComments sets the Comments field's value.
|
||||
func (s *CSVInput) SetComments(v string) *CSVInput {
|
||||
s.Comments = &v
|
||||
@ -7198,6 +7237,8 @@ type CloudFunctionConfiguration struct {
|
||||
CloudFunction *string `type:"string"`
|
||||
|
||||
// Bucket event for which to send notifications.
|
||||
//
|
||||
// Deprecated: Event has been deprecated
|
||||
Event *string `deprecated:"true" type:"string" enum:"Event"`
|
||||
|
||||
Events []*string `locationName:"Event" type:"list" flattened:"true"`
|
||||
@ -7562,7 +7603,7 @@ func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
|
||||
}
|
||||
|
||||
type ContinuationEvent struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"ContinuationEvent" type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
@ -7623,14 +7664,14 @@ type CopyObjectInput struct {
|
||||
CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
|
||||
|
||||
// Copies the object if it has been modified since the specified time.
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
|
||||
|
||||
// Copies the object if its entity tag (ETag) is different than the specified
|
||||
// ETag.
|
||||
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
|
||||
|
||||
// Copies the object if it hasn't been modified since the specified time.
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
|
||||
|
||||
// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
|
||||
CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
|
||||
@ -7646,7 +7687,7 @@ type CopyObjectInput struct {
|
||||
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
|
||||
// The date and time at which the object is no longer cacheable.
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
|
||||
|
||||
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
|
||||
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
|
||||
@ -8075,7 +8116,7 @@ type CopyObjectResult struct {
|
||||
|
||||
ETag *string `type:"string"`
|
||||
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
@ -8107,7 +8148,7 @@ type CopyPartResult struct {
|
||||
ETag *string `type:"string"`
|
||||
|
||||
// Date and time at which the object was uploaded.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
@ -8311,7 +8352,7 @@ type CreateMultipartUploadInput struct {
|
||||
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
|
||||
|
||||
// The date and time at which the object is no longer cacheable.
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
|
||||
|
||||
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
|
||||
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
|
||||
@ -8559,7 +8600,7 @@ type CreateMultipartUploadOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date when multipart upload will become eligible for abort operation by lifecycle.
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
|
||||
|
||||
// Id of the lifecycle rule that makes a multipart upload eligible for abort
|
||||
// operation.
|
||||
@ -9240,6 +9281,14 @@ func (s DeleteBucketPolicyOutput) GoString() string {
|
||||
type DeleteBucketReplicationInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Deletes the replication subresource associated with the specified bucket.
|
||||
//
|
||||
// There is usually some time lag before replication configuration deletion
|
||||
// is fully propagated to all the Amazon S3 systems.
|
||||
//
|
||||
// For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
|
||||
// in the Amazon S3 Developer Guide.
|
||||
//
|
||||
// Bucket is a required field
|
||||
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
|
||||
}
|
||||
@ -9419,7 +9468,7 @@ type DeleteMarkerEntry struct {
|
||||
Key *string `min:"1" type:"string"`
|
||||
|
||||
// Date and time the object was last modified.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
Owner *Owner `type:"structure"`
|
||||
|
||||
@ -9467,6 +9516,33 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
|
||||
return s
|
||||
}
|
||||
|
||||
// Specifies whether Amazon S3 should replicate delete makers.
|
||||
type DeleteMarkerReplication struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The status of the delete marker replication.
|
||||
//
|
||||
// In the current implementation, Amazon S3 does not replicate the delete markers.
|
||||
// Therefore, the status must be Disabled.
|
||||
Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s DeleteMarkerReplication) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s DeleteMarkerReplication) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// SetStatus sets the Status field's value.
|
||||
func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
|
||||
s.Status = &v
|
||||
return s
|
||||
}
|
||||
|
||||
type DeleteObjectInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@ -9863,19 +9939,34 @@ type Destination struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Container for information regarding the access control for replicas.
|
||||
//
|
||||
// Use only in a cross-account scenario, where source and destination bucket
|
||||
// owners are not the same, when you want to change replica ownership to the
|
||||
// AWS account that owns the destination bucket. If you don't add this element
|
||||
// to the replication configuration, the replicas are owned by same AWS account
|
||||
// that owns the source object.
|
||||
AccessControlTranslation *AccessControlTranslation `type:"structure"`
|
||||
|
||||
// Account ID of the destination bucket. Currently this is only being verified
|
||||
// if Access Control Translation is enabled
|
||||
// Account ID of the destination bucket. Currently Amazon S3 verifies this value
|
||||
// only if Access Control Translation is enabled.
|
||||
//
|
||||
// In a cross-account scenario, if you tell Amazon S3 to change replica ownership
|
||||
// to the AWS account that owns the destination bucket by adding the AccessControlTranslation
|
||||
// element, this is the account ID of the destination bucket owner.
|
||||
Account *string `type:"string"`
|
||||
|
||||
// Amazon resource name (ARN) of the bucket where you want Amazon S3 to store
|
||||
// replicas of the object identified by the rule.
|
||||
//
|
||||
// If you have multiple rules in your replication configuration, all rules must
|
||||
// specify the same bucket as the destination. A replication configuration can
|
||||
// replicate objects only to one destination bucket.
|
||||
//
|
||||
// Bucket is a required field
|
||||
Bucket *string `type:"string" required:"true"`
|
||||
|
||||
// Container for information regarding encryption based configuration for replicas.
|
||||
// Container that provides encryption-related information. You must specify
|
||||
// this element if the SourceSelectionCriteria is specified.
|
||||
EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
|
||||
|
||||
// The class of storage used to store the object.
|
||||
@ -10012,7 +10103,8 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption {
|
||||
type EncryptionConfiguration struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The id of the KMS key used to encrypt the replica object.
|
||||
// The ID of the AWS KMS key for the region where the destination bucket resides.
|
||||
// Amazon S3 uses this key to encrypt the replica object.
|
||||
ReplicaKmsKeyID *string `type:"string"`
|
||||
}
|
||||
|
||||
@ -10033,7 +10125,7 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig
|
||||
}
|
||||
|
||||
type EndEvent struct {
|
||||
_ struct{} `type:"structure"`
|
||||
_ struct{} `locationName:"EndEvent" type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
@ -10153,6 +10245,7 @@ type FilterRule struct {
|
||||
// the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
|
||||
// Overlapping prefixes and suffixes are not supported. For more information,
|
||||
// go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Name *string `type:"string" enum:"FilterRuleName"`
|
||||
|
||||
Value *string `type:"string"`
|
||||
@ -11574,7 +11667,7 @@ type GetObjectInput struct {
|
||||
|
||||
// Return the object only if it has been modified since the specified time,
|
||||
// otherwise return a 304 (not modified).
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
|
||||
|
||||
// Return the object only if its entity tag (ETag) is different from the one
|
||||
// specified, otherwise return a 304 (not modified).
|
||||
@ -11582,7 +11675,7 @@ type GetObjectInput struct {
|
||||
|
||||
// Return the object only if it has not been modified since the specified time,
|
||||
// otherwise return a 412 (precondition failed).
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
|
||||
|
||||
// Key is a required field
|
||||
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
|
||||
@ -11618,7 +11711,7 @@ type GetObjectInput struct {
|
||||
ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
|
||||
|
||||
// Sets the Expires header of the response.
|
||||
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
|
||||
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
|
||||
|
||||
// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
|
||||
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
|
||||
@ -11845,7 +11938,7 @@ type GetObjectOutput struct {
|
||||
Expires *string `location:"header" locationName:"Expires" type:"string"`
|
||||
|
||||
// Last modified date of the object
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
|
||||
|
||||
// A map of metadata to store with the object in S3.
|
||||
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
|
||||
@ -12505,7 +12598,7 @@ type HeadObjectInput struct {
|
||||
|
||||
// Return the object only if it has been modified since the specified time,
|
||||
// otherwise return a 304 (not modified).
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
|
||||
|
||||
// Return the object only if its entity tag (ETag) is different from the one
|
||||
// specified, otherwise return a 304 (not modified).
|
||||
@ -12513,7 +12606,7 @@ type HeadObjectInput struct {
|
||||
|
||||
// Return the object only if it has not been modified since the specified time,
|
||||
// otherwise return a 412 (precondition failed).
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
|
||||
|
||||
// Key is a required field
|
||||
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
|
||||
@ -12717,7 +12810,7 @@ type HeadObjectOutput struct {
|
||||
Expires *string `location:"header" locationName:"Expires" type:"string"`
|
||||
|
||||
// Last modified date of the object
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
|
||||
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
|
||||
|
||||
// A map of metadata to store with the object in S3.
|
||||
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
|
||||
@ -13011,12 +13104,15 @@ type InputSerialization struct {
|
||||
// Describes the serialization of a CSV-encoded object.
|
||||
CSV *CSVInput `type:"structure"`
|
||||
|
||||
// Specifies object's compression format. Valid values: NONE, GZIP. Default
|
||||
// Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default
|
||||
// Value: NONE.
|
||||
CompressionType *string `type:"string" enum:"CompressionType"`
|
||||
|
||||
// Specifies JSON as object's input serialization format.
|
||||
JSON *JSONInput `type:"structure"`
|
||||
|
||||
// Specifies Parquet as object's input serialization format.
|
||||
Parquet *ParquetInput `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
@ -13047,6 +13143,12 @@ func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization {
|
||||
return s
|
||||
}
|
||||
|
||||
// SetParquet sets the Parquet field's value.
|
||||
func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization {
|
||||
s.Parquet = v
|
||||
return s
|
||||
}
|
||||
|
||||
type InventoryConfiguration struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@ -13517,6 +13619,7 @@ type LambdaFunctionConfiguration struct {
|
||||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
// Optional unique identifier for configurations in a notification configuration.
|
||||
@ -13699,6 +13802,8 @@ type LifecycleRule struct {
|
||||
|
||||
// Prefix identifying one or more objects to which the rule applies. This is
|
||||
// deprecated; use Filter instead.
|
||||
//
|
||||
// Deprecated: Prefix has been deprecated
|
||||
Prefix *string `deprecated:"true" type:"string"`
|
||||
|
||||
// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
|
||||
@ -15334,7 +15439,7 @@ type ListPartsOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date when multipart upload will become eligible for abort operation by lifecycle.
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
|
||||
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
|
||||
|
||||
// Id of the lifecycle rule that makes a multipart upload eligible for abort
|
||||
// operation.
|
||||
@ -15890,7 +15995,7 @@ type MultipartUpload struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Date and time at which the multipart upload was initiated.
|
||||
Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
Initiated *time.Time `type:"timestamp"`
|
||||
|
||||
// Identifies who initiated the multipart upload.
|
||||
Initiator *Initiator `type:"structure"`
|
||||
@ -15964,7 +16069,8 @@ type NoncurrentVersionExpiration struct {
|
||||
// Specifies the number of days an object is noncurrent before Amazon S3 can
|
||||
// perform the associated action. For information about the noncurrent days
|
||||
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
|
||||
// the Amazon Simple Storage Service Developer Guide.
|
||||
NoncurrentDays *int64 `type:"integer"`
|
||||
}
|
||||
|
||||
@ -15996,7 +16102,8 @@ type NoncurrentVersionTransition struct {
|
||||
// Specifies the number of days an object is noncurrent before Amazon S3 can
|
||||
// perform the associated action. For information about the noncurrent days
|
||||
// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in
|
||||
// the Amazon Simple Storage Service Developer Guide.
|
||||
NoncurrentDays *int64 `type:"integer"`
|
||||
|
||||
// The class of storage used to store the object.
|
||||
@ -16145,6 +16252,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
|
||||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
type NotificationConfigurationFilter struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@ -16175,7 +16283,7 @@ type Object struct {
|
||||
|
||||
Key *string `min:"1" type:"string"`
|
||||
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
Owner *Owner `type:"structure"`
|
||||
|
||||
@ -16294,7 +16402,7 @@ type ObjectVersion struct {
|
||||
Key *string `min:"1" type:"string"`
|
||||
|
||||
// Date and time the object was last modified.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
Owner *Owner `type:"structure"`
|
||||
|
||||
@ -16468,6 +16576,20 @@ func (s *Owner) SetID(v string) *Owner {
|
||||
return s
|
||||
}
|
||||
|
||||
type ParquetInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s ParquetInput) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s ParquetInput) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
type Part struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@ -16475,7 +16597,7 @@ type Part struct {
|
||||
ETag *string `type:"string"`
|
||||
|
||||
// Date and time at which the part was uploaded.
|
||||
LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
|
||||
LastModified *time.Time `type:"timestamp"`
|
||||
|
||||
// Part number identifying the part. This is a positive integer between 1 and
|
||||
// 10,000.
|
||||
@ -16561,7 +16683,7 @@ func (s *Progress) SetBytesScanned(v int64) *Progress {
|
||||
}
|
||||
|
||||
type ProgressEvent struct {
|
||||
_ struct{} `type:"structure" payload:"Details"`
|
||||
_ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
|
||||
|
||||
// The Progress event details.
|
||||
Details *Progress `locationName:"Details" type:"structure"`
|
||||
@ -16595,7 +16717,7 @@ func (s *ProgressEvent) UnmarshalEvent(
|
||||
if err := payloadUnmarshaler.UnmarshalPayload(
|
||||
bytes.NewReader(msg.Payload), s,
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal payload, %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -18257,7 +18379,7 @@ type PutObjectInput struct {
|
||||
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
|
||||
|
||||
// The date and time at which the object is no longer cacheable.
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
|
||||
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
|
||||
|
||||
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
|
||||
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
|
||||
@ -18730,6 +18852,7 @@ type QueueConfiguration struct {
|
||||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
// Optional unique identifier for configurations in a notification configuration.
|
||||
@ -18797,6 +18920,8 @@ type QueueConfigurationDeprecated struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Bucket event for which to send notifications.
|
||||
//
|
||||
// Deprecated: Event has been deprecated
|
||||
Event *string `deprecated:"true" type:"string" enum:"Event"`
|
||||
|
||||
Events []*string `locationName:"Event" type:"list" flattened:"true"`
|
||||
@ -18843,7 +18968,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep
|
||||
}
|
||||
|
||||
type RecordsEvent struct {
|
||||
_ struct{} `type:"structure" payload:"Payload"`
|
||||
_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
|
||||
|
||||
// The byte array of partial, one or more result records.
|
||||
//
|
||||
@ -19008,8 +19133,8 @@ type ReplicationConfiguration struct {
|
||||
// Role is a required field
|
||||
Role *string `type:"string" required:"true"`
|
||||
|
||||
// Container for information about a particular replication rule. Replication
|
||||
// configuration must have at least one rule and can contain up to 1,000 rules.
|
||||
// Container for one or more replication rules. Replication configuration must
|
||||
// have at least one rule and can contain up to 1,000 rules.
|
||||
//
|
||||
// Rules is a required field
|
||||
Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
|
||||
@ -19067,22 +19192,50 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo
|
||||
type ReplicationRule struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Specifies whether Amazon S3 should replicate delete markers.
|
||||
DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
|
||||
|
||||
// Container for replication destination information.
|
||||
//
|
||||
// Destination is a required field
|
||||
Destination *Destination `type:"structure" required:"true"`
|
||||
|
||||
// Filter that identifies the subset of objects to which the replication rule applies.
|
||||
// A Filter must specify exactly one Prefix, Tag, or an And child element.
|
||||
Filter *ReplicationRuleFilter `type:"structure"`
|
||||
|
||||
// Unique identifier for the rule. The value cannot be longer than 255 characters.
|
||||
ID *string `type:"string"`
|
||||
|
||||
// Object keyname prefix identifying one or more objects to which the rule applies.
|
||||
// Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
|
||||
// are not supported.
|
||||
// Maximum prefix length can be up to 1,024 characters.
|
||||
//
|
||||
// Prefix is a required field
|
||||
Prefix *string `type:"string" required:"true"`
|
||||
// Deprecated: Prefix has been deprecated
|
||||
Prefix *string `deprecated:"true" type:"string"`
|
||||
|
||||
// Container for filters that define which source objects should be replicated.
|
||||
// The priority associated with the rule. If you specify multiple rules in a
|
||||
// replication configuration, then Amazon S3 applies rule priority in the event
|
||||
// there are conflicts (two or more rules identify the same object based on
|
||||
// filter specified). The rule with higher priority takes precedence. For example,
|
||||
//
|
||||
// * Same object qualifies under prefix-based filter criteria, if the prefixes you specified
|
||||
// in multiple rules overlap.
|
||||
//
|
||||
// * Same object qualifies under tag-based filter criteria specified in multiple
|
||||
// rules
|
||||
//
|
||||
// For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
|
||||
// in the Amazon S3 Developer Guide.
|
||||
Priority *int64 `type:"integer"`
|
||||
|
||||
// Container that describes additional filters in identifying source objects
|
||||
// that you want to replicate. Currently, Amazon S3 supports only the filter
|
||||
// that you can specify for objects created with server-side encryption using
|
||||
// an AWS KMS-managed key. You can choose to enable or disable replication of
|
||||
// these objects.
|
||||
//
|
||||
// if you want Amazon S3 to replicate objects created with server-side encryption
|
||||
// using AWS KMS-managed keys.
|
||||
SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
|
||||
|
||||
// The rule is ignored if status is not Enabled.
|
||||
@ -19107,9 +19260,6 @@ func (s *ReplicationRule) Validate() error {
|
||||
if s.Destination == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Destination"))
|
||||
}
|
||||
if s.Prefix == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Prefix"))
|
||||
}
|
||||
if s.Status == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Status"))
|
||||
}
|
||||
@ -19118,6 +19268,11 @@ func (s *ReplicationRule) Validate() error {
|
||||
invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.Filter != nil {
|
||||
if err := s.Filter.Validate(); err != nil {
|
||||
invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.SourceSelectionCriteria != nil {
|
||||
if err := s.SourceSelectionCriteria.Validate(); err != nil {
|
||||
invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams))
|
||||
@ -19130,12 +19285,24 @@ func (s *ReplicationRule) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value.
|
||||
func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule {
|
||||
s.DeleteMarkerReplication = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetDestination sets the Destination field's value.
|
||||
func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
|
||||
s.Destination = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetFilter sets the Filter field's value.
|
||||
func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule {
|
||||
s.Filter = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetID sets the ID field's value.
|
||||
func (s *ReplicationRule) SetID(v string) *ReplicationRule {
|
||||
s.ID = &v
|
||||
@ -19148,6 +19315,12 @@ func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
|
||||
return s
|
||||
}
|
||||
|
||||
// SetPriority sets the Priority field's value.
|
||||
func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule {
|
||||
s.Priority = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value.
|
||||
func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule {
|
||||
s.SourceSelectionCriteria = v
|
||||
@ -19160,6 +19333,130 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
|
||||
return s
|
||||
}
|
||||
|
||||
type ReplicationRuleAndOperator struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
Prefix *string `type:"string"`
|
||||
|
||||
Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s ReplicationRuleAndOperator) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s ReplicationRuleAndOperator) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *ReplicationRuleAndOperator) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"}
|
||||
if s.Tags != nil {
|
||||
for i, v := range s.Tags {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPrefix sets the Prefix field's value.
|
||||
func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator {
|
||||
s.Prefix = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTags sets the Tags field's value.
|
||||
func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator {
|
||||
s.Tags = v
|
||||
return s
|
||||
}
|
||||
|
||||
// Filter that identifies the subset of objects to which the replication rule applies.
|
||||
// A Filter must specify exactly one Prefix, Tag, or an And child element.
|
||||
type ReplicationRuleFilter struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Container for specifying rule filters. These filters determine the subset
|
||||
// of objects to which the rule applies. The element is required only if you
|
||||
// specify more than one filter. For example:
|
||||
//
|
||||
// * You specify both a Prefix and a Tag filter. Then you wrap these in
|
||||
// an And tag.
|
||||
//
|
||||
// * You specify a filter based on multiple tags. Then you wrap the Tag elements
|
||||
// in an And tag.
|
||||
And *ReplicationRuleAndOperator `type:"structure"`
|
||||
|
||||
// Object keyname prefix that identifies subset of objects to which the rule
|
||||
// applies.
|
||||
Prefix *string `type:"string"`
|
||||
|
||||
// Container for specifying a tag key and value.
|
||||
//
|
||||
// The rule applies only to objects having the tag in its tagset.
|
||||
Tag *Tag `type:"structure"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s ReplicationRuleFilter) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s ReplicationRuleFilter) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *ReplicationRuleFilter) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"}
|
||||
if s.And != nil {
|
||||
if err := s.And.Validate(); err != nil {
|
||||
invalidParams.AddNested("And", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
if s.Tag != nil {
|
||||
if err := s.Tag.Validate(); err != nil {
|
||||
invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAnd sets the And field's value.
|
||||
func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter {
|
||||
s.And = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetPrefix sets the Prefix field's value.
|
||||
func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter {
|
||||
s.Prefix = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTag sets the Tag field's value.
|
||||
func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter {
|
||||
s.Tag = v
|
||||
return s
|
||||
}
|
||||
|
||||
type RequestPaymentConfiguration struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@ -19887,8 +20184,11 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
|
||||
case "Stats":
|
||||
return &StatsEvent{}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf(
|
||||
"unknown event type name, %s, for SelectObjectContentEventStream", eventType)
|
||||
return nil, awserr.New(
|
||||
request.ErrCodeSerialization,
|
||||
fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType),
|
||||
nil,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -19898,7 +20198,7 @@ func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
|
||||
// Amazon S3 uses this to parse object data into records, and returns only records
|
||||
// that match the specified SQL expression. You must also specify the data serialization
|
||||
// format for the response. For more information, go to S3Select API Documentation
|
||||
// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html)
|
||||
// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
|
||||
type SelectObjectContentInput struct {
|
||||
_ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
|
||||
|
||||
@ -19936,15 +20236,15 @@ type SelectObjectContentInput struct {
|
||||
RequestProgress *RequestProgress `type:"structure"`
|
||||
|
||||
// The SSE Algorithm used to encrypt the object. For more information, go to
|
||||
// Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
|
||||
// Server-Side Encryption (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
|
||||
|
||||
// The SSE Customer Key. For more information, go to Server-Side Encryption
|
||||
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
|
||||
// (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
|
||||
|
||||
// The SSE Customer Key MD5. For more information, go to Server-Side Encryption
|
||||
// (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
|
||||
// (Using Customer-Provided Encryption Keys (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
|
||||
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
|
||||
}
|
||||
|
||||
@ -20335,6 +20635,8 @@ type SourceSelectionCriteria struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Container for filter information of selection of KMS Encrypted S3 objects.
|
||||
// The element is required if you include SourceSelectionCriteria in the replication
|
||||
// configuration.
|
||||
SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
|
||||
}
|
||||
|
||||
@ -20451,7 +20753,7 @@ func (s *Stats) SetBytesScanned(v int64) *Stats {
|
||||
}
|
||||
|
||||
type StatsEvent struct {
|
||||
_ struct{} `type:"structure" payload:"Details"`
|
||||
_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
|
||||
|
||||
// The Stats event details.
|
||||
Details *Stats `locationName:"Details" type:"structure"`
|
||||
@ -20485,7 +20787,7 @@ func (s *StatsEvent) UnmarshalEvent(
|
||||
if err := payloadUnmarshaler.UnmarshalPayload(
|
||||
bytes.NewReader(msg.Payload), s,
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal payload, %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -20743,6 +21045,7 @@ type TopicConfiguration struct {
|
||||
|
||||
// Container for object key name filtering rules. For information about key
|
||||
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
Filter *NotificationConfigurationFilter `type:"structure"`
|
||||
|
||||
// Optional unique identifier for configurations in a notification configuration.
|
||||
@ -20810,6 +21113,8 @@ type TopicConfigurationDeprecated struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// Bucket event for which to send notifications.
|
||||
//
|
||||
// Deprecated: Event has been deprecated
|
||||
Event *string `deprecated:"true" type:"string" enum:"Event"`
|
||||
|
||||
Events []*string `locationName:"Event" type:"list" flattened:"true"`
|
||||
@ -20916,14 +21221,14 @@ type UploadPartCopyInput struct {
|
||||
CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
|
||||
|
||||
// Copies the object if it has been modified since the specified time.
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
|
||||
|
||||
// Copies the object if its entity tag (ETag) is different than the specified
|
||||
// ETag.
|
||||
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
|
||||
|
||||
// Copies the object if it hasn't been modified since the specified time.
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
|
||||
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
|
||||
|
||||
// The range of bytes to copy from the source object. The range value must use
|
||||
// the form bytes=first-last, where the first and last are the zero-based byte
|
||||
@ -21676,6 +21981,17 @@ const (
|
||||
|
||||
// CompressionTypeGzip is a CompressionType enum value
|
||||
CompressionTypeGzip = "GZIP"
|
||||
|
||||
// CompressionTypeBzip2 is a CompressionType enum value
|
||||
CompressionTypeBzip2 = "BZIP2"
|
||||
)
|
||||
|
||||
const (
|
||||
// DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value
|
||||
DeleteMarkerReplicationStatusEnabled = "Enabled"
|
||||
|
||||
// DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value
|
||||
DeleteMarkerReplicationStatusDisabled = "Disabled"
|
||||
)
|
||||
|
||||
// Requests Amazon S3 to encode the object keys in the response and specifies
|
||||
|
5
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go
generated
vendored
@ -338,6 +338,11 @@ func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
|
||||
}
|
||||
}
|
||||
|
||||
// iter.Next() could return false (above) plus populate iter.Err()
|
||||
if iter.Err() != nil {
|
||||
errs = append(errs, newError(iter.Err(), nil, nil))
|
||||
}
|
||||
|
||||
if input != nil && len(input.Delete.Objects) > 0 {
|
||||
if err := deleteBatch(ctx, d, input, objects); err != nil {
|
||||
errs = append(errs, err...)
|
||||
|
14
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
generated
vendored
@ -39,6 +39,8 @@ type Downloader struct {
|
||||
// The number of goroutines to spin up in parallel when sending parts.
|
||||
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
|
||||
//
|
||||
// Concurrency of 1 will download the parts sequentially.
|
||||
//
|
||||
// Concurrency is ignored if the Range input parameter is provided.
|
||||
Concurrency int
|
||||
|
||||
@ -135,6 +137,9 @@ type maxRetrier interface {
|
||||
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
|
||||
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
|
||||
//
|
||||
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
|
||||
// download the parts from S3 sequentially.
|
||||
//
|
||||
// If the GetObjectInput's Range value is provided that will cause the downloader
|
||||
// to perform a single GetObjectInput request for that object's range. This will
|
||||
// cause the part size and concurrency configurations to be ignored.
|
||||
@ -147,7 +152,7 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
|
||||
//
|
||||
// DownloadWithContext is the same as Download with the additional support for
|
||||
// Context input parameters. The Context must not be nil. A nil Context will
|
||||
// cause a panic. Use the Context to add deadlining, timeouts, ect. The
|
||||
// cause a panic. Use the Context to add deadlining, timeouts, etc. The
|
||||
// DownloadWithContext may create sub-contexts for individual underlying
|
||||
// requests.
|
||||
//
|
||||
@ -160,6 +165,9 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
|
||||
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
|
||||
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
|
||||
//
|
||||
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
|
||||
// download the parts from S3 sequentially.
|
||||
//
|
||||
// It is safe to call this method concurrently across goroutines.
|
||||
//
|
||||
// If the GetObjectInput's Range value is provided that will cause the downloader
|
||||
@ -207,14 +215,14 @@ func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s
|
||||
//
|
||||
// objects := []s3manager.BatchDownloadObject {
|
||||
// {
|
||||
// Input: &s3.GetObjectInput {
|
||||
// Object: &s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("foo"),
|
||||
// },
|
||||
// Writer: fooFile,
|
||||
// },
|
||||
// {
|
||||
// Input: &s3.GetObjectInput {
|
||||
// Object: &s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("bar"),
|
||||
// },
|
||||
|
9
vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go
generated
vendored
@ -241,6 +241,8 @@ type Uploader struct {
|
||||
// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
|
||||
// as 100, 50MB parts.
|
||||
// With a limit of s3.MaxUploadParts (10,000 parts).
|
||||
//
|
||||
// Defaults to package const's MaxUploadParts value.
|
||||
MaxUploadParts int
|
||||
|
||||
// The client to use when uploading to S3.
|
||||
@ -357,7 +359,7 @@ func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*Uploa
|
||||
//
|
||||
// UploadWithContext is the same as Upload with the additional support for
|
||||
// Context input parameters. The Context must not be nil. A nil Context will
|
||||
// cause a panic. Use the context to add deadlining, timeouts, ect. The
|
||||
// cause a panic. Use the context to add deadlining, timeouts, etc. The
|
||||
// UploadWithContext may create sub-contexts for individual underlying requests.
|
||||
//
|
||||
// Additional functional options can be provided to configure the individual
|
||||
@ -395,7 +397,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// iter := &s3managee.UploadObjectsIterator{Objects: objects}
|
||||
// iter := &s3manager.UploadObjectsIterator{Objects: objects}
|
||||
// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
|
||||
// return err
|
||||
// }
|
||||
@ -477,6 +479,9 @@ func (u *uploader) init() {
|
||||
if u.cfg.PartSize == 0 {
|
||||
u.cfg.PartSize = DefaultUploadPartSize
|
||||
}
|
||||
if u.cfg.MaxUploadParts == 0 {
|
||||
u.cfg.MaxUploadParts = MaxUploadParts
|
||||
}
|
||||
|
||||
u.bufferPool = sync.Pool{
|
||||
New: func() interface{} { return make([]byte, u.cfg.PartSize) },
|
||||
|
6
vendor/github.com/aws/aws-sdk-go/service/s3/service.go
generated
vendored
@ -29,9 +29,9 @@ var initRequest func(*request.Request)
|
||||
|
||||
// Service information constants
|
||||
const (
|
||||
ServiceName = "s3" // Service endpoint prefix API calls made to.
|
||||
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
|
||||
ServiceID = "S3" // ServiceID is a unique identifer of a specific service
|
||||
ServiceName = "s3" // Name of service.
|
||||
EndpointsID = ServiceName // ID to lookup a service endpoint with.
|
||||
ServiceID = "S3" // ServiceID is a unique identifer of a specific service.
|
||||
)
|
||||
|
||||
// New creates a new instance of the S3 client with a session.
|
||||
|
2
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
@ -1908,7 +1908,7 @@ type Credentials struct {
|
||||
// The date on which the current credentials expire.
|
||||
//
|
||||
// Expiration is a required field
|
||||
Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
|
||||
Expiration *time.Time `type:"timestamp" required:"true"`
|
||||
|
||||
// The secret access key that can be used to sign requests.
|
||||
//
|
||||
|
6
vendor/github.com/aws/aws-sdk-go/service/sts/service.go
generated
vendored
@ -29,9 +29,9 @@ var initRequest func(*request.Request)
|
||||
|
||||
// Service information constants
|
||||
const (
|
||||
ServiceName = "sts" // Service endpoint prefix API calls made to.
|
||||
EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
|
||||
ServiceID = "STS" // ServiceID is a unique identifer of a specific service
|
||||
ServiceName = "sts" // Name of service.
|
||||
EndpointsID = ServiceName // ID to lookup a service endpoint with.
|
||||
ServiceID = "STS" // ServiceID is a unique identifer of a specific service.
|
||||
)
|
||||
|
||||
// New creates a new instance of the STS client with a session.
|
||||
|
2
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
@ -2,7 +2,7 @@ ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
|
183
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
@ -16,7 +16,9 @@
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build !js,!appengine,!safe,!disableunsafe
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
@ -34,80 +36,49 @@ const (
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
var (
|
||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
||||
// internal reflect.Value fields. These values are valid before golang
|
||||
// commit ecccf07e7f9d which changed the format. The are also valid
|
||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
||||
// the original format. Code in the init function updates these offsets
|
||||
// as necessary.
|
||||
offsetPtr = uintptr(ptrSize)
|
||||
offsetScalar = uintptr(0)
|
||||
offsetFlag = uintptr(ptrSize * 2)
|
||||
type flag uintptr
|
||||
|
||||
// flagKindWidth and flagKindShift indicate various bits that the
|
||||
// reflect package uses internally to track kind information.
|
||||
//
|
||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
||||
// read-only.
|
||||
//
|
||||
// flagIndir indicates whether the value field of a reflect.Value is
|
||||
// the actual data or a pointer to the data.
|
||||
//
|
||||
// These values are valid before golang commit 90a7c3c86944 which
|
||||
// changed their positions. Code in the init function updates these
|
||||
// flags as necessary.
|
||||
flagKindWidth = uintptr(5)
|
||||
flagKindShift = uintptr(flagKindWidth - 1)
|
||||
flagRO = uintptr(1 << 0)
|
||||
flagIndir = uintptr(1 << 1)
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Older versions of reflect.Value stored small integers directly in the
|
||||
// ptr field (which is named val in the older versions). Versions
|
||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
||||
// scalar for this purpose which unfortunately came before the flag
|
||||
// field, so the offset of the flag field is different for those
|
||||
// versions.
|
||||
//
|
||||
// This code constructs a new reflect.Value from a known small integer
|
||||
// and checks if the size of the reflect.Value struct indicates it has
|
||||
// the scalar field. When it does, the offsets are updated accordingly.
|
||||
vv := reflect.ValueOf(0xf00)
|
||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
||||
offsetScalar = ptrSize * 2
|
||||
offsetFlag = ptrSize * 3
|
||||
}
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Commit 90a7c3c86944 changed the flag positions such that the low
|
||||
// order bits are the kind. This code extracts the kind from the flags
|
||||
// field and ensures it's the correct type. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are updated
|
||||
// accordingly.
|
||||
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
|
||||
upfv := *(*uintptr)(upf)
|
||||
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
|
||||
flagKindShift = 0
|
||||
flagRO = 1 << 5
|
||||
flagIndir = 1 << 6
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
// Commit adf9b30e5594 modified the flags to separate the
|
||||
// flagRO flag into two bits which specifies whether or not the
|
||||
// field is embedded. This causes flagIndir to move over a bit
|
||||
// and means that flagRO is the combination of either of the
|
||||
// original flagRO bit and the new bit.
|
||||
//
|
||||
// This code detects the change by extracting what used to be
|
||||
// the indirect bit to ensure it's set. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are
|
||||
// updated accordingly.
|
||||
if upfv&flagIndir == 0 {
|
||||
flagRO = 3 << 5
|
||||
flagIndir = 1 << 7
|
||||
}
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
@ -119,34 +90,56 @@ func init() {
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
|
||||
indirects := 1
|
||||
vt := v.Type()
|
||||
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
|
||||
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
|
||||
if rvf&flagIndir != 0 {
|
||||
vt = reflect.PtrTo(v.Type())
|
||||
indirects++
|
||||
} else if offsetScalar != 0 {
|
||||
// The value is in the scalar field when it's not one of the
|
||||
// reference types.
|
||||
switch vt.Kind() {
|
||||
case reflect.Uintptr:
|
||||
case reflect.Chan:
|
||||
case reflect.Func:
|
||||
case reflect.Map:
|
||||
case reflect.Ptr:
|
||||
case reflect.UnsafePointer:
|
||||
default:
|
||||
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
|
||||
offsetScalar)
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
pv := reflect.NewAt(vt, upv)
|
||||
rv = pv
|
||||
for i := 0; i < indirects; i++ {
|
||||
rv = rv.Elem()
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return rv
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
||||
|
2
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
@ -16,7 +16,7 @@
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe
|
||||
// +build js appengine safe disableunsafe !go1.4
|
||||
|
||||
package spew
|
||||
|
||||
|
2
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
|
10
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
@ -35,16 +35,16 @@ var (
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound == true:
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
|
4
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound == true:
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
|
1
vendor/github.com/go-ini/ini/.travis.yml
generated
vendored
@ -1,7 +1,6 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.5.x
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
|
2
vendor/github.com/go-ini/ini/README.md
generated
vendored
@ -1,4 +1,4 @@
|
||||
INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge)
|
||||
INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini)
|
||||
===
|
||||
|
||||
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
|
||||
|
29
vendor/github.com/go-ini/ini/file.go
generated
vendored
@ -237,15 +237,20 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||
for i, sname := range f.sectionList {
|
||||
sec := f.Section(sname)
|
||||
if len(sec.Comment) > 0 {
|
||||
if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
|
||||
sec.Comment = "; " + sec.Comment
|
||||
// Support multiline comments
|
||||
lines := strings.Split(sec.Comment, LineBreak)
|
||||
for i := range lines {
|
||||
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||
lines[i] = "; " + lines[i]
|
||||
} else {
|
||||
sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:])
|
||||
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||
}
|
||||
if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil {
|
||||
|
||||
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i > 0 || DefaultHeader {
|
||||
if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
|
||||
@ -300,19 +305,21 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
}
|
||||
if key.Comment[0] != '#' && key.Comment[0] != ';' {
|
||||
key.Comment = "; " + key.Comment
|
||||
} else {
|
||||
key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:])
|
||||
}
|
||||
|
||||
// Support multiline comments
|
||||
key.Comment = strings.Replace(key.Comment, "\n", "\n; ", -1)
|
||||
lines := strings.Split(key.Comment, LineBreak)
|
||||
for i := range lines {
|
||||
if lines[i][0] != '#' && lines[i][0] != ';' {
|
||||
lines[i] = "; " + lines[i]
|
||||
} else {
|
||||
lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
|
||||
}
|
||||
|
||||
if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
|
||||
if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(indent) > 0 && sname != DEFAULT_SECTION {
|
||||
buf.WriteString(indent)
|
||||
|
10
vendor/github.com/go-ini/ini/ini.go
generated
vendored
@ -1,3 +1,5 @@
|
||||
// +build go1.6
|
||||
|
||||
// Copyright 2014 Unknwon
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
@ -32,7 +34,7 @@ const (
|
||||
|
||||
// Maximum allowed depth when recursively substituting variable names.
|
||||
_DEPTH_VALUES = 99
|
||||
_VERSION = "1.37.0"
|
||||
_VERSION = "1.38.2"
|
||||
)
|
||||
|
||||
// Version returns current package version literal.
|
||||
@ -132,6 +134,8 @@ type LoadOptions struct {
|
||||
IgnoreContinuation bool
|
||||
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
|
||||
IgnoreInlineComment bool
|
||||
// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
|
||||
SkipUnrecognizableLines bool
|
||||
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
|
||||
// This type of key is mostly used in my.cnf.
|
||||
AllowBooleanKeys bool
|
||||
@ -157,7 +161,7 @@ type LoadOptions struct {
|
||||
// when value is NOT surrounded by any quotes.
|
||||
// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
|
||||
UnescapeValueCommentSymbols bool
|
||||
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
|
||||
// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
|
||||
// conform to key/value pairs. Specify the names of those blocks here.
|
||||
UnparseableSections []string
|
||||
}
|
||||
@ -200,7 +204,7 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
|
||||
}
|
||||
|
||||
// InsensitiveLoad has exactly same functionality as Load function
|
||||
// ShadowLoad has exactly the same functionality as the Load function
|
||||
// except it allows having shadow keys.
|
||||
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
|
||||
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
|
||||
|
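The go-ini changes above add SkipUnrecognizableLines to LoadOptions and a ShadowLoad helper. A minimal, hedged usage sketch follows; the file name "example.ini" and the "host" key are hypothetical illustrations, not part of this diff.

package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	// Skip lines that are neither sections nor key/value pairs instead of
	// failing the whole parse; treat bare keys (my.cnf style) as booleans.
	cfg, err := ini.LoadSources(ini.LoadOptions{
		SkipUnrecognizableLines: true,
		AllowBooleanKeys:        true,
	}, "example.ini") // hypothetical file name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Section("").Key("host").String()) // hypothetical key
}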
14  vendor/github.com/go-ini/ini/parser.go (generated, vendored)
@ -339,8 +339,7 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
|
||||
// NOTE: Iterate and increase `currentPeekSize` until
|
||||
// the size of the parser buffer is found.
|
||||
// TODO: When Golang 1.10 is the lowest version supported,
|
||||
// replace with `parserBufferSize := p.buf.Size()`.
|
||||
// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
|
||||
parserBufferSize := 0
|
||||
// NOTE: Peek 1kb at a time.
|
||||
currentPeekSize := 1024
|
||||
@ -390,8 +389,7 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
// Section
|
||||
if line[0] == '[' {
|
||||
// Read to the next ']' (TODO: support quoted strings)
|
||||
// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
|
||||
closeIdx := bytes.LastIndex(line, []byte("]"))
|
||||
closeIdx := bytes.LastIndexByte(line, ']')
|
||||
if closeIdx == -1 {
|
||||
return fmt.Errorf("unclosed section: %s", line)
|
||||
}
|
||||
@ -433,7 +431,9 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
kname, offset, err := readKeyName(line)
|
||||
if err != nil {
|
||||
// Treat as boolean key when desired, and whole line is key name.
|
||||
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
|
||||
if IsErrDelimiterNotFound(err) {
|
||||
switch {
|
||||
case f.options.AllowBooleanKeys:
|
||||
kname, err := p.readValue(line,
|
||||
parserBufferSize,
|
||||
f.options.IgnoreContinuation,
|
||||
@ -452,6 +452,10 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
key.Comment = strings.TrimSpace(p.comment.String())
|
||||
p.comment.Reset()
|
||||
continue
|
||||
|
||||
case f.options.SkipUnrecognizableLines:
|
||||
continue
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
1  vendor/github.com/go-ini/ini/section.go (generated, vendored)
@ -82,6 +82,7 @@ func (s *Section) NewKey(name, val string) (*Key, error) {
|
||||
}
|
||||
} else {
|
||||
s.keys[name].value = val
|
||||
s.keysHash[name] = val
|
||||
}
|
||||
return s.keys[name], nil
|
||||
}
|
||||
|
3  vendor/github.com/golang/protobuf/LICENSE (generated, vendored)
@ -1,7 +1,4 @@
|
||||
Go support for Protocol Buffers - Google's data interchange format
|
||||
|
||||
Copyright 2010 The Go Authors. All rights reserved.
|
||||
https://github.com/golang/protobuf
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
|
18  vendor/github.com/golang/protobuf/proto/encode.go (generated, vendored)
@ -37,27 +37,9 @@ package proto
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is the error returned if Marshal is called with
|
||||
// a protocol buffer struct whose required fields have not
|
||||
// all been initialized. It is also the error returned if Unmarshal is
|
||||
// called with an encoded protocol buffer that does not include all the
|
||||
// required fields.
|
||||
//
|
||||
// When printed, RequiredNotSetError reports the first unset required field in a
|
||||
// message. If the field cannot be precisely determined, it is reported as
|
||||
// "{Unknown}".
|
||||
type RequiredNotSetError struct {
|
||||
field string
|
||||
}
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
|
62  vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored)
@ -265,7 +265,6 @@ package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
@ -274,7 +273,66 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return fmt.Sprintf("proto: required field not set")
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or a InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether it was successful.
|
||||
// Otherwise it returns false for any fatal non-nil errors.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
|
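The lib.go hunk above introduces a "soft error" convention: errors that advertise RequiredNotSet() or InvalidUTF8() are recorded but do not abort marshaling. The sketch below only restates that convention in isolation; isSoft and missingField are illustrative stand-ins, not the vendored isNonFatal or RequiredNotSetError themselves.

package main

import (
	"errors"
	"fmt"
)

// isSoft mirrors the idea of proto's isNonFatal: an error is non-fatal if it
// identifies itself through one of the two marker methods.
func isSoft(err error) bool {
	if e, ok := err.(interface{ RequiredNotSet() bool }); ok && e.RequiredNotSet() {
		return true
	}
	if e, ok := err.(interface{ InvalidUTF8() bool }); ok && e.InvalidUTF8() {
		return true
	}
	return false
}

// missingField is a toy error carrying the RequiredNotSet marker.
type missingField struct{ field string }

func (e *missingField) Error() string        { return "required field " + e.field + " not set" }
func (e *missingField) RequiredNotSet() bool { return true }

func main() {
	fmt.Println(isSoft(&missingField{"name"}))      // true: record and keep going
	fmt.Println(isSoft(errors.New("wire corrupt"))) // false: abort
}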
14  vendor/github.com/golang/protobuf/proto/properties.go (generated, vendored)
@ -139,7 +139,7 @@ type Properties struct {
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field; set for []byte only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
@ -149,8 +149,8 @@ type Properties struct {
|
||||
sprop *StructProperties // set for struct types only
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
mkeyprop *Properties // set for map types only
|
||||
mvalprop *Properties // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc
|
||||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.mkeyprop = &Properties{}
|
||||
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.mvalprop = &Properties{}
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
|
190  vendor/github.com/golang/protobuf/proto/table_marshal.go (generated, vendored)
@ -231,7 +231,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
||||
return b, err
|
||||
}
|
||||
|
||||
var err, errreq error
|
||||
var err, errLater error
|
||||
// The old marshaler encodes extensions at beginning.
|
||||
if u.extensions.IsValid() {
|
||||
e := ptr.offset(u.extensions).toExtensions()
|
||||
@ -252,11 +252,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
||||
}
|
||||
}
|
||||
for _, f := range u.fields {
|
||||
if f.required && errreq == nil {
|
||||
if f.required {
|
||||
if ptr.offset(f.field).getPointer().isNil() {
|
||||
// Required field is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
errreq = &RequiredNotSetError{f.name}
|
||||
if errLater == nil {
|
||||
errLater = &RequiredNotSetError{f.name}
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
@ -269,14 +271,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
||||
if err1, ok := err.(*RequiredNotSetError); ok {
|
||||
// Required field in submessage is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
if errreq == nil {
|
||||
errreq = &RequiredNotSetError{f.name + "." + err1.field}
|
||||
if errLater == nil {
|
||||
errLater = &RequiredNotSetError{f.name + "." + err1.field}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err == errRepeatedHasNil {
|
||||
err = errors.New("proto: repeated field " + f.name + " has nil element")
|
||||
}
|
||||
if err == errInvalidUTF8 {
|
||||
if errLater == nil {
|
||||
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
|
||||
errLater = &invalidUTF8Error{fullName}
|
||||
}
|
||||
continue
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
@ -284,7 +293,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
|
||||
s := *ptr.offset(u.unrecognized).toBytes()
|
||||
b = append(b, s...)
|
||||
}
|
||||
return b, errreq
|
||||
return b, errLater
|
||||
}
|
||||
|
||||
// computeMarshalInfo initializes the marshal info.
|
||||
@ -530,6 +539,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
|
||||
|
||||
packed := false
|
||||
proto3 := false
|
||||
validateUTF8 := true
|
||||
for i := 2; i < len(tags); i++ {
|
||||
if tags[i] == "packed" {
|
||||
packed = true
|
||||
@ -538,6 +548,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
|
||||
proto3 = true
|
||||
}
|
||||
}
|
||||
validateUTF8 = validateUTF8 && proto3
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
@ -735,6 +746,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
|
||||
}
|
||||
return sizeFloat64Value, appendFloat64Value
|
||||
case reflect.String:
|
||||
if validateUTF8 {
|
||||
if pointer {
|
||||
return sizeStringPtr, appendUTF8StringPtr
|
||||
}
|
||||
if slice {
|
||||
return sizeStringSlice, appendUTF8StringSlice
|
||||
}
|
||||
if nozero {
|
||||
return sizeStringValueNoZero, appendUTF8StringValueNoZero
|
||||
}
|
||||
return sizeStringValue, appendUTF8StringValue
|
||||
}
|
||||
if pointer {
|
||||
return sizeStringPtr, appendStringPtr
|
||||
}
|
||||
@ -1984,9 +2007,6 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt
|
||||
}
|
||||
func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
v := *ptr.toString()
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
@ -1997,9 +2017,6 @@ func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]b
|
||||
if v == "" {
|
||||
return b, nil
|
||||
}
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
@ -2011,9 +2028,6 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err
|
||||
return b, nil
|
||||
}
|
||||
v := *p
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
@ -2022,12 +2036,74 @@ func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, err
|
||||
func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
s := *ptr.toStringSlice()
|
||||
for _, v := range s {
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
v := *ptr.toString()
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
v := *ptr.toString()
|
||||
if v == "" {
|
||||
return b, nil
|
||||
}
|
||||
if !utf8.ValidString(v) {
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
p := *ptr.toStringPtr()
|
||||
if p == nil {
|
||||
return b, nil
|
||||
}
|
||||
v := *p
|
||||
if !utf8.ValidString(v) {
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
|
||||
var invalidUTF8 bool
|
||||
s := *ptr.toStringSlice()
|
||||
for _, v := range s {
|
||||
if !utf8.ValidString(v) {
|
||||
invalidUTF8 = true
|
||||
}
|
||||
b = appendVarint(b, wiretag)
|
||||
b = appendVarint(b, uint64(len(v)))
|
||||
b = append(b, v...)
|
||||
}
|
||||
if invalidUTF8 {
|
||||
return b, errInvalidUTF8
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
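The appendUTF8* helpers added above share one pattern: the string is still encoded, and errInvalidUTF8 is reported afterwards instead of aborting with a nil buffer. A standalone toy restatement of that pattern (a single-byte length prefix instead of the real varint framing; not the vendored code):

package main

import (
	"errors"
	"fmt"
	"unicode/utf8"
)

var errInvalidUTF8 = errors.New("invalid UTF-8")

// appendChecked appends a length-prefixed string and reports, rather than
// aborts on, invalid UTF-8 - the same shape as appendUTF8StringValue above.
func appendChecked(b []byte, v string) ([]byte, error) {
	bad := !utf8.ValidString(v)
	b = append(b, byte(len(v))) // toy length prefix for illustration only
	b = append(b, v...)
	if bad {
		return b, errInvalidUTF8
	}
	return b, nil
}

func main() {
	b, err := appendChecked(nil, string([]byte{0xff, 'a'}))
	fmt.Println(len(b), err) // buffer is still populated, error is reported
}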
@ -2107,7 +2183,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
||||
},
|
||||
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
|
||||
s := ptr.getPointerSlice()
|
||||
var err, errreq error
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
for _, v := range s {
|
||||
if v.isNil() {
|
||||
return b, errRepeatedHasNil
|
||||
@ -2115,22 +2192,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
||||
b = appendVarint(b, wiretag) // start group
|
||||
b, err = u.marshal(b, v, deterministic)
|
||||
b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
|
||||
if err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); ok {
|
||||
// Required field in submessage is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
if errreq == nil {
|
||||
errreq = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !nerr.Merge(err) {
|
||||
if err == ErrNil {
|
||||
err = errRepeatedHasNil
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, errreq
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
@ -2174,7 +2243,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
||||
},
|
||||
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
|
||||
s := ptr.getPointerSlice()
|
||||
var err, errreq error
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
for _, v := range s {
|
||||
if v.isNil() {
|
||||
return b, errRepeatedHasNil
|
||||
@ -2184,22 +2254,14 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
|
||||
b = appendVarint(b, uint64(siz))
|
||||
b, err = u.marshal(b, v, deterministic)
|
||||
|
||||
if err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); ok {
|
||||
// Required field in submessage is not set.
|
||||
// We record the error but keep going, to give a complete marshaling.
|
||||
if errreq == nil {
|
||||
errreq = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !nerr.Merge(err) {
|
||||
if err == ErrNil {
|
||||
err = errRepeatedHasNil
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, errreq
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
@ -2223,6 +2285,25 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
||||
// value.
|
||||
// Key cannot be pointer-typed.
|
||||
valIsPtr := valType.Kind() == reflect.Ptr
|
||||
|
||||
// If value is a message with nested maps, calling
|
||||
// valSizer in marshal may be quadratic. We should use
|
||||
// cached version in marshal (but not in size).
|
||||
// If value is not message type, we don't have size cache,
|
||||
// but it cannot be nested either. Just use valSizer.
|
||||
valCachedSizer := valSizer
|
||||
if valIsPtr && valType.Elem().Kind() == reflect.Struct {
|
||||
u := getMarshalInfo(valType.Elem())
|
||||
valCachedSizer = func(ptr pointer, tagsize int) int {
|
||||
// Same as message sizer, but use cache.
|
||||
p := ptr.getPointer()
|
||||
if p.isNil() {
|
||||
return 0
|
||||
}
|
||||
siz := u.cachedsize(p)
|
||||
return siz + SizeVarint(uint64(siz)) + tagsize
|
||||
}
|
||||
}
|
||||
return func(ptr pointer, tagsize int) int {
|
||||
m := ptr.asPointerTo(t).Elem() // the map
|
||||
n := 0
|
||||
@ -2243,24 +2324,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
|
||||
if len(keys) > 1 && deterministic {
|
||||
sort.Sort(mapKeys(keys))
|
||||
}
|
||||
|
||||
var nerr nonFatal
|
||||
for _, k := range keys {
|
||||
ki := k.Interface()
|
||||
vi := m.MapIndex(k).Interface()
|
||||
kaddr := toAddrPointer(&ki, false) // pointer to key
|
||||
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
|
||||
b = appendVarint(b, tag)
|
||||
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
|
||||
b = appendVarint(b, uint64(siz))
|
||||
b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
|
||||
if err != nil && err != ErrNil { // allow nil value in map
|
||||
if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
@ -2333,6 +2416,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
||||
defer mu.Unlock()
|
||||
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
|
||||
// Fast-path for common cases: zero or one extensions.
|
||||
// Don't bother sorting the keys.
|
||||
@ -2352,11 +2436,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// Sort the keys to provide a deterministic encoding.
|
||||
@ -2383,11 +2467,11 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// message set format is:
|
||||
@ -2444,6 +2528,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
||||
defer mu.Unlock()
|
||||
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
|
||||
// Fast-path for common cases: zero or one extensions.
|
||||
// Don't bother sorting the keys.
|
||||
@ -2470,12 +2555,12 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
b = append(b, 1<<3|WireEndGroup)
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// Sort the keys to provide a deterministic encoding.
|
||||
@ -2509,11 +2594,11 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
|
||||
b = append(b, 1<<3|WireEndGroup)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
|
||||
@ -2556,6 +2641,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
|
||||
sort.Ints(keys)
|
||||
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
for _, k := range keys {
|
||||
e := m[int32(k)]
|
||||
if e.value == nil || e.desc == nil {
|
||||
@ -2572,11 +2658,11 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
|
||||
v := e.value
|
||||
p := toAddrPointer(&v, ei.isptr)
|
||||
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
|
||||
// newMarshaler is the interface representing objects that can marshal themselves.
|
||||
|
144  vendor/github.com/golang/protobuf/proto/table_unmarshal.go (generated, vendored)
@ -97,6 +97,8 @@ type unmarshalFieldInfo struct {
|
||||
|
||||
// if a required field, contains a single set bit at this field's index in the required field list.
|
||||
reqMask uint64
|
||||
|
||||
name string // name of the field, for error reporting
|
||||
}
|
||||
|
||||
var (
|
||||
@ -137,7 +139,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
||||
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
|
||||
}
|
||||
var reqMask uint64 // bitmask of required fields we've seen.
|
||||
var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
|
||||
var errLater error
|
||||
for len(b) > 0 {
|
||||
// Read tag and wire type.
|
||||
// Special case 1 and 2 byte varints.
|
||||
@ -176,11 +178,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
||||
if r, ok := err.(*RequiredNotSetError); ok {
|
||||
// Remember this error, but keep parsing. We need to produce
|
||||
// a full parse even if a required field is missing.
|
||||
rnse = r
|
||||
if errLater == nil {
|
||||
errLater = r
|
||||
}
|
||||
reqMask |= f.reqMask
|
||||
continue
|
||||
}
|
||||
if err != errInternalBadWireType {
|
||||
if err == errInvalidUTF8 {
|
||||
if errLater == nil {
|
||||
fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
|
||||
errLater = &invalidUTF8Error{fullName}
|
||||
}
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Fragments with bad wire type are treated as unknown fields.
|
||||
@ -239,20 +250,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
|
||||
emap[int32(tag)] = e
|
||||
}
|
||||
}
|
||||
if rnse != nil {
|
||||
// A required field of a submessage/group is missing. Return that error.
|
||||
return rnse
|
||||
}
|
||||
if reqMask != u.reqMask {
|
||||
if reqMask != u.reqMask && errLater == nil {
|
||||
// A required field of this message is missing.
|
||||
for _, n := range u.reqFields {
|
||||
if reqMask&1 == 0 {
|
||||
return &RequiredNotSetError{n}
|
||||
errLater = &RequiredNotSetError{n}
|
||||
}
|
||||
reqMask >>= 1
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return errLater
|
||||
}
|
||||
|
||||
// computeUnmarshalInfo fills in u with information for use
|
||||
@ -351,7 +358,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||
}
|
||||
|
||||
// Store the info in the correct slot in the message.
|
||||
u.setTag(tag, toField(&f), unmarshal, reqMask)
|
||||
u.setTag(tag, toField(&f), unmarshal, reqMask, name)
|
||||
}
|
||||
|
||||
// Find any types associated with oneof fields.
|
||||
@ -366,10 +373,17 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||
|
||||
f := typ.Field(0) // oneof implementers have one field
|
||||
baseUnmarshal := fieldUnmarshaler(&f)
|
||||
tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
|
||||
tag, err := strconv.Atoi(tagstr)
|
||||
tags := strings.Split(f.Tag.Get("protobuf"), ",")
|
||||
fieldNum, err := strconv.Atoi(tags[1])
|
||||
if err != nil {
|
||||
panic("protobuf tag field not an integer: " + tagstr)
|
||||
panic("protobuf tag field not an integer: " + tags[1])
|
||||
}
|
||||
var name string
|
||||
for _, tag := range tags {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = strings.TrimPrefix(tag, "name=")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find the oneof field that this struct implements.
|
||||
@ -380,7 +394,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||
// That lets us know where this struct should be stored
|
||||
// when we encounter it during unmarshaling.
|
||||
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
|
||||
u.setTag(tag, of.field, unmarshal, 0)
|
||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -401,7 +415,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||
// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
|
||||
u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
|
||||
return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
|
||||
}, 0)
|
||||
}, 0, "")
|
||||
|
||||
// Set mask for required field check.
|
||||
u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
|
||||
@ -413,8 +427,9 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||
// tag = tag # for field
|
||||
// field/unmarshal = unmarshal info for that field.
|
||||
// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
|
||||
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
|
||||
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
|
||||
// name = short name of the field.
|
||||
func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
|
||||
i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
|
||||
n := u.typ.NumField()
|
||||
if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
|
||||
for len(u.dense) <= tag {
|
||||
@ -442,11 +457,17 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
|
||||
tagArray := strings.Split(tags, ",")
|
||||
encoding := tagArray[0]
|
||||
name := "unknown"
|
||||
proto3 := false
|
||||
validateUTF8 := true
|
||||
for _, tag := range tagArray[3:] {
|
||||
if strings.HasPrefix(tag, "name=") {
|
||||
name = tag[5:]
|
||||
}
|
||||
if tag == "proto3" {
|
||||
proto3 = true
|
||||
}
|
||||
}
|
||||
validateUTF8 = validateUTF8 && proto3
|
||||
|
||||
// Figure out packaging (pointer, slice, or both)
|
||||
slice := false
|
||||
@ -594,6 +615,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
|
||||
}
|
||||
return unmarshalBytesValue
|
||||
case reflect.String:
|
||||
if validateUTF8 {
|
||||
if pointer {
|
||||
return unmarshalUTF8StringPtr
|
||||
}
|
||||
if slice {
|
||||
return unmarshalUTF8StringSlice
|
||||
}
|
||||
return unmarshalUTF8StringValue
|
||||
}
|
||||
if pointer {
|
||||
return unmarshalStringPtr
|
||||
}
|
||||
@ -1448,9 +1478,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
*f.toString() = v
|
||||
return b[x:], nil
|
||||
}
|
||||
@ -1468,9 +1495,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
*f.toStringPtr() = &v
|
||||
return b[x:], nil
|
||||
}
|
||||
@ -1488,14 +1512,72 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
if !utf8.ValidString(v) {
|
||||
return nil, errInvalidUTF8
|
||||
}
|
||||
s := f.toStringSlice()
|
||||
*s = append(*s, v)
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
|
||||
if w != WireBytes {
|
||||
return b, errInternalBadWireType
|
||||
}
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
if x > uint64(len(b)) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
*f.toString() = v
|
||||
if !utf8.ValidString(v) {
|
||||
return b[x:], errInvalidUTF8
|
||||
}
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
|
||||
if w != WireBytes {
|
||||
return b, errInternalBadWireType
|
||||
}
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
if x > uint64(len(b)) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
*f.toStringPtr() = &v
|
||||
if !utf8.ValidString(v) {
|
||||
return b[x:], errInvalidUTF8
|
||||
}
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
|
||||
if w != WireBytes {
|
||||
return b, errInternalBadWireType
|
||||
}
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
if x > uint64(len(b)) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
v := string(b[:x])
|
||||
s := f.toStringSlice()
|
||||
*s = append(*s, v)
|
||||
if !utf8.ValidString(v) {
|
||||
return b[x:], errInvalidUTF8
|
||||
}
|
||||
return b[x:], nil
|
||||
}
|
||||
|
||||
var emptyBuf [0]byte
|
||||
|
||||
func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
|
||||
@ -1674,6 +1756,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
|
||||
// Maps will be somewhat slow. Oh well.
|
||||
|
||||
// Read key and value from data.
|
||||
var nerr nonFatal
|
||||
k := reflect.New(kt)
|
||||
v := reflect.New(vt)
|
||||
for len(b) > 0 {
|
||||
@ -1694,7 +1777,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
|
||||
err = errInternalBadWireType // skip unknown tag
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if nerr.Merge(err) {
|
||||
continue
|
||||
}
|
||||
if err != errInternalBadWireType {
|
||||
@ -1717,7 +1800,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
|
||||
// Insert into map.
|
||||
m.SetMapIndex(k.Elem(), v.Elem())
|
||||
|
||||
return r, nil
|
||||
return r, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
@ -1743,15 +1826,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal
|
||||
// Unmarshal data into holder.
|
||||
// We unmarshal into the first field of the holder object.
|
||||
var err error
|
||||
var nerr nonFatal
|
||||
b, err = unmarshal(b, valToPointer(v).offset(field0), w)
|
||||
if err != nil {
|
||||
if !nerr.Merge(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Write pointer to holder into target field.
|
||||
f.asPointerTo(ityp).Elem().Set(v)
|
||||
|
||||
return b, nil
|
||||
return b, nerr.E
|
||||
}
|
||||
}
|
||||
|
||||
|
4  vendor/github.com/golang/protobuf/proto/text.go (generated, vendored)
@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
|
||||
if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
|
||||
if err := tm.writeAny(w, val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.WriteByte('\n'); err != nil {
|
||||
|
6  vendor/github.com/golang/protobuf/proto/text_parser.go (generated, vendored)
@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.mkeyprop); err != nil {
|
||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.mvalprop); err != nil {
|
||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
|
5  vendor/github.com/jlaffaye/ftp/ftp.go (generated, vendored)
@ -188,6 +188,11 @@ func (c *ServerConn) setUTF8() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Workaround for FTP servers, that does not support this option.
|
||||
if code == StatusBadArguments {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The ftpd "filezilla-server" has FEAT support for UTF8, but always returns
|
||||
// "202 UTF8 mode is always enabled. No need to send this command." when
|
||||
// trying to use it. That's OK
|
||||
|
2  vendor/github.com/jmespath/go-jmespath/.gitignore (generated, vendored)
@ -1,4 +1,4 @@
|
||||
jpgo
|
||||
/jpgo
|
||||
jmespath-fuzz.zip
|
||||
cpu.out
|
||||
go-jmespath.test
|
||||
|
2  vendor/github.com/jmespath/go-jmespath/api.go (generated, vendored)
@ -2,7 +2,7 @@ package jmespath
|
||||
|
||||
import "strconv"
|
||||
|
||||
// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is
|
||||
// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is
|
||||
// safe for concurrent use by multiple goroutines.
|
||||
type JMESPath struct {
|
||||
ast ASTNode
|
||||
|
14  vendor/github.com/mattn/go-runewidth/runewidth.go (generated, vendored)
@ -1,13 +1,24 @@
|
||||
package runewidth
|
||||
|
||||
import "os"
|
||||
|
||||
var (
|
||||
// EastAsianWidth will be set true if the current locale is CJK
|
||||
EastAsianWidth = IsEastAsian()
|
||||
EastAsianWidth bool
|
||||
|
||||
// DefaultCondition is a condition in current locale
|
||||
DefaultCondition = &Condition{EastAsianWidth}
|
||||
)
|
||||
|
||||
func init() {
|
||||
env := os.Getenv("RUNEWIDTH_EASTASIAN")
|
||||
if env == "" {
|
||||
EastAsianWidth = IsEastAsian()
|
||||
} else {
|
||||
EastAsianWidth = env == "1"
|
||||
}
|
||||
}
|
||||
|
||||
type interval struct {
|
||||
first rune
|
||||
last rune
|
||||
@ -55,6 +66,7 @@ var private = table{
|
||||
var nonprint = table{
|
||||
{0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
|
||||
{0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
|
||||
{0x2028, 0x2029},
|
||||
{0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
|
||||
{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
|
||||
}
|
||||
|
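The new init() above lets the RUNEWIDTH_EASTASIAN environment variable force CJK width handling instead of always probing the locale; because it is read at package init, it must be set before the program starts. A minimal sketch using only the exported runewidth API:

package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	// Run with RUNEWIDTH_EASTASIAN=1 (or =0) in the environment to force the
	// setting; ambiguous-width runes such as U+2606 then count as width 2 (or 1).
	fmt.Println(runewidth.EastAsianWidth, runewidth.StringWidth("☆"))
}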
14  vendor/github.com/ncw/swift/README.md (generated, vendored)
@ -127,17 +127,25 @@ Contributors
|
||||
- Paul Querna <pquerna@apache.org>
|
||||
- Livio Soares <liviobs@gmail.com>
|
||||
- thesyncim <thesyncim@gmail.com>
|
||||
- lsowen <lsowen@s1network.com>
|
||||
- lsowen <lsowen@s1network.com> <logan@s1network.com>
|
||||
- Sylvain Baubeau <sbaubeau@redhat.com>
|
||||
- Chris Kastorff <encryptio@gmail.com>
|
||||
- Dai HaoJun <haojun.dai@hp.com>
|
||||
- Hua Wang <wanghua.humble@gmail.com>
|
||||
- Fabian Ruff <fabian@progra.de>
|
||||
- Fabian Ruff <fabian@progra.de> <fabian.ruff@sap.com>
|
||||
- Arturo Reuschenbach Puncernau <reuschenbach@gmail.com>
|
||||
- Petr Kotek <petr.kotek@bigcommerce.com>
|
||||
- Stefan Majewsky <stefan.majewsky@sap.com>
|
||||
- Stefan Majewsky <stefan.majewsky@sap.com> <majewsky@gmx.net>
|
||||
- Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
- Sam Gunaratne <samgzeit@gmail.com>
|
||||
- Richard Scothern <richard.scothern@gmail.com>
|
||||
- Michel Couillard <couillard.michel@voxlog.ca>
|
||||
- Christopher Waldon <ckwaldon@us.ibm.com>
|
||||
- dennis <dai.haojun@gmail.com>
|
||||
- hag <hannes.georg@xing.com>
|
||||
- Alexander Neumann <alexander@bumpern.de>
|
||||
- eclipseo <30413512+eclipseo@users.noreply.github.com>
|
||||
- Yuri Per <yuri@acronis.com>
|
||||
- Falk Reimann <falk.reimann@sap.com>
|
||||
- Arthur Paim Arnold <arthurpaimarnold@gmail.com>
|
||||
- Bruno Michel <bmichel@menfin.info>
|
||||
|
4  vendor/github.com/ncw/swift/swift.go (generated, vendored)
@ -1895,6 +1895,10 @@ func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, er
|
||||
// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html
|
||||
// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html
|
||||
func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) {
|
||||
if len(objectNames) == 0 {
|
||||
result.Errors = make(map[string]error)
|
||||
return
|
||||
}
|
||||
fullPaths := make([]string, len(objectNames))
|
||||
for i, name := range objectNames {
|
||||
fullPaths[i] = fmt.Sprintf("/%s/%s", container, name)
|
||||
|
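The guard added to BulkDelete above means a call with no object names returns an empty result without issuing an HTTP request. A caller-side fragment, assuming an already authenticated connection (not shown in this diff):

// c is an authenticated *swift.Connection set up elsewhere.
result, err := c.BulkDelete("my-container", nil)
if err == nil {
	fmt.Println(len(result.Errors)) // 0: empty, non-nil map, no round trip made
}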
1  vendor/github.com/nsf/termbox-go/README.md (generated, vendored)
@ -38,6 +38,7 @@ There are also some interesting projects using termbox-go:
|
||||
- [gotypist](https://github.com/pb-/gotypist) is a fun touch-typing tutor following Steve Yegge's method.
|
||||
- [cointop](https://github.com/miguelmota/cointop) is an interactive terminal based UI application for tracking cryptocurrencies.
|
||||
- [pexpo](https://github.com/nnao45/pexpo) is a terminal sending ping tool written in Go.
|
||||
- [jid](https://github.com/simeji/jid) is an interactive JSON drill down tool using filtering queries like jq.
|
||||
|
||||
### API reference
|
||||
[godoc.org/github.com/nsf/termbox-go](http://godoc.org/github.com/nsf/termbox-go)
|
||||
|
4  vendor/github.com/pkg/sftp/.travis.yml (generated, vendored)
@ -4,8 +4,8 @@ go_import_path: github.com/pkg/sftp
|
||||
# current and previous stable releases, plus tip
|
||||
# remember to exclude previous and tip for macs below
|
||||
go:
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- tip
|
||||
|
||||
os:
|
||||
@ -15,7 +15,7 @@ os:
|
||||
matrix:
|
||||
exclude:
|
||||
- os: osx
|
||||
go: 1.9.x
|
||||
go: 1.10.x
|
||||
- os: osx
|
||||
go: tip
|
||||
|
||||
|
2  vendor/github.com/pkg/sftp/attrs_unix.go (generated, vendored)
@ -1,4 +1,4 @@
|
||||
// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris
|
||||
// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix
|
||||
// +build cgo
|
||||
|
||||
package sftp
|
||||
|
154  vendor/github.com/pkg/sftp/packet-manager.go (generated, vendored)
@ -7,30 +7,30 @@ import (
|
||||
)
|
||||
|
||||
// The goal of the packetManager is to keep the outgoing packets in the same
|
||||
// order as the incoming. This is due to some sftp clients requiring this
|
||||
// behavior (eg. winscp).
|
||||
// order as the incoming as is requires by section 7 of the RFC.
|
||||
|
||||
type packetManager struct {
|
||||
requests chan orderedPacket
|
||||
responses chan orderedPacket
|
||||
fini chan struct{}
|
||||
incoming orderedPackets
|
||||
outgoing orderedPackets
|
||||
sender packetSender // connection object
|
||||
working *sync.WaitGroup
|
||||
packetCount uint32
|
||||
}
|
||||
|
||||
type packetSender interface {
|
||||
sendPacket(encoding.BinaryMarshaler) error
|
||||
}
|
||||
|
||||
type packetManager struct {
|
||||
requests chan requestPacket
|
||||
responses chan responsePacket
|
||||
fini chan struct{}
|
||||
incoming requestPacketIDs
|
||||
outgoing responsePackets
|
||||
sender packetSender // connection object
|
||||
working *sync.WaitGroup
|
||||
}
|
||||
|
||||
func newPktMgr(sender packetSender) *packetManager {
|
||||
s := &packetManager{
|
||||
requests: make(chan requestPacket, SftpServerWorkerCount),
|
||||
responses: make(chan responsePacket, SftpServerWorkerCount),
|
||||
requests: make(chan orderedPacket, SftpServerWorkerCount),
|
||||
responses: make(chan orderedPacket, SftpServerWorkerCount),
|
||||
fini: make(chan struct{}),
|
||||
incoming: make([]uint32, 0, SftpServerWorkerCount),
|
||||
outgoing: make([]responsePacket, 0, SftpServerWorkerCount),
|
||||
incoming: make([]orderedPacket, 0, SftpServerWorkerCount),
|
||||
outgoing: make([]orderedPacket, 0, SftpServerWorkerCount),
|
||||
sender: sender,
|
||||
working: &sync.WaitGroup{},
|
||||
}
|
||||
@ -38,31 +38,56 @@ func newPktMgr(sender packetSender) *packetManager {
|
||||
return s
|
||||
}
|
||||
|
||||
type responsePackets []responsePacket
|
||||
//// packet ordering
|
||||
func (s *packetManager) newOrderId() uint32 {
|
||||
s.packetCount++
|
||||
return s.packetCount
|
||||
}
|
||||
|
||||
func (r responsePackets) Sort() {
|
||||
sort.Slice(r, func(i, j int) bool {
|
||||
return r[i].id() < r[j].id()
|
||||
})
|
||||
}
|
||||
|
||||
type requestPacketIDs []uint32
|
||||
|
||||
func (r requestPacketIDs) Sort() {
|
||||
sort.Slice(r, func(i, j int) bool {
|
||||
return r[i] < r[j]
|
||||
type orderedRequest struct {
|
||||
requestPacket
|
||||
orderid uint32
|
||||
}
|
||||
|
||||
func (s *packetManager) newOrderedRequest(p requestPacket) orderedRequest {
|
||||
return orderedRequest{requestPacket: p, orderid: s.newOrderId()}
|
||||
}
|
||||
func (p orderedRequest) orderId() uint32 { return p.orderid }
|
||||
func (p orderedRequest) setOrderId(oid uint32) { p.orderid = oid }
|
||||
|
||||
type orderedResponse struct {
|
||||
responsePacket
|
||||
orderid uint32
|
||||
}
|
||||
|
||||
func (s *packetManager) newOrderedResponse(p responsePacket, id uint32,
|
||||
) orderedResponse {
|
||||
return orderedResponse{responsePacket: p, orderid: id}
|
||||
}
|
||||
func (p orderedResponse) orderId() uint32 { return p.orderid }
|
||||
func (p orderedResponse) setOrderId(oid uint32) { p.orderid = oid }
|
||||
|
||||
type orderedPacket interface {
|
||||
id() uint32
|
||||
orderId() uint32
|
||||
}
|
||||
type orderedPackets []orderedPacket
|
||||
|
||||
func (o orderedPackets) Sort() {
|
||||
sort.Slice(o, func(i, j int) bool {
|
||||
return o[i].orderId() < o[j].orderId()
|
||||
})
|
||||
}
|
||||
|
||||
//// packet registry
|
||||
// register incoming packets to be handled
|
||||
// send id of 0 for packets without id
|
||||
func (s *packetManager) incomingPacket(pkt requestPacket) {
|
||||
func (s *packetManager) incomingPacket(pkt orderedRequest) {
|
||||
s.working.Add(1)
|
||||
s.requests <- pkt // buffer == SftpServerWorkerCount
|
||||
s.requests <- pkt
|
||||
}
|
||||
|
||||
// register outgoing packets as being ready
|
||||
func (s *packetManager) readyPacket(pkt responsePacket) {
|
||||
func (s *packetManager) readyPacket(pkt orderedResponse) {
|
||||
s.responses <- pkt
|
||||
s.working.Done()
|
||||
}
|
||||
@ -75,38 +100,37 @@ func (s *packetManager) close() {
|
||||
}
|
||||
|
||||
// Passed a worker function, returns a channel for incoming packets.
|
||||
// The goal is to process packets in the order they are received as is
|
||||
// requires by section 7 of the RFC, while maximizing throughput of file
|
||||
// transfers.
|
||||
func (s *packetManager) workerChan(runWorker func(requestChan)) requestChan {
|
||||
// Keep process packet responses in the order they are received while
|
||||
// maximizing throughput of file transfers.
|
||||
func (s *packetManager) workerChan(runWorker func(chan orderedRequest),
|
||||
) chan orderedRequest {
|
||||
|
||||
rwChan := make(chan requestPacket, SftpServerWorkerCount)
|
||||
// multiple workers for faster read/writes
|
||||
rwChan := make(chan orderedRequest, SftpServerWorkerCount)
|
||||
for i := 0; i < SftpServerWorkerCount; i++ {
|
||||
runWorker(rwChan)
|
||||
}
|
||||
|
||||
cmdChan := make(chan requestPacket)
|
||||
// single worker to enforce sequential processing of everything else
|
||||
cmdChan := make(chan orderedRequest)
|
||||
runWorker(cmdChan)
|
||||
|
||||
pktChan := make(chan requestPacket, SftpServerWorkerCount)
|
||||
pktChan := make(chan orderedRequest, SftpServerWorkerCount)
|
||||
go func() {
|
||||
// start with cmdChan
|
||||
curChan := cmdChan
|
||||
for pkt := range pktChan {
|
||||
// on file open packet, switch to rwChan
|
||||
switch pkt.(type) {
|
||||
case *sshFxpOpenPacket:
|
||||
curChan = rwChan
|
||||
// on file close packet, switch back to cmdChan
|
||||
// after waiting for any reads/writes to finish
|
||||
switch pkt.requestPacket.(type) {
|
||||
case *sshFxpReadPacket, *sshFxpWritePacket:
|
||||
s.incomingPacket(pkt)
|
||||
rwChan <- pkt
|
||||
continue
|
||||
case *sshFxpClosePacket:
|
||||
// wait for rwChan to finish
|
||||
// wait for reads/writes to finish when file is closed
|
||||
// incomingPacket() call must occur after this
|
||||
s.working.Wait()
|
||||
// stop using rwChan
|
||||
curChan = cmdChan
|
||||
}
|
||||
s.incomingPacket(pkt)
|
||||
curChan <- pkt
|
||||
// all non-RW use sequential cmdChan
|
||||
cmdChan <- pkt
|
||||
}
|
||||
close(rwChan)
|
||||
close(cmdChan)
|
||||
@ -121,17 +145,13 @@ func (s *packetManager) controller() {
|
||||
for {
|
||||
select {
|
||||
case pkt := <-s.requests:
|
||||
debug("incoming id: %v", pkt.id())
|
||||
s.incoming = append(s.incoming, pkt.id())
|
||||
if len(s.incoming) > 1 {
|
||||
debug("incoming id (oid): %v (%v)", pkt.id(), pkt.orderId())
|
||||
s.incoming = append(s.incoming, pkt)
|
||||
s.incoming.Sort()
|
||||
}
|
||||
case pkt := <-s.responses:
|
||||
debug("outgoing pkt: %v", pkt.id())
|
||||
debug("outgoing id (oid): %v (%v)", pkt.id(), pkt.orderId())
|
||||
s.outgoing = append(s.outgoing, pkt)
|
||||
if len(s.outgoing) > 1 {
|
||||
s.outgoing.Sort()
|
||||
}
|
||||
case <-s.fini:
|
||||
return
|
||||
}
|
||||
@ -149,10 +169,11 @@ func (s *packetManager) maybeSendPackets() {
|
||||
}
|
||||
out := s.outgoing[0]
|
||||
in := s.incoming[0]
|
||||
// debug("incoming: %v", s.incoming)
|
||||
// debug("outgoing: %v", outfilter(s.outgoing))
|
||||
if in == out.id() {
|
||||
s.sender.sendPacket(out)
|
||||
// debug("incoming: %v", ids(s.incoming))
|
||||
// debug("outgoing: %v", ids(s.outgoing))
|
||||
if in.orderId() == out.orderId() {
|
||||
debug("Sending packet: %v", out.id())
|
||||
s.sender.sendPacket(out.(encoding.BinaryMarshaler))
|
||||
// pop off heads
|
||||
copy(s.incoming, s.incoming[1:]) // shift left
|
||||
s.incoming = s.incoming[:len(s.incoming)-1] // remove last
|
||||
@ -164,7 +185,14 @@ func (s *packetManager) maybeSendPackets() {
|
||||
}
|
||||
}
|
||||
|
||||
//func outfilter(o []responsePacket) []uint32 {
|
||||
// func oids(o []orderedPacket) []uint32 {
|
||||
// res := make([]uint32, 0, len(o))
|
||||
// for _, v := range o {
|
||||
// res = append(res, v.orderId())
|
||||
// }
|
||||
// return res
|
||||
// }
|
||||
// func ids(o []orderedPacket) []uint32 {
|
||||
// res := make([]uint32, 0, len(o))
|
||||
// for _, v := range o {
|
||||
// res = append(res, v.id())
|
||||
|
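The packet-manager rewrite above boils down to: stamp every request with a monotonically increasing order id, stamp its response with the same id, and only send a response once it sits at the head of both sorted queues. A simplified, standalone sketch of that invariant (toy types, not the vendored orderedRequest/orderedResponse):

package main

import (
	"fmt"
	"sort"
)

type ordered struct {
	orderID uint32
	payload string
}

// drain emits responses strictly in arrival (order id) order, the way
// packetManager.maybeSendPackets does when the two heads match.
func drain(incoming []uint32, outgoing []ordered) {
	sort.Slice(incoming, func(i, j int) bool { return incoming[i] < incoming[j] })
	sort.Slice(outgoing, func(i, j int) bool { return outgoing[i].orderID < outgoing[j].orderID })
	for len(incoming) > 0 && len(outgoing) > 0 && incoming[0] == outgoing[0].orderID {
		fmt.Println("send", outgoing[0].payload)
		incoming, outgoing = incoming[1:], outgoing[1:]
	}
}

func main() {
	// Response 2 finished first, but 1 is sent first because it arrived first.
	drain([]uint32{1, 2}, []ordered{{2, "resp-2"}, {1, "resp-1"}})
}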
3  vendor/github.com/pkg/sftp/packet-typing.go (generated, vendored)
@ -12,8 +12,6 @@ type requestPacket interface {
|
||||
id() uint32
|
||||
}
|
||||
|
||||
type requestChan chan requestPacket
|
||||
|
||||
type responsePacket interface {
|
||||
encoding.BinaryMarshaler
|
||||
id() uint32
|
||||
@ -77,6 +75,7 @@ func (p sshFxpStatusPacket) id() uint32 { return p.ID }
|
||||
func (p sshFxpStatResponse) id() uint32 { return p.ID }
|
||||
func (p sshFxpNamePacket) id() uint32 { return p.ID }
|
||||
func (p sshFxpHandlePacket) id() uint32 { return p.ID }
|
||||
func (p StatVFS) id() uint32 { return p.ID }
|
||||
func (p sshFxVersionPacket) id() uint32 { return 0 }
|
||||
|
||||
// take raw incoming packet data and build packet objects
|
||||
|
23  vendor/github.com/pkg/sftp/packet.go (generated, vendored)
@ -125,15 +125,14 @@ func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error {
|
||||
} else if debugDumpTxPacket {
|
||||
debug("send packet: %s %d bytes", fxp(bb[0]), len(bb))
|
||||
}
|
||||
l := uint32(len(bb))
|
||||
hdr := []byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}
|
||||
_, err = w.Write(hdr)
|
||||
// Slide packet down 4 bytes to make room for length header.
|
||||
packet := append(bb, make([]byte, 4)...) // optimistically assume bb has capacity
|
||||
copy(packet[4:], bb)
|
||||
binary.BigEndian.PutUint32(packet[:4], uint32(len(bb)))
|
||||
|
||||
_, err = w.Write(packet)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to send packet header: %v", err)
|
||||
}
|
||||
_, err = w.Write(bb)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to send packet body: %v", err)
|
||||
return errors.Errorf("failed to send packet: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
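The sendPacket change above replaces two Writes (header, then body) with a single contiguous buffer, so the length prefix and payload go out in one call. The framing itself is just a 4-byte big-endian length; a minimal sketch using only the standard library:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prefixes a marshaled packet with its big-endian length, as the
// rewritten sendPacket does before one w.Write call.
func frame(bb []byte) []byte {
	packet := make([]byte, 4+len(bb))
	binary.BigEndian.PutUint32(packet[:4], uint32(len(bb)))
	copy(packet[4:], bb)
	return packet
}

func main() {
	fmt.Printf("% x\n", frame([]byte{0x05, 0x03})) // 00 00 00 02 05 03
}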
@ -882,9 +881,9 @@ func (p sshFxpExtendedPacket) readonly() bool {
|
||||
return p.SpecificPacket.readonly()
|
||||
}
|
||||
|
||||
func (p sshFxpExtendedPacket) respond(svr *Server) error {
|
||||
func (p sshFxpExtendedPacket) respond(svr *Server) responsePacket {
|
||||
if p.SpecificPacket == nil {
|
||||
return nil
|
||||
return statusFromError(p, nil)
|
||||
}
|
||||
return p.SpecificPacket.respond(svr)
|
||||
}
|
||||
@ -954,7 +953,7 @@ func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p sshFxpExtendedPacketPosixRename) respond(s *Server) error {
|
||||
func (p sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket {
|
||||
err := os.Rename(p.Oldpath, p.Newpath)
|
||||
return s.sendError(p, err)
|
||||
return statusFromError(p, err)
|
||||
}
|
||||
|
1  vendor/github.com/pkg/sftp/request-example.go (generated, vendored)
@ -83,6 +83,7 @@ func (fs *root) Filecmd(r *Request) error {
|
||||
return &os.LinkError{Op: "rename", Old: r.Filepath, New: r.Target,
|
||||
Err: fmt.Errorf("dest file exists")}
|
||||
}
|
||||
file.name = r.Target
|
||||
fs.files[r.Target] = file
|
||||
delete(fs.files, r.Filepath)
|
||||
case "Rmdir", "Remove":
|
||||
|
31  vendor/github.com/pkg/sftp/request-server.go (generated, vendored)
@ -2,7 +2,6 @@ package sftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
@ -106,7 +105,7 @@ func (rs *RequestServer) Serve() error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
var wg sync.WaitGroup
|
||||
runWorker := func(ch requestChan) {
|
||||
runWorker := func(ch chan orderedRequest) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@ -143,7 +142,7 @@ func (rs *RequestServer) Serve() error {
|
||||
}
|
||||
}
|
||||
|
||||
pktChan <- pkt
|
||||
pktChan <- rs.pktMgr.newOrderedRequest(pkt)
|
||||
}
|
||||
|
||||
close(pktChan) // shuts down sftpServerWorkers
|
||||
@ -160,13 +159,13 @@ func (rs *RequestServer) Serve() error {
|
||||
}
|
||||
|
||||
func (rs *RequestServer) packetWorker(
|
||||
ctx context.Context, pktChan chan requestPacket,
|
||||
ctx context.Context, pktChan chan orderedRequest,
|
||||
) error {
|
||||
for pkt := range pktChan {
|
||||
var rpkt responsePacket
|
||||
switch pkt := pkt.(type) {
|
||||
switch pkt := pkt.requestPacket.(type) {
|
||||
case *sshFxInitPacket:
|
||||
rpkt = sshFxVersionPacket{sftpProtocolVersion, nil}
|
||||
rpkt = sshFxVersionPacket{Version: sftpProtocolVersion}
|
||||
case *sshFxpClosePacket:
|
||||
handle := pkt.getHandle()
|
||||
rpkt = statusFromError(pkt, rs.closeRequest(handle))
|
||||
@ -178,7 +177,7 @@ func (rs *RequestServer) packetWorker(
|
||||
if stat, ok := rpkt.(*sshFxpStatResponse); ok {
|
||||
if stat.info.IsDir() {
|
||||
handle := rs.nextRequest(request)
|
||||
rpkt = sshFxpHandlePacket{pkt.id(), handle}
|
||||
rpkt = sshFxpHandlePacket{ID: pkt.id(), Handle: handle}
|
||||
} else {
|
||||
rpkt = statusFromError(pkt, &os.PathError{
|
||||
Path: request.Filepath, Err: syscall.ENOTDIR})
|
||||
@ -187,7 +186,7 @@ func (rs *RequestServer) packetWorker(
|
||||
case *sshFxpOpenPacket:
|
||||
request := requestFromPacket(ctx, pkt)
|
||||
handle := rs.nextRequest(request)
|
||||
rpkt = sshFxpHandlePacket{pkt.id(), handle}
|
||||
rpkt = sshFxpHandlePacket{ID: pkt.id(), Handle: handle}
|
||||
if pkt.hasPflags(ssh_FXF_CREAT) {
|
||||
if p := request.call(rs.Handlers, pkt); !statusOk(p) {
|
||||
rpkt = p // if error in write, return it
|
||||
@ -209,10 +208,8 @@ func (rs *RequestServer) packetWorker(
|
||||
return errors.Errorf("unexpected packet type %T", pkt)
|
||||
}
|
||||
|
||||
err := rs.sendPacket(rpkt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rs.pktMgr.readyPacket(
|
||||
rs.pktMgr.newOrderedResponse(rpkt, pkt.orderId()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -244,13 +241,3 @@ func cleanPath(p string) string {
|
||||
}
|
||||
return path.Clean(p)
|
||||
}
|
||||
|
||||
// Wrap underlying connection methods to use packetManager
|
||||
func (rs *RequestServer) sendPacket(m encoding.BinaryMarshaler) error {
|
||||
if pkt, ok := m.(responsePacket); ok {
|
||||
rs.pktMgr.readyPacket(pkt)
|
||||
} else {
|
||||
return errors.Errorf("unexpected packet type %T", m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
56  vendor/github.com/pkg/sftp/request.go (generated, vendored)
@ -116,34 +116,12 @@ func (r *Request) lsInc(offset int64) {
|
||||
}
|
||||
|
||||
// manage file read/write state
|
||||
func (r *Request) setWriterState(wa io.WriterAt) {
|
||||
r.state.Lock()
|
||||
defer r.state.Unlock()
|
||||
r.state.writerAt = wa
|
||||
}
|
||||
func (r *Request) setReaderState(ra io.ReaderAt) {
|
||||
r.state.Lock()
|
||||
defer r.state.Unlock()
|
||||
r.state.readerAt = ra
|
||||
}
|
||||
func (r *Request) setListerState(la ListerAt) {
|
||||
r.state.Lock()
|
||||
defer r.state.Unlock()
|
||||
r.state.listerAt = la
|
||||
}
|
||||
|
||||
func (r *Request) getWriter() io.WriterAt {
|
||||
r.state.RLock()
|
||||
defer r.state.RUnlock()
|
||||
return r.state.writerAt
|
||||
}
|
||||
|
||||
func (r *Request) getReader() io.ReaderAt {
|
||||
r.state.RLock()
|
||||
defer r.state.RUnlock()
|
||||
return r.state.readerAt
|
||||
}
|
||||
|
||||
func (r *Request) getLister() ListerAt {
|
||||
r.state.RLock()
|
||||
defer r.state.RUnlock()
|
||||
@ -157,11 +135,15 @@ func (r *Request) close() error {
|
||||
r.cancelCtx()
|
||||
}
|
||||
}()
|
||||
rd := r.getReader()
|
||||
r.state.RLock()
|
||||
rd := r.state.readerAt
|
||||
r.state.RUnlock()
|
||||
if c, ok := rd.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
wt := r.getWriter()
|
||||
r.state.RLock()
|
||||
wt := r.state.writerAt
|
||||
r.state.RUnlock()
|
||||
if c, ok := wt.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
@ -204,13 +186,20 @@ func packetData(p requestPacket) (data []byte, offset int64, length uint32) {
|
||||
// wrap FileReader handler
|
||||
func fileget(h FileReader, r *Request, pkt requestPacket) responsePacket {
|
||||
var err error
|
||||
reader := r.getReader()
|
||||
r.state.RLock()
|
||||
reader := r.state.readerAt
|
||||
r.state.RUnlock()
|
||||
if reader == nil {
|
||||
reader, err = h.Fileread(r)
|
||||
r.state.Lock()
|
||||
if r.state.readerAt == nil {
|
||||
r.state.readerAt, err = h.Fileread(r)
|
||||
if err != nil {
|
||||
r.state.Unlock()
|
||||
return statusFromError(pkt, err)
|
||||
}
|
||||
r.setReaderState(reader)
|
||||
}
|
||||
reader = r.state.readerAt
|
||||
r.state.Unlock()
|
||||
}
|
||||
|
||||
_, offset, length := packetData(pkt)
|
||||
@ -230,13 +219,20 @@ func fileget(h FileReader, r *Request, pkt requestPacket) responsePacket {
|
||||
// wrap FileWriter handler
|
||||
func fileput(h FileWriter, r *Request, pkt requestPacket) responsePacket {
|
||||
var err error
|
||||
writer := r.getWriter()
|
||||
r.state.RLock()
|
||||
writer := r.state.writerAt
|
||||
r.state.RUnlock()
|
||||
if writer == nil {
|
||||
writer, err = h.Filewrite(r)
|
||||
r.state.Lock()
|
||||
if r.state.writerAt == nil {
|
||||
r.state.writerAt, err = h.Filewrite(r)
|
||||
if err != nil {
|
||||
r.state.Unlock()
|
||||
return statusFromError(pkt, err)
|
||||
}
|
||||
r.setWriterState(writer)
|
||||
}
|
||||
writer = r.state.writerAt
|
||||
r.state.Unlock()
|
||||
}
|
||||
|
||||
data, offset, _ := packetData(pkt)
|
||||
|
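Note: the fileget/fileput changes above replace the getReader/setReaderState helpers with an inline double-checked initialisation guarded by r.state's RWMutex. The following standalone sketch (illustrative only; the lazyFile type and its names are not part of the sftp package) shows the same pattern of a read-locked fast path and a re-check under the write lock before initialising:

package main

import (
	"fmt"
	"sync"
)

// lazyFile lazily initialises a resource the first time it is needed,
// mirroring the read-lock / re-check-under-write-lock pattern above.
type lazyFile struct {
	mu     sync.RWMutex
	reader string // stands in for io.ReaderAt
}

func (l *lazyFile) get(open func() (string, error)) (string, error) {
	// Fast path: only the read lock is taken.
	l.mu.RLock()
	r := l.reader
	l.mu.RUnlock()
	if r != "" {
		return r, nil
	}

	// Slow path: take the write lock and check again, since another
	// goroutine may have initialised the reader in the meantime.
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.reader == "" {
		r, err := open()
		if err != nil {
			return "", err
		}
		l.reader = r
	}
	return l.reader, nil
}

func main() {
	f := &lazyFile{}
	r, err := f.get(func() (string, error) { return "handle-1", nil })
	fmt.Println(r, err) // handle-1 <nil>
}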
vendor/github.com/pkg/sftp/server.go (generated, vendored): 187 changed lines
@ -66,7 +66,7 @@ func (svr *Server) getHandle(handle string) (*os.File, bool) {
|
||||
type serverRespondablePacket interface {
|
||||
encoding.BinaryUnmarshaler
|
||||
id() uint32
|
||||
respond(svr *Server) error
|
||||
respond(svr *Server) responsePacket
|
||||
}
|
||||
|
||||
// NewServer creates a new Server instance around the provided streams, serving
|
||||
@ -123,12 +123,11 @@ type rxPacket struct {
|
||||
}
|
||||
|
||||
// Up to N parallel servers
|
||||
func (svr *Server) sftpServerWorker(pktChan chan requestPacket) error {
|
||||
func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error {
|
||||
for pkt := range pktChan {
|
||||
|
||||
// readonly checks
|
||||
readonly := true
|
||||
switch pkt := pkt.(type) {
|
||||
switch pkt := pkt.requestPacket.(type) {
|
||||
case notReadOnly:
|
||||
readonly = false
|
||||
case *sshFxpOpenPacket:
|
||||
@ -140,9 +139,9 @@ func (svr *Server) sftpServerWorker(pktChan chan requestPacket) error {
|
||||
// If server is operating read-only and a write operation is requested,
|
||||
// return permission denied
|
||||
if !readonly && svr.readOnly {
|
||||
if err := svr.sendError(pkt, syscall.EPERM); err != nil {
|
||||
return errors.Wrap(err, "failed to send read only packet response")
|
||||
}
|
||||
svr.sendPacket(orderedResponse{
|
||||
responsePacket: statusFromError(pkt, syscall.EPERM),
|
||||
orderid: pkt.orderId()})
|
||||
continue
|
||||
}
|
||||
|
||||
@ -153,141 +152,145 @@ func (svr *Server) sftpServerWorker(pktChan chan requestPacket) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func handlePacket(s *Server, p interface{}) error {
|
||||
switch p := p.(type) {
|
||||
func handlePacket(s *Server, p orderedRequest) error {
|
||||
var rpkt responsePacket
|
||||
switch p := p.requestPacket.(type) {
|
||||
case *sshFxInitPacket:
|
||||
return s.sendPacket(sshFxVersionPacket{sftpProtocolVersion, nil})
|
||||
rpkt = sshFxVersionPacket{Version: sftpProtocolVersion}
|
||||
case *sshFxpStatPacket:
|
||||
// stat the requested file
|
||||
info, err := os.Stat(p.Path)
|
||||
if err != nil {
|
||||
return s.sendError(p, err)
|
||||
}
|
||||
return s.sendPacket(sshFxpStatResponse{
|
||||
rpkt = sshFxpStatResponse{
|
||||
ID: p.ID,
|
||||
info: info,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
rpkt = statusFromError(p, err)
|
||||
}
|
||||
case *sshFxpLstatPacket:
|
||||
// stat the requested file
|
||||
info, err := os.Lstat(p.Path)
|
||||
if err != nil {
|
||||
return s.sendError(p, err)
|
||||
}
|
||||
return s.sendPacket(sshFxpStatResponse{
|
||||
rpkt = sshFxpStatResponse{
|
||||
ID: p.ID,
|
||||
info: info,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
rpkt = statusFromError(p, err)
|
||||
}
|
||||
case *sshFxpFstatPacket:
|
||||
f, ok := s.getHandle(p.Handle)
|
||||
if !ok {
|
||||
return s.sendError(p, syscall.EBADF)
|
||||
}
|
||||
|
||||
info, err := f.Stat()
|
||||
if err != nil {
|
||||
return s.sendError(p, err)
|
||||
}
|
||||
|
||||
return s.sendPacket(sshFxpStatResponse{
|
||||
var err error = syscall.EBADF
|
||||
var info os.FileInfo
|
||||
if ok {
|
||||
info, err = f.Stat()
|
||||
rpkt = sshFxpStatResponse{
|
||||
ID: p.ID,
|
||||
info: info,
|
||||
})
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
rpkt = statusFromError(p, err)
|
||||
}
|
||||
case *sshFxpMkdirPacket:
|
||||
// TODO FIXME: ignore flags field
|
||||
err := os.Mkdir(p.Path, 0755)
|
||||
return s.sendError(p, err)
|
||||
rpkt = statusFromError(p, err)
|
||||
case *sshFxpRmdirPacket:
|
||||
err := os.Remove(p.Path)
|
||||
return s.sendError(p, err)
|
||||
rpkt = statusFromError(p, err)
|
||||
case *sshFxpRemovePacket:
|
||||
err := os.Remove(p.Filename)
|
||||
return s.sendError(p, err)
|
||||
rpkt = statusFromError(p, err)
|
||||
case *sshFxpRenamePacket:
|
||||
err := os.Rename(p.Oldpath, p.Newpath)
|
||||
return s.sendError(p, err)
|
||||
rpkt = statusFromError(p, err)
|
||||
case *sshFxpSymlinkPacket:
|
||||
err := os.Symlink(p.Targetpath, p.Linkpath)
|
||||
return s.sendError(p, err)
|
||||
rpkt = statusFromError(p, err)
|
||||
case *sshFxpClosePacket:
|
||||
return s.sendError(p, s.closeHandle(p.Handle))
|
||||
rpkt = statusFromError(p, s.closeHandle(p.Handle))
|
||||
case *sshFxpReadlinkPacket:
|
||||
f, err := os.Readlink(p.Path)
|
||||
if err != nil {
|
||||
return s.sendError(p, err)
|
||||
}
|
||||
|
||||
return s.sendPacket(sshFxpNamePacket{
|
||||
rpkt = sshFxpNamePacket{
|
||||
ID: p.ID,
|
||||
NameAttrs: []sshFxpNameAttr{{
|
||||
Name: f,
|
||||
LongName: f,
|
||||
Attrs: emptyFileStat,
|
||||
}},
|
||||
})
|
||||
|
||||
}
|
||||
if err != nil {
|
||||
rpkt = statusFromError(p, err)
|
||||
}
|
||||
case *sshFxpRealpathPacket:
|
||||
f, err := filepath.Abs(p.Path)
|
||||
if err != nil {
|
||||
return s.sendError(p, err)
|
||||
}
|
||||
f = cleanPath(f)
|
||||
return s.sendPacket(sshFxpNamePacket{
|
||||
rpkt = sshFxpNamePacket{
|
||||
ID: p.ID,
|
||||
NameAttrs: []sshFxpNameAttr{{
|
||||
Name: f,
|
||||
LongName: f,
|
||||
Attrs: emptyFileStat,
|
||||
}},
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
rpkt = statusFromError(p, err)
|
||||
}
|
||||
case *sshFxpOpendirPacket:
|
||||
if stat, err := os.Stat(p.Path); err != nil {
|
||||
return s.sendError(p, err)
|
||||
rpkt = statusFromError(p, err)
|
||||
} else if !stat.IsDir() {
|
||||
return s.sendError(p, &os.PathError{
|
||||
rpkt = statusFromError(p, &os.PathError{
|
||||
Path: p.Path, Err: syscall.ENOTDIR})
|
||||
}
|
||||
return sshFxpOpenPacket{
|
||||
} else {
|
||||
rpkt = sshFxpOpenPacket{
|
||||
ID: p.ID,
|
||||
Path: p.Path,
|
||||
Pflags: ssh_FXF_READ,
|
||||
}.respond(s)
|
||||
}
|
||||
case *sshFxpReadPacket:
|
||||
var err error = syscall.EBADF
|
||||
f, ok := s.getHandle(p.Handle)
|
||||
if !ok {
|
||||
return s.sendError(p, syscall.EBADF)
|
||||
}
|
||||
|
||||
if ok {
|
||||
err = nil
|
||||
data := make([]byte, clamp(p.Len, s.maxTxPacket))
|
||||
n, err := f.ReadAt(data, int64(p.Offset))
|
||||
if err != nil && (err != io.EOF || n == 0) {
|
||||
return s.sendError(p, err)
|
||||
n, _err := f.ReadAt(data, int64(p.Offset))
|
||||
if _err != nil && (_err != io.EOF || n == 0) {
|
||||
err = _err
|
||||
}
|
||||
return s.sendPacket(sshFxpDataPacket{
|
||||
rpkt = sshFxpDataPacket{
|
||||
ID: p.ID,
|
||||
Length: uint32(n),
|
||||
Data: data[:n],
|
||||
})
|
||||
case *sshFxpWritePacket:
|
||||
f, ok := s.getHandle(p.Handle)
|
||||
if !ok {
|
||||
return s.sendError(p, syscall.EBADF)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
rpkt = statusFromError(p, err)
|
||||
}
|
||||
|
||||
_, err := f.WriteAt(p.Data, int64(p.Offset))
|
||||
return s.sendError(p, err)
|
||||
case *sshFxpWritePacket:
|
||||
f, ok := s.getHandle(p.Handle)
|
||||
var err error = syscall.EBADF
|
||||
if ok {
|
||||
_, err = f.WriteAt(p.Data, int64(p.Offset))
|
||||
}
|
||||
rpkt = statusFromError(p, err)
|
||||
case serverRespondablePacket:
|
||||
err := p.respond(s)
|
||||
return errors.Wrap(err, "pkt.respond failed")
|
||||
rpkt = p.respond(s)
|
||||
default:
|
||||
return errors.Errorf("unexpected packet type %T", p)
|
||||
}
|
||||
|
||||
s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, p.orderId()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serve serves SFTP connections until the streams stop or the SFTP subsystem
|
||||
// is stopped.
|
||||
func (svr *Server) Serve() error {
|
||||
var wg sync.WaitGroup
|
||||
runWorker := func(ch requestChan) {
|
||||
runWorker := func(ch chan orderedRequest) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@ -324,7 +327,7 @@ func (svr *Server) Serve() error {
|
||||
}
|
||||
}
|
||||
|
||||
pktChan <- pkt
|
||||
pktChan <- svr.pktMgr.newOrderedRequest(pkt)
|
||||
}
|
||||
|
||||
close(pktChan) // shuts down sftpServerWorkers
|
||||
@ -338,20 +341,6 @@ func (svr *Server) Serve() error {
|
||||
return err // error from recvPacket
|
||||
}
|
||||
|
||||
// Wrap underlying connection methods to use packetManager
|
||||
func (svr *Server) sendPacket(m encoding.BinaryMarshaler) error {
|
||||
if pkt, ok := m.(responsePacket); ok {
|
||||
svr.pktMgr.readyPacket(pkt)
|
||||
} else {
|
||||
return errors.Errorf("unexpected packet type %T", m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (svr *Server) sendError(p ider, err error) error {
|
||||
return svr.sendPacket(statusFromError(p, err))
|
||||
}
|
||||
|
||||
type ider interface {
|
||||
id() uint32
|
||||
}
|
||||
@ -386,7 +375,7 @@ func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (p sshFxpOpenPacket) respond(svr *Server) error {
|
||||
func (p sshFxpOpenPacket) respond(svr *Server) responsePacket {
|
||||
var osFlags int
|
||||
if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) {
|
||||
osFlags |= os.O_RDWR
|
||||
@ -396,7 +385,7 @@ func (p sshFxpOpenPacket) respond(svr *Server) error {
|
||||
osFlags |= os.O_RDONLY
|
||||
} else {
|
||||
// how are they opening?
|
||||
return svr.sendError(p, syscall.EINVAL)
|
||||
return statusFromError(p, syscall.EINVAL)
|
||||
}
|
||||
|
||||
if p.hasPflags(ssh_FXF_APPEND) {
|
||||
@ -414,23 +403,23 @@ func (p sshFxpOpenPacket) respond(svr *Server) error {
|
||||
|
||||
f, err := os.OpenFile(p.Path, osFlags, 0644)
|
||||
if err != nil {
|
||||
return svr.sendError(p, err)
|
||||
return statusFromError(p, err)
|
||||
}
|
||||
|
||||
handle := svr.nextHandle(f)
|
||||
return svr.sendPacket(sshFxpHandlePacket{p.ID, handle})
|
||||
return sshFxpHandlePacket{ID: p.id(), Handle: handle}
|
||||
}
|
||||
|
||||
func (p sshFxpReaddirPacket) respond(svr *Server) error {
|
||||
func (p sshFxpReaddirPacket) respond(svr *Server) responsePacket {
|
||||
f, ok := svr.getHandle(p.Handle)
|
||||
if !ok {
|
||||
return svr.sendError(p, syscall.EBADF)
|
||||
return statusFromError(p, syscall.EBADF)
|
||||
}
|
||||
|
||||
dirname := f.Name()
|
||||
dirents, err := f.Readdir(128)
|
||||
if err != nil {
|
||||
return svr.sendError(p, err)
|
||||
return statusFromError(p, err)
|
||||
}
|
||||
|
||||
ret := sshFxpNamePacket{ID: p.ID}
|
||||
@ -441,10 +430,10 @@ func (p sshFxpReaddirPacket) respond(svr *Server) error {
|
||||
Attrs: []interface{}{dirent},
|
||||
})
|
||||
}
|
||||
return svr.sendPacket(ret)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (p sshFxpSetstatPacket) respond(svr *Server) error {
|
||||
func (p sshFxpSetstatPacket) respond(svr *Server) responsePacket {
|
||||
// additional unmarshalling is required for each possibility here
|
||||
b := p.Attrs.([]byte)
|
||||
var err error
|
||||
@ -483,13 +472,13 @@ func (p sshFxpSetstatPacket) respond(svr *Server) error {
|
||||
}
|
||||
}
|
||||
|
||||
return svr.sendError(p, err)
|
||||
return statusFromError(p, err)
|
||||
}
|
||||
|
||||
func (p sshFxpFsetstatPacket) respond(svr *Server) error {
|
||||
func (p sshFxpFsetstatPacket) respond(svr *Server) responsePacket {
|
||||
f, ok := svr.getHandle(p.Handle)
|
||||
if !ok {
|
||||
return svr.sendError(p, syscall.EBADF)
|
||||
return statusFromError(p, syscall.EBADF)
|
||||
}
|
||||
|
||||
// additional unmarshalling is required for each possibility here
|
||||
@ -530,7 +519,7 @@ func (p sshFxpFsetstatPacket) respond(svr *Server) error {
|
||||
}
|
||||
}
|
||||
|
||||
return svr.sendError(p, err)
|
||||
return statusFromError(p, err)
|
||||
}
|
||||
|
||||
// translateErrno translates a syscall error number to a SFTP error code.
|
||||
|
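Note: the RequestServer and Server changes above thread each incoming packet through the packet manager as an orderedRequest and hand back an orderedResponse keyed by the packet's order id, so workers can run in parallel while replies still go out in request order. A minimal sketch of that reordering idea follows; the sequencer type and its field names are invented for illustration and are not the sftp package's packetManager:

package main

import "fmt"

// sequencer releases responses strictly in the order their requests
// were assigned ids, buffering anything that finishes early.
type sequencer struct {
	next    uint32            // next order id allowed to be sent
	pending map[uint32]string // finished responses waiting their turn
	out     []string          // stands in for the write side of the connection
}

func newSequencer() *sequencer {
	return &sequencer{next: 1, pending: map[uint32]string{}}
}

// ready is the analogue of pktMgr.readyPacket: it accepts a finished
// response tagged with its order id and flushes as much as possible.
func (s *sequencer) ready(orderID uint32, resp string) {
	s.pending[orderID] = resp
	for r, ok := s.pending[s.next]; ok; r, ok = s.pending[s.next] {
		s.out = append(s.out, r)
		delete(s.pending, s.next)
		s.next++
	}
}

func main() {
	s := newSequencer()
	s.ready(2, "resp-2") // finished early, held back
	s.ready(1, "resp-1") // releases resp-1 then resp-2
	s.ready(3, "resp-3")
	fmt.Println(s.out) // [resp-1 resp-2 resp-3]
}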
vendor/github.com/pkg/sftp/server_statvfs_impl.go (generated, vendored): 8 changed lines
@ -9,17 +9,17 @@ import (
"syscall"
)

func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error {
func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket {
stat := &syscall.Statfs_t{}
if err := syscall.Statfs(p.Path, stat); err != nil {
return svr.sendPacket(statusFromError(p, err))
return statusFromError(p, err)
}

retPkt, err := statvfsFromStatfst(stat)
if err != nil {
return svr.sendPacket(statusFromError(p, err))
return statusFromError(p, err)
}
retPkt.ID = p.ID

return svr.sendPacket(retPkt)
return retPkt
}
vendor/github.com/pkg/sftp/server_statvfs_stubs.go (generated, vendored): 4 changed lines
@ -6,6 +6,6 @@ import (
"syscall"
)

func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error {
return syscall.ENOTSUP
func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket {
return statusFromError(p, syscall.ENOTSUP)
}
vendor/github.com/pkg/sftp/server_unix.go (generated, vendored): 2 changed lines
@ -1,4 +1,4 @@
// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris
// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix
// +build cgo

package sftp
vendor/github.com/rfjakob/eme/LICENSE (generated, vendored): 3 changed lines
@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2015
Copyright (c) 2015 Jakob Unterwurzacher

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/rfjakob/eme/README.md (generated, vendored): 53 changed lines
@ -3,13 +3,12 @@ EME for Go [![Build Status](https://travis-ci.org/rfjakob/eme.svg?branch=master)
|
||||
|
||||
**EME** (ECB-Mix-ECB or, clearer, **Encrypt-Mix-Encrypt**) is a wide-block
|
||||
encryption mode developed by Halevi
|
||||
and Rogaway in 2003 _[eme]_ (square-bracketed italics like _[this]_ are literature references, detailed in the
|
||||
References secion).
|
||||
and Rogaway in 2003 [[eme]](#eme).
|
||||
|
||||
EME uses multiple invocations of a block cipher to construct a new
|
||||
cipher of bigger block size (in multiples of 16 bytes, up to 2048 bytes).
|
||||
|
||||
Quoting from the original _[eme]_ paper:
|
||||
Quoting from the original [[eme]](#eme) paper:
|
||||
|
||||
> We describe a block-cipher mode of operation, EME, that turns an n-bit block cipher into
|
||||
> a tweakable enciphering scheme that acts on strings of mn bits, where m ∈ [1..n]. The mode is
|
||||
@ -18,70 +17,80 @@ Quoting from the original _[eme]_ paper:
|
||||
> and a “lightweight mixing” in between. We prove EME secure, in the reduction-based sense of
|
||||
> modern cryptography.
|
||||
|
||||
Figure 2 from the _[eme]_ paper shows an overview of the transformation:
|
||||
Figure 2 from the [[eme]](#eme) paper shows an overview of the transformation:
|
||||
|
||||
[![Figure 2 form [eme]](paper-eme-fig2.png)](#)
|
||||
[![Figure 2 from [eme]](paper-eme-fig2.png)](#)
|
||||
|
||||
This is an implementation of EME in Go, complete with test vectors from IEEE.
|
||||
This is an implementation of EME in Go, complete with test vectors from IEEE [[p1619-2]](#p1619-2)
|
||||
and Halevi [[eme-32-testvec]](#eme-32-testvec).
|
||||
|
||||
Is it patentend?
|
||||
----------------
|
||||
|
||||
In 2007, the UC Davis has decided to abandon _[patabandon]_ the patent
|
||||
application for EME _[patappl]_.
|
||||
In 2007, the UC Davis has decided to abandon [[patabandon]](#patabandon)
|
||||
the patent application [[patappl]](#patappl) for EME.
|
||||
|
||||
Related algorithms
|
||||
------------------
|
||||
|
||||
**EME-32** is EME with the cipher set to AES and the length set to 512.
|
||||
That is, EME-32 _[eme-32-pdf]_ is a subset of EME.
|
||||
That is, EME-32 [[eme-32-pdf]](#eme-32-pdf) is a subset of EME.
|
||||
|
||||
**EME2**, also known as EME* _[emestar]_, is an extended version of EME
|
||||
**EME2**, also known as EME\* [[emestar]](#emestar), is an extended version of EME
|
||||
that has built-in handling for data that is not a multiple of 16 bytes
|
||||
long.
|
||||
EME2 has been selected for standardization in IEEE P1619.2 _[p1619.2]_.
|
||||
EME2 has been selected for standardization in IEEE P1619.2 [[p1619.2]](#p1619.2).
|
||||
|
||||
References
|
||||
----------
|
||||
|
||||
**[eme]** *A Parallelizable Enciphering Mode*
|
||||
#### [eme]
|
||||
*A Parallelizable Enciphering Mode*
|
||||
Shai Halevi, Phillip Rogaway, 28 Jul 2003
|
||||
https://eprint.iacr.org/2003/147.pdf
|
||||
|
||||
Note: This is the original EME paper. EME is specified for an arbitrary
|
||||
number of block-cipher blocks. EME-32 is a concrete implementation of
|
||||
EME with a fixed length of 32 AES blocks.
|
||||
|
||||
**[eme-32-email]** *Re: EME-32-AES with editorial comments*
|
||||
#### [eme-32-email]
|
||||
*Re: EME-32-AES with editorial comments*
|
||||
Shai Halevi, 07 Jun 2005
|
||||
http://grouper.ieee.org/groups/1619/email/msg00310.html
|
||||
|
||||
**[eme-32-pdf]** *Draft Standard for Tweakable Wide-block Encryption*
|
||||
#### [eme-32-pdf]
|
||||
*Draft Standard for Tweakable Wide-block Encryption*
|
||||
Shai Halevi, 02 June 2005
|
||||
http://grouper.ieee.org/groups/1619/email/pdf00020.pdf
|
||||
|
||||
Note: This is the latest version of the EME-32 draft that I could find. It
|
||||
includes test vectors and C source code.
|
||||
|
||||
**[eme-32-testvec]** *Re: Test vectors for LRW and EME*
|
||||
#### [eme-32-testvec]
|
||||
*Re: Test vectors for LRW and EME*
|
||||
Shai Halevi, 16 Nov 2004
|
||||
http://grouper.ieee.org/groups/1619/email/msg00218.html
|
||||
|
||||
**[emestar]** _EME*: extending EME to handle arbitrary-length
|
||||
messages with associated data_
|
||||
#### [emestar]
|
||||
*EME\*: extending EME to handle arbitrary-length messages with associated data*
|
||||
Shai Halevi, 27 May 2004
|
||||
https://eprint.iacr.org/2004/125.pdf
|
||||
|
||||
**[patabandon]** *Re: [P1619-2] Non-awareness patent statement made by UC Davis*
|
||||
#### [patabandon]
|
||||
*Re: [P1619-2] Non-awareness patent statement made by UC Davis*
|
||||
Mat Ball, 26 Nov 2007
|
||||
http://grouper.ieee.org/groups/1619/email-2/msg00005.html
|
||||
|
||||
**[patappl]** *Block cipher mode of operation for constructing a wide-blocksize block cipher from a conventional block cipher*
|
||||
#### [patappl]
|
||||
*Block cipher mode of operation for constructing a wide-blocksize block cipher from a conventional block cipher*
|
||||
US patent application US20040131182
|
||||
http://www.google.com/patents/US20040131182
|
||||
|
||||
**[p1619.2]** *IEEE P1619.2™/D9 Draft Standard for Wide-Block
|
||||
Encryption for Shared Storage Media*
|
||||
#### [p1619-2]
|
||||
*IEEE P1619.2™/D9 Draft Standard for Wide-Block Encryption for Shared Storage Media*
|
||||
IEEE, Dec 2008
|
||||
http://siswg.net/index2.php?option=com_docman&task=doc_view&gid=156&Itemid=41
|
||||
|
||||
Note: This is a draft version. The final version is not freely available
|
||||
and must be bought from IEEE.
|
||||
|
||||
@ -89,7 +98,7 @@ Package Changelog
|
||||
-----------------
|
||||
|
||||
v1.1, 2017-03-05
|
||||
* Add eme.New() / *EMECipher convenience wrapper
|
||||
* Add eme.New() / \*EMECipher convenience wrapper
|
||||
* Improve panic message and parameter wording
|
||||
|
||||
v1.0, 2015-12-08
|
||||
|
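Note: since the changelog above mentions that v1.1 added the eme.New() / *EMECipher convenience wrapper, a short usage sketch may help. The Encrypt/Decrypt method names and the requirement that buffers be a multiple of 16 bytes are assumptions based on the README, not quotations from it:

package main

import (
	"crypto/aes"
	"fmt"

	"github.com/rfjakob/eme"
)

func main() {
	key := make([]byte, 32)    // AES-256 key (all zeros only for the demo)
	tweak := make([]byte, 16)  // EME tweak, one cipher block long
	plain := make([]byte, 512) // length assumed to be a multiple of 16 bytes

	bc, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	e := eme.New(bc) // convenience wrapper noted in the v1.1 changelog
	ciphertext := e.Encrypt(tweak, plain)
	decrypted := e.Decrypt(tweak, ciphertext)

	fmt.Println(len(ciphertext), string(decrypted) == string(plain))
}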
vendor/github.com/rfjakob/eme/benchmark.bash (generated, vendored): 5 changed lines
@ -1,6 +1,3 @@
#!/bin/bash
#!/bin/bash -eu

set -eu

go build
go test -bench=.
vendor/github.com/rfjakob/eme/eme.go (generated, vendored): 2 changed lines
@ -109,7 +109,7 @@ func Transform(bc cipher.Block, tweak []byte, inputData []byte, direction direct
// make following the paper easy.
T := tweak
// In the paper, the plaintext data is called "P" and the ciphertext is
// called "C". Because encryption and decryption are virtually indentical,
// called "C". Because encryption and decryption are virtually identical,
// we share the code and always call the input data "P" and the output data
// "C", regardless of the direction.
P := inputData
vendor/github.com/rfjakob/eme/test.bash (generated, vendored, new file): 5 added lines
@ -0,0 +1,5 @@
#!/bin/bash -eu

go build
go test . "$@"
go tool vet -all -shadow .
vendor/github.com/spf13/pflag/bytes.go (generated, vendored): 104 changed lines
@ -1,6 +1,7 @@
|
||||
package pflag
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
@ -9,10 +10,12 @@ import (
|
||||
// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
|
||||
type bytesHexValue []byte
|
||||
|
||||
// String implements pflag.Value.String.
|
||||
func (bytesHex bytesHexValue) String() string {
|
||||
return fmt.Sprintf("%X", []byte(bytesHex))
|
||||
}
|
||||
|
||||
// Set implements pflag.Value.Set.
|
||||
func (bytesHex *bytesHexValue) Set(value string) error {
|
||||
bin, err := hex.DecodeString(strings.TrimSpace(value))
|
||||
|
||||
@ -25,6 +28,7 @@ func (bytesHex *bytesHexValue) Set(value string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type implements pflag.Value.Type.
|
||||
func (*bytesHexValue) Type() string {
|
||||
return "bytesHex"
|
||||
}
|
||||
@ -103,3 +107,103 @@ func BytesHex(name string, value []byte, usage string) *[]byte {
|
||||
func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
|
||||
return CommandLine.BytesHexP(name, shorthand, value, usage)
|
||||
}
|
||||
|
||||
// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
|
||||
type bytesBase64Value []byte
|
||||
|
||||
// String implements pflag.Value.String.
|
||||
func (bytesBase64 bytesBase64Value) String() string {
|
||||
return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
|
||||
}
|
||||
|
||||
// Set implements pflag.Value.Set.
|
||||
func (bytesBase64 *bytesBase64Value) Set(value string) error {
|
||||
bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*bytesBase64 = bin
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type implements pflag.Value.Type.
|
||||
func (*bytesBase64Value) Type() string {
|
||||
return "bytesBase64"
|
||||
}
|
||||
|
||||
func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
|
||||
*p = val
|
||||
return (*bytesBase64Value)(p)
|
||||
}
|
||||
|
||||
func bytesBase64ValueConv(sval string) (interface{}, error) {
|
||||
|
||||
bin, err := base64.StdEncoding.DecodeString(sval)
|
||||
if err == nil {
|
||||
return bin, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
|
||||
}
|
||||
|
||||
// GetBytesBase64 return the []byte value of a flag with the given name
|
||||
func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
|
||||
val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
|
||||
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
return val.([]byte), nil
|
||||
}
|
||||
|
||||
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
|
||||
// The argument p points to an []byte variable in which to store the value of the flag.
|
||||
func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
|
||||
f.VarP(newBytesBase64Value(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
|
||||
f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
|
||||
// The argument p points to an []byte variable in which to store the value of the flag.
|
||||
func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
|
||||
CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
|
||||
}
|
||||
|
||||
// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
|
||||
func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
|
||||
CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
|
||||
}
|
||||
|
||||
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
|
||||
// The return value is the address of an []byte variable that stores the value of the flag.
|
||||
func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
|
||||
p := new([]byte)
|
||||
f.BytesBase64VarP(p, name, "", value, usage)
|
||||
return p
|
||||
}
|
||||
|
||||
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
|
||||
func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
|
||||
p := new([]byte)
|
||||
f.BytesBase64VarP(p, name, shorthand, value, usage)
|
||||
return p
|
||||
}
|
||||
|
||||
// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
|
||||
// The return value is the address of an []byte variable that stores the value of the flag.
|
||||
func BytesBase64(name string, value []byte, usage string) *[]byte {
|
||||
return CommandLine.BytesBase64P(name, "", value, usage)
|
||||
}
|
||||
|
||||
// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
|
||||
func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
|
||||
return CommandLine.BytesBase64P(name, shorthand, value, usage)
|
||||
}
|
||||
|
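Note: the BytesBase64 flag family added above mirrors the existing BytesHex helpers; the package-level BytesBase64 constructor appears in this diff. A small usage sketch follows (the flag name and default value are made up for the example):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Register a flag whose value is supplied base64-encoded on the
	// command line, e.g.:  ./prog --payload aGVsbG8=
	payload := pflag.BytesBase64("payload", []byte("default"), "payload as base64")

	pflag.Parse()

	fmt.Printf("%d bytes: %q\n", len(*payload), *payload)
}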
vendor/github.com/spf13/pflag/flag.go (generated, vendored): 3 changed lines
@ -990,11 +990,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
}

func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
outArgs = args

if strings.HasPrefix(shorthands, "test.") {
return
}

outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
vendor/github.com/yunify/qingstor-sdk-go/.gitignore (generated, vendored): 1 changed line
@ -25,6 +25,5 @@ _testmain.go

out
gen
vendor
coverage
release
vendor/github.com/yunify/qingstor-sdk-go/Makefile (generated, vendored): 2 changed lines
@ -98,7 +98,7 @@ test-race:
.PHONY: integration-test
integration-test:
@echo "Run integration test"
pushd "./test"; go run *.go; popd
pushd "./test"; go test; popd
@echo "Done"

.PHONY: release
vendor/github.com/yunify/qingstor-sdk-go/glide.lock (generated, vendored): 12 changed lines
@ -1,11 +1,6 @@
hash: 6e1d4e547cdd58944ef920192480d29edd118cac0e8269d793315066d1249b4e
updated: 2018-05-10T11:43:20.637418+08:00
updated: 2018-07-16T14:42:51.093308+08:00
imports:
- name: github.com/DATA-DOG/godog
version: 272624afcc2fc5f818ce069e2e417e7b4fa8bb6c
subpackages:
- colors
- gherkin
- name: github.com/pengsrc/go-shared
version: db9bcfc423552eef790d989bb81ef055d0d26286
subpackages:
@ -18,6 +13,11 @@ imports:
version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5
repo: https://github.com/go-yaml/yaml.git
testImports:
- name: github.com/DATA-DOG/godog
version: 623ff9946e5c72834c19a019a153b8e3a338a52c
subpackages:
- colors
- gherkin
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
subpackages:
vendor/github.com/yunify/qingstor-sdk-go/request/builder/qingstor.go (generated, vendored): 12 changed lines
@ -22,10 +22,8 @@ import (
"encoding/base64"
"fmt"
"io/ioutil"
"mime"
"net/http"
"net/url"
"path"
"reflect"
"regexp"
"runtime"
@ -134,16 +132,6 @@ func (qb *QingStorBuilder) parseURL() error {
}

func (qb *QingStorBuilder) setupHeaders(httpRequest *http.Request) error {
method := httpRequest.Method
if method == "POST" || method == "PUT" || method == "DELETE" {
if httpRequest.Header.Get("Content-Type") == "" {
mimeType := mime.TypeByExtension(path.Ext(httpRequest.URL.Path))
if mimeType != "" {
httpRequest.Header.Set("Content-Type", mimeType)
}
}
}

if httpRequest.Header.Get("User-Agent") == "" {
version := fmt.Sprintf(`Go v%s`, strings.Replace(runtime.Version(), "go", "", -1))
system := fmt.Sprintf(`%s_%s_%s`, runtime.GOOS, runtime.GOARCH, runtime.Compiler)
vendor/github.com/yunify/qingstor-sdk-go/service/bucket.go (generated, vendored): 382 changed lines
@ -209,6 +209,110 @@ type DeleteBucketExternalMirrorOutput struct {
|
||||
RequestID *string `location:"requestID"`
|
||||
}
|
||||
|
||||
// DeleteLifecycle does Delete Lifecycle information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/lifecycle/delete_lifecycle.html
|
||||
func (s *Bucket) DeleteLifecycle() (*DeleteBucketLifecycleOutput, error) {
|
||||
r, x, err := s.DeleteLifecycleRequest()
|
||||
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
|
||||
err = r.Send()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID := r.HTTPResponse.Header.Get(http.CanonicalHeaderKey("X-QS-Request-ID"))
|
||||
x.RequestID = &requestID
|
||||
|
||||
return x, err
|
||||
}
|
||||
|
||||
// DeleteLifecycleRequest creates request and output object of DeleteBucketLifecycle.
|
||||
func (s *Bucket) DeleteLifecycleRequest() (*request.Request, *DeleteBucketLifecycleOutput, error) {
|
||||
|
||||
properties := *s.Properties
|
||||
|
||||
o := &data.Operation{
|
||||
Config: s.Config,
|
||||
Properties: &properties,
|
||||
APIName: "DELETE Bucket Lifecycle",
|
||||
RequestMethod: "DELETE",
|
||||
RequestURI: "/<bucket-name>?lifecycle",
|
||||
StatusCodes: []int{
|
||||
204, // Lifecycle deleted
|
||||
},
|
||||
}
|
||||
|
||||
x := &DeleteBucketLifecycleOutput{}
|
||||
r, err := request.New(o, nil, x)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return r, x, nil
|
||||
}
|
||||
|
||||
// DeleteBucketLifecycleOutput presents output for DeleteBucketLifecycle.
|
||||
type DeleteBucketLifecycleOutput struct {
|
||||
StatusCode *int `location:"statusCode"`
|
||||
|
||||
RequestID *string `location:"requestID"`
|
||||
}
|
||||
|
||||
// DeleteNotification does Delete Notification information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/notification/delete_notification.html
|
||||
func (s *Bucket) DeleteNotification() (*DeleteBucketNotificationOutput, error) {
|
||||
r, x, err := s.DeleteNotificationRequest()
|
||||
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
|
||||
err = r.Send()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID := r.HTTPResponse.Header.Get(http.CanonicalHeaderKey("X-QS-Request-ID"))
|
||||
x.RequestID = &requestID
|
||||
|
||||
return x, err
|
||||
}
|
||||
|
||||
// DeleteNotificationRequest creates request and output object of DeleteBucketNotification.
|
||||
func (s *Bucket) DeleteNotificationRequest() (*request.Request, *DeleteBucketNotificationOutput, error) {
|
||||
|
||||
properties := *s.Properties
|
||||
|
||||
o := &data.Operation{
|
||||
Config: s.Config,
|
||||
Properties: &properties,
|
||||
APIName: "DELETE Bucket Notification",
|
||||
RequestMethod: "DELETE",
|
||||
RequestURI: "/<bucket-name>?notification",
|
||||
StatusCodes: []int{
|
||||
204, // notification deleted
|
||||
},
|
||||
}
|
||||
|
||||
x := &DeleteBucketNotificationOutput{}
|
||||
r, err := request.New(o, nil, x)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return r, x, nil
|
||||
}
|
||||
|
||||
// DeleteBucketNotificationOutput presents output for DeleteBucketNotification.
|
||||
type DeleteBucketNotificationOutput struct {
|
||||
StatusCode *int `location:"statusCode"`
|
||||
|
||||
RequestID *string `location:"requestID"`
|
||||
}
|
||||
|
||||
// DeletePolicy does Delete policy information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/policy/delete_policy.html
|
||||
func (s *Bucket) DeletePolicy() (*DeleteBucketPolicyOutput, error) {
|
||||
@ -519,6 +623,116 @@ type GetBucketExternalMirrorOutput struct {
|
||||
SourceSite *string `json:"source_site,omitempty" name:"source_site" location:"elements"`
|
||||
}
|
||||
|
||||
// GetLifecycle does Get Lifecycle information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/lifecycle/get_lifecycle.html
|
||||
func (s *Bucket) GetLifecycle() (*GetBucketLifecycleOutput, error) {
|
||||
r, x, err := s.GetLifecycleRequest()
|
||||
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
|
||||
err = r.Send()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID := r.HTTPResponse.Header.Get(http.CanonicalHeaderKey("X-QS-Request-ID"))
|
||||
x.RequestID = &requestID
|
||||
|
||||
return x, err
|
||||
}
|
||||
|
||||
// GetLifecycleRequest creates request and output object of GetBucketLifecycle.
|
||||
func (s *Bucket) GetLifecycleRequest() (*request.Request, *GetBucketLifecycleOutput, error) {
|
||||
|
||||
properties := *s.Properties
|
||||
|
||||
o := &data.Operation{
|
||||
Config: s.Config,
|
||||
Properties: &properties,
|
||||
APIName: "GET Bucket Lifecycle",
|
||||
RequestMethod: "GET",
|
||||
RequestURI: "/<bucket-name>?lifecycle",
|
||||
StatusCodes: []int{
|
||||
200, // OK
|
||||
},
|
||||
}
|
||||
|
||||
x := &GetBucketLifecycleOutput{}
|
||||
r, err := request.New(o, nil, x)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return r, x, nil
|
||||
}
|
||||
|
||||
// GetBucketLifecycleOutput presents output for GetBucketLifecycle.
|
||||
type GetBucketLifecycleOutput struct {
|
||||
StatusCode *int `location:"statusCode"`
|
||||
|
||||
RequestID *string `location:"requestID"`
|
||||
|
||||
// Bucket Lifecycle rule
|
||||
Rule []*RuleType `json:"rule,omitempty" name:"rule" location:"elements"`
|
||||
}
|
||||
|
||||
// GetNotification does Get Notification information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/notification/get_notification.html
|
||||
func (s *Bucket) GetNotification() (*GetBucketNotificationOutput, error) {
|
||||
r, x, err := s.GetNotificationRequest()
|
||||
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
|
||||
err = r.Send()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID := r.HTTPResponse.Header.Get(http.CanonicalHeaderKey("X-QS-Request-ID"))
|
||||
x.RequestID = &requestID
|
||||
|
||||
return x, err
|
||||
}
|
||||
|
||||
// GetNotificationRequest creates request and output object of GetBucketNotification.
|
||||
func (s *Bucket) GetNotificationRequest() (*request.Request, *GetBucketNotificationOutput, error) {
|
||||
|
||||
properties := *s.Properties
|
||||
|
||||
o := &data.Operation{
|
||||
Config: s.Config,
|
||||
Properties: &properties,
|
||||
APIName: "GET Bucket Notification",
|
||||
RequestMethod: "GET",
|
||||
RequestURI: "/<bucket-name>?notification",
|
||||
StatusCodes: []int{
|
||||
200, // OK
|
||||
},
|
||||
}
|
||||
|
||||
x := &GetBucketNotificationOutput{}
|
||||
r, err := request.New(o, nil, x)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return r, x, nil
|
||||
}
|
||||
|
||||
// GetBucketNotificationOutput presents output for GetBucketNotification.
|
||||
type GetBucketNotificationOutput struct {
|
||||
StatusCode *int `location:"statusCode"`
|
||||
|
||||
RequestID *string `location:"requestID"`
|
||||
|
||||
// Bucket Notification
|
||||
Notifications []*NotificationType `json:"notifications,omitempty" name:"notifications" location:"elements"`
|
||||
}
|
||||
|
||||
// GetPolicy does Get policy information of the bucket.
|
||||
// Documentation URL: https://https://docs.qingcloud.com/qingstor/api/bucket/policy/get_policy.html
|
||||
func (s *Bucket) GetPolicy() (*GetBucketPolicyOutput, error) {
|
||||
@ -1178,6 +1392,174 @@ type PutBucketExternalMirrorOutput struct {
|
||||
RequestID *string `location:"requestID"`
|
||||
}
|
||||
|
||||
// PutLifecycle does Set Lifecycle information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/lifecycle/put_lifecycle.html
|
||||
func (s *Bucket) PutLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
|
||||
r, x, err := s.PutLifecycleRequest(input)
|
||||
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
|
||||
err = r.Send()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID := r.HTTPResponse.Header.Get(http.CanonicalHeaderKey("X-QS-Request-ID"))
|
||||
x.RequestID = &requestID
|
||||
|
||||
return x, err
|
||||
}
|
||||
|
||||
// PutLifecycleRequest creates request and output object of PutBucketLifecycle.
|
||||
func (s *Bucket) PutLifecycleRequest(input *PutBucketLifecycleInput) (*request.Request, *PutBucketLifecycleOutput, error) {
|
||||
|
||||
if input == nil {
|
||||
input = &PutBucketLifecycleInput{}
|
||||
}
|
||||
|
||||
properties := *s.Properties
|
||||
|
||||
o := &data.Operation{
|
||||
Config: s.Config,
|
||||
Properties: &properties,
|
||||
APIName: "PUT Bucket Lifecycle",
|
||||
RequestMethod: "PUT",
|
||||
RequestURI: "/<bucket-name>?lifecycle",
|
||||
StatusCodes: []int{
|
||||
200, // OK
|
||||
},
|
||||
}
|
||||
|
||||
x := &PutBucketLifecycleOutput{}
|
||||
r, err := request.New(o, input, x)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return r, x, nil
|
||||
}
|
||||
|
||||
// PutBucketLifecycleInput presents input for PutBucketLifecycle.
|
||||
type PutBucketLifecycleInput struct {
|
||||
// Bucket Lifecycle rule
|
||||
Rule []*RuleType `json:"rule" name:"rule" location:"elements"` // Required
|
||||
|
||||
}
|
||||
|
||||
// Validate validates the input for PutBucketLifecycle.
|
||||
func (v *PutBucketLifecycleInput) Validate() error {
|
||||
|
||||
if len(v.Rule) == 0 {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "Rule",
|
||||
ParentName: "PutBucketLifecycleInput",
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.Rule) > 0 {
|
||||
for _, property := range v.Rule {
|
||||
if err := property.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutBucketLifecycleOutput presents output for PutBucketLifecycle.
|
||||
type PutBucketLifecycleOutput struct {
|
||||
StatusCode *int `location:"statusCode"`
|
||||
|
||||
RequestID *string `location:"requestID"`
|
||||
}
|
||||
|
||||
// PutNotification does Set Notification information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/notification/put_notification.html
|
||||
func (s *Bucket) PutNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
|
||||
r, x, err := s.PutNotificationRequest(input)
|
||||
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
|
||||
err = r.Send()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID := r.HTTPResponse.Header.Get(http.CanonicalHeaderKey("X-QS-Request-ID"))
|
||||
x.RequestID = &requestID
|
||||
|
||||
return x, err
|
||||
}
|
||||
|
||||
// PutNotificationRequest creates request and output object of PutBucketNotification.
|
||||
func (s *Bucket) PutNotificationRequest(input *PutBucketNotificationInput) (*request.Request, *PutBucketNotificationOutput, error) {
|
||||
|
||||
if input == nil {
|
||||
input = &PutBucketNotificationInput{}
|
||||
}
|
||||
|
||||
properties := *s.Properties
|
||||
|
||||
o := &data.Operation{
|
||||
Config: s.Config,
|
||||
Properties: &properties,
|
||||
APIName: "PUT Bucket Notification",
|
||||
RequestMethod: "PUT",
|
||||
RequestURI: "/<bucket-name>?notification",
|
||||
StatusCodes: []int{
|
||||
200, // OK
|
||||
},
|
||||
}
|
||||
|
||||
x := &PutBucketNotificationOutput{}
|
||||
r, err := request.New(o, input, x)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return r, x, nil
|
||||
}
|
||||
|
||||
// PutBucketNotificationInput presents input for PutBucketNotification.
|
||||
type PutBucketNotificationInput struct {
|
||||
// Bucket Notification
|
||||
Notifications []*NotificationType `json:"notifications" name:"notifications" location:"elements"` // Required
|
||||
|
||||
}
|
||||
|
||||
// Validate validates the input for PutBucketNotification.
|
||||
func (v *PutBucketNotificationInput) Validate() error {
|
||||
|
||||
if len(v.Notifications) == 0 {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "Notifications",
|
||||
ParentName: "PutBucketNotificationInput",
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.Notifications) > 0 {
|
||||
for _, property := range v.Notifications {
|
||||
if err := property.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PutBucketNotificationOutput presents output for PutBucketNotification.
|
||||
type PutBucketNotificationOutput struct {
|
||||
StatusCode *int `location:"statusCode"`
|
||||
|
||||
RequestID *string `location:"requestID"`
|
||||
}
|
||||
|
||||
// PutPolicy does Set policy information of the bucket.
|
||||
// Documentation URL: https://docs.qingcloud.com/qingstor/api/bucket/policy/put_policy.html
|
||||
func (s *Bucket) PutPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
|
||||
|
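Note: the new lifecycle API above takes a PutBucketLifecycleInput whose Rule entries are validated before the request is built. The sketch below only exercises the Validate paths shown in this diff and does not create a bucket or send a request; the strPtr/intPtr helpers and the rule contents are made up for the example:

package main

import (
	"fmt"

	qs "github.com/yunify/qingstor-sdk-go/service"
)

func strPtr(s string) *string { return &s }
func intPtr(i int) *int       { return &i }

func main() {
	// One lifecycle rule: expire objects under "logs/" after 180 days.
	input := &qs.PutBucketLifecycleInput{
		Rule: []*qs.RuleType{
			{
				ID:         strPtr("expire-logs"),
				Status:     strPtr("enabled"),
				Filter:     &qs.FilterType{Prefix: strPtr("logs/")},
				Expiration: &qs.ExpirationType{Days: intPtr(180)},
			},
		},
	}

	// Validate only checks required fields and allowed values; it does
	// not talk to the QingStor API.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid lifecycle input:", err)
		return
	}
	fmt.Println("lifecycle input is valid")
}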
vendor/github.com/yunify/qingstor-sdk-go/service/types.go (generated, vendored): 248 changed lines
@ -33,6 +33,26 @@ type Properties struct {
|
||||
Zone *string `json:"zone" name:"zone"`
|
||||
}
|
||||
|
||||
// AbortIncompleteMultipartUploadType presents AbortIncompleteMultipartUpload.
|
||||
type AbortIncompleteMultipartUploadType struct {
|
||||
// days after initiation
|
||||
DaysAfterInitiation *int `json:"days_after_initiation" name:"days_after_initiation"` // Required
|
||||
|
||||
}
|
||||
|
||||
// Validate validates the AbortIncompleteMultipartUpload.
|
||||
func (v *AbortIncompleteMultipartUploadType) Validate() error {
|
||||
|
||||
if v.DaysAfterInitiation == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "DaysAfterInitiation",
|
||||
ParentName: "AbortIncompleteMultipartUpload",
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ACLType presents ACL.
|
||||
type ACLType struct {
|
||||
Grantee *GranteeType `json:"grantee" name:"grantee"` // Required
|
||||
@ -106,6 +126,27 @@ func (v *BucketType) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloudfuncArgsType presents CloudfuncArgs.
|
||||
type CloudfuncArgsType struct {
|
||||
Action *string `json:"action" name:"action"` // Required
|
||||
KeyPrefix *string `json:"key_prefix,omitempty" name:"key_prefix" default:"gen"`
|
||||
KeySeprate *string `json:"key_seprate,omitempty" name:"key_seprate" default:"_"`
|
||||
SaveBucket *string `json:"save_bucket,omitempty" name:"save_bucket"`
|
||||
}
|
||||
|
||||
// Validate validates the CloudfuncArgs.
|
||||
func (v *CloudfuncArgsType) Validate() error {
|
||||
|
||||
if v.Action == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "Action",
|
||||
ParentName: "CloudfuncArgs",
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConditionType presents Condition.
|
||||
type ConditionType struct {
|
||||
IPAddress *IPAddressType `json:"ip_address,omitempty" name:"ip_address"`
|
||||
@ -185,6 +226,38 @@ func (v *CORSRuleType) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExpirationType presents Expiration.
|
||||
type ExpirationType struct {
|
||||
// days
|
||||
Days *int `json:"days,omitempty" name:"days"`
|
||||
}
|
||||
|
||||
// Validate validates the Expiration.
|
||||
func (v *ExpirationType) Validate() error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterType presents Filter.
|
||||
type FilterType struct {
|
||||
// Prefix matching
|
||||
Prefix *string `json:"prefix" name:"prefix"` // Required
|
||||
|
||||
}
|
||||
|
||||
// Validate validates the Filter.
|
||||
func (v *FilterType) Validate() error {
|
||||
|
||||
if v.Prefix == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "Prefix",
|
||||
ParentName: "Filter",
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GranteeType presents Grantee.
|
||||
type GranteeType struct {
|
||||
// Grantee user ID
|
||||
@ -306,6 +379,75 @@ func (v *NotIPAddressType) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotificationType presents Notification.
|
||||
type NotificationType struct {
|
||||
// Event processing service
|
||||
// Cloudfunc's available values: tupu-porn, notifier, image
|
||||
Cloudfunc *string `json:"cloudfunc" name:"cloudfunc"` // Required
|
||||
CloudfuncArgs *CloudfuncArgsType `json:"cloudfunc_args,omitempty" name:"cloudfunc_args"`
|
||||
// event types
|
||||
EventTypes []*string `json:"event_types" name:"event_types"` // Required
|
||||
// notification id
|
||||
ID *string `json:"id" name:"id"` // Required
|
||||
// notify url
|
||||
NotifyURL *string `json:"notify_url,omitempty" name:"notify_url"`
|
||||
// Object name matching rule
|
||||
ObjectFilters []*string `json:"object_filters,omitempty" name:"object_filters"`
|
||||
}
|
||||
|
||||
// Validate validates the Notification.
|
||||
func (v *NotificationType) Validate() error {
|
||||
|
||||
if v.Cloudfunc == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "Cloudfunc",
|
||||
ParentName: "Notification",
|
||||
}
|
||||
}
|
||||
|
||||
if v.Cloudfunc != nil {
|
||||
cloudfuncValidValues := []string{"tupu-porn", "notifier", "image"}
|
||||
cloudfuncParameterValue := fmt.Sprint(*v.Cloudfunc)
|
||||
|
||||
cloudfuncIsValid := false
|
||||
for _, value := range cloudfuncValidValues {
|
||||
if value == cloudfuncParameterValue {
|
||||
cloudfuncIsValid = true
|
||||
}
|
||||
}
|
||||
|
||||
if !cloudfuncIsValid {
|
||||
return errors.ParameterValueNotAllowedError{
|
||||
ParameterName: "Cloudfunc",
|
||||
ParameterValue: cloudfuncParameterValue,
|
||||
AllowedValues: cloudfuncValidValues,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if v.CloudfuncArgs != nil {
|
||||
if err := v.CloudfuncArgs.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.EventTypes) == 0 {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "EventTypes",
|
||||
ParentName: "Notification",
|
||||
}
|
||||
}
|
||||
|
||||
if v.ID == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "ID",
|
||||
ParentName: "Notification",
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ObjectPartType presents ObjectPart.
|
||||
type ObjectPartType struct {
|
||||
// Object part created time
|
||||
@ -345,6 +487,90 @@ func (v *OwnerType) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RuleType presents Rule.
|
||||
type RuleType struct {
|
||||
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUploadType `json:"abort_incomplete_multipart_upload,omitempty" name:"abort_incomplete_multipart_upload"`
|
||||
Expiration *ExpirationType `json:"expiration,omitempty" name:"expiration"`
|
||||
Filter *FilterType `json:"filter" name:"filter"` // Required
|
||||
// rule id
|
||||
ID *string `json:"id" name:"id"` // Required
|
||||
// rule status
|
||||
// Status's available values: enabled, disabled
|
||||
Status *string `json:"status" name:"status"` // Required
|
||||
Transition *TransitionType `json:"transition,omitempty" name:"transition"`
|
||||
}
|
||||
|
||||
// Validate validates the Rule.
|
||||
func (v *RuleType) Validate() error {
|
||||
|
||||
if v.AbortIncompleteMultipartUpload != nil {
|
||||
if err := v.AbortIncompleteMultipartUpload.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if v.Expiration != nil {
|
||||
if err := v.Expiration.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if v.Filter != nil {
|
||||
if err := v.Filter.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if v.Filter == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "Filter",
|
||||
ParentName: "Rule",
|
||||
}
|
||||
}
|
||||
|
||||
if v.ID == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "ID",
|
||||
ParentName: "Rule",
|
||||
}
|
||||
}
|
||||
|
||||
if v.Status == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "Status",
|
||||
ParentName: "Rule",
|
||||
}
|
||||
}
|
||||
|
||||
if v.Status != nil {
|
||||
statusValidValues := []string{"enabled", "disabled"}
|
||||
statusParameterValue := fmt.Sprint(*v.Status)
|
||||
|
||||
statusIsValid := false
|
||||
for _, value := range statusValidValues {
|
||||
if value == statusParameterValue {
|
||||
statusIsValid = true
|
||||
}
|
||||
}
|
||||
|
||||
if !statusIsValid {
|
||||
return errors.ParameterValueNotAllowedError{
|
||||
ParameterName: "Status",
|
||||
ParameterValue: statusParameterValue,
|
||||
AllowedValues: statusValidValues,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if v.Transition != nil {
|
||||
if err := v.Transition.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StatementType presents Statement.
|
||||
type StatementType struct {
|
||||
// QingStor API methods
|
||||
@ -446,6 +672,28 @@ func (v *StringNotLikeType) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TransitionType presents Transition.
|
||||
type TransitionType struct {
|
||||
// days
|
||||
Days *int `json:"days,omitempty" name:"days"`
|
||||
// storage class
|
||||
StorageClass *int `json:"storage_class" name:"storage_class"` // Required
|
||||
|
||||
}
|
||||
|
||||
// Validate validates the Transition.
|
||||
func (v *TransitionType) Validate() error {
|
||||
|
||||
if v.StorageClass == nil {
|
||||
return errors.ParameterRequiredError{
|
||||
ParameterName: "StorageClass",
|
||||
ParentName: "Transition",
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UploadsType presents Uploads.
|
||||
type UploadsType struct {
|
||||
// Object part created time
|
||||
|
vendor/github.com/yunify/qingstor-sdk-go/version.go (generated, vendored): 2 changed lines
@ -20,4 +20,4 @@
package sdk

// Version number.
const Version = "2.2.14"
const Version = "2.2.15"
vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go (generated, vendored): 110 changed lines
@ -32,6 +32,30 @@ func New(key [8]uint32, nonce [3]uint32) *Cipher {
|
||||
return &Cipher{key: key, nonce: nonce}
|
||||
}
|
||||
|
||||
// ChaCha20 constants spelling "expand 32-byte k"
|
||||
const (
|
||||
j0 uint32 = 0x61707865
|
||||
j1 uint32 = 0x3320646e
|
||||
j2 uint32 = 0x79622d32
|
||||
j3 uint32 = 0x6b206574
|
||||
)
|
||||
|
||||
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
|
||||
a += b
|
||||
d ^= a
|
||||
d = (d << 16) | (d >> 16)
|
||||
c += d
|
||||
b ^= c
|
||||
b = (b << 12) | (b >> 20)
|
||||
a += b
|
||||
d ^= a
|
||||
d = (d << 8) | (d >> 24)
|
||||
c += d
|
||||
b ^= c
|
||||
b = (b << 7) | (b >> 25)
|
||||
return a, b, c, d
|
||||
}
|
||||
|
||||
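Note: the refactor above hoists the ChaCha20 quarter-round out of XORKeyStream into a package-level quarterRound so it can also be used by the new HChaCha20 function further down. The standalone sketch below copies that function and checks it against the quarter-round test vector from RFC 8439 section 2.1.1; the expected values come from the RFC, not from this diff:

package main

import "fmt"

// quarterRound is the ChaCha20 quarter-round, identical to the one
// introduced in the hunk above.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d ^= a
	d = (d << 16) | (d >> 16)
	c += d
	b ^= c
	b = (b << 12) | (b >> 20)
	a += b
	d ^= a
	d = (d << 8) | (d >> 24)
	c += d
	b ^= c
	b = (b << 7) | (b >> 25)
	return a, b, c, d
}

func main() {
	// Test vector from RFC 8439, section 2.1.1.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
	// expected (per RFC 8439): ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}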
// XORKeyStream XORs each byte in the given slice with a byte from the
|
||||
// cipher's key stream. Dst and src must overlap entirely or not at all.
|
||||
//
|
||||
@ -73,6 +97,9 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
|
||||
return
|
||||
}
|
||||
if haveAsm {
|
||||
if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 {
|
||||
panic("chacha20: counter overflow")
|
||||
}
|
||||
s.xorKeyStreamAsm(dst, src)
|
||||
return
|
||||
}
|
||||
@ -85,59 +112,34 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
|
||||
copy(s.buf[len(s.buf)-64:], src[fin:])
|
||||
}
|
||||
|
||||
// qr calculates a quarter round
|
||||
qr := func(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
|
||||
a += b
|
||||
d ^= a
|
||||
d = (d << 16) | (d >> 16)
|
||||
c += d
|
||||
b ^= c
|
||||
b = (b << 12) | (b >> 20)
|
||||
a += b
|
||||
d ^= a
|
||||
d = (d << 8) | (d >> 24)
|
||||
c += d
|
||||
b ^= c
|
||||
b = (b << 7) | (b >> 25)
|
||||
return a, b, c, d
|
||||
}
|
||||
|
||||
// ChaCha20 constants
|
||||
const (
|
||||
j0 = 0x61707865
|
||||
j1 = 0x3320646e
|
||||
j2 = 0x79622d32
|
||||
j3 = 0x6b206574
|
||||
)
|
||||
|
||||
// pre-calculate most of the first round
|
||||
s1, s5, s9, s13 := qr(j1, s.key[1], s.key[5], s.nonce[0])
|
||||
s2, s6, s10, s14 := qr(j2, s.key[2], s.key[6], s.nonce[1])
|
||||
s3, s7, s11, s15 := qr(j3, s.key[3], s.key[7], s.nonce[2])
|
||||
s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0])
|
||||
s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1])
|
||||
s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2])
|
||||
|
||||
n := len(src)
|
||||
src, dst = src[:n:n], dst[:n:n] // BCE hint
|
||||
for i := 0; i < n; i += 64 {
|
||||
// calculate the remainder of the first round
|
||||
s0, s4, s8, s12 := qr(j0, s.key[0], s.key[4], s.counter)
|
||||
s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter)
|
||||
|
||||
// execute the second round
|
||||
x0, x5, x10, x15 := qr(s0, s5, s10, s15)
|
||||
x1, x6, x11, x12 := qr(s1, s6, s11, s12)
|
||||
x2, x7, x8, x13 := qr(s2, s7, s8, s13)
|
||||
x3, x4, x9, x14 := qr(s3, s4, s9, s14)
|
||||
x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15)
|
||||
x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12)
|
||||
x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13)
|
||||
x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14)
|
||||
|
||||
// execute the remaining 18 rounds
|
||||
for i := 0; i < 9; i++ {
|
||||
x0, x4, x8, x12 = qr(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = qr(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = qr(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = qr(x3, x7, x11, x15)
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
x0, x5, x10, x15 = qr(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = qr(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = qr(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = qr(x3, x4, x9, x14)
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
x0 += j0
|
||||
@ -234,3 +236,29 @@ func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
|
||||
}
|
||||
s.XORKeyStream(out, in)
|
||||
}
|
||||
|
||||
// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a
|
||||
// nonce. It should only be used as part of the XChaCha20 construction.
|
||||
func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 {
|
||||
x0, x1, x2, x3 := j0, j1, j2, j3
|
||||
x4, x5, x6, x7 := key[0], key[1], key[2], key[3]
|
||||
x8, x9, x10, x11 := key[4], key[5], key[6], key[7]
|
||||
x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3]
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
var out [8]uint32
|
||||
out[0], out[1], out[2], out[3] = x0, x1, x2, x3
|
||||
out[4], out[5], out[6], out[7] = x12, x13, x14, x15
|
||||
return out
|
||||
}
|
||||
|
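This chacha20 package is internal to x/crypto and cannot be imported directly. As a sanity check on the refactored quarterRound above, here is a standalone sketch that runs the same quarter round against the test vector from RFC 7539, section 2.1.1:

    package main

    import "fmt"

    // quarterRound is the same ARX quarter round used by the vendored chacha20 code.
    func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
        a += b
        d ^= a
        d = (d << 16) | (d >> 16)
        c += d
        b ^= c
        b = (b << 12) | (b >> 20)
        a += b
        d ^= a
        d = (d << 8) | (d >> 24)
        c += d
        b ^= c
        b = (b << 7) | (b >> 25)
        return a, b, c, d
    }

    func main() {
        // Test vector from RFC 7539, section 2.1.1.
        a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
        fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
        // Expected: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
    }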
vendor/golang.org/x/crypto/internal/subtle/aliasing.go (generated, vendored, 2 changed lines)
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+// +build !appengine
+
// Package subtle implements functions that are often useful in cryptographic
// code but require careful thought to use correctly.
package subtle // import "golang.org/x/crypto/internal/subtle"
vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go (generated, vendored, new file, 35 lines)
@@ -0,0 +1,35 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// Package subtle implements functions that are often useful in cryptographic
+// code but require careful thought to use correctly.
+package subtle // import "golang.org/x/crypto/internal/subtle"
+
+// This is the Google App Engine standard variant based on reflect
+// because the unsafe package and cgo are disallowed.
+
+import "reflect"
+
+// AnyOverlap reports whether x and y share memory at any (not necessarily
+// corresponding) index. The memory beyond the slice length is ignored.
+func AnyOverlap(x, y []byte) bool {
+	return len(x) > 0 && len(y) > 0 &&
+		reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
+		reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
+}
+
+// InexactOverlap reports whether x and y share memory at any non-corresponding
+// index. The memory beyond the slice length is ignored. Note that x and y can
+// have different lengths and still not have any inexact overlap.
+//
+// InexactOverlap can be used to implement the requirements of the crypto/cipher
+// AEAD, Block, BlockMode and Stream interfaces.
+func InexactOverlap(x, y []byte) bool {
+	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
+		return false
+	}
+	return AnyOverlap(x, y)
+}
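These helpers live in an internal package too, so the sketch below copies the reflect-based versions from the new file to show what they report for a few slice pairs: exact aliasing (same start) is allowed, shifted aliasing is not.

    package main

    import (
        "fmt"
        "reflect"
    )

    // anyOverlap and inexactOverlap are local copies of the helpers above.
    func anyOverlap(x, y []byte) bool {
        return len(x) > 0 && len(y) > 0 &&
            reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
            reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
    }

    func inexactOverlap(x, y []byte) bool {
        if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
            return false
        }
        return anyOverlap(x, y)
    }

    func main() {
        buf := make([]byte, 32)

        dst, src := buf[:16], buf[:16]            // same backing array, same start
        fmt.Println(inexactOverlap(dst, src))     // false: in-place use is fine

        shifted := buf[4:20]                      // same backing array, shifted start
        fmt.Println(inexactOverlap(dst, shifted)) // true: would corrupt a cipher's output

        other := make([]byte, 16)                 // independent buffers never overlap
        fmt.Println(inexactOverlap(dst, other))   // false
    }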
vendor/golang.org/x/crypto/ssh/keys.go (generated, vendored, 9 changed lines)
@@ -803,7 +803,7 @@ func encryptedBlock(block *pem.Block) bool {
}

// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
-// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
+// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
@@ -817,6 +817,9 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
	switch block.Type {
	case "RSA PRIVATE KEY":
		return x509.ParsePKCS1PrivateKey(block.Bytes)
+	// RFC5208 - https://tools.ietf.org/html/rfc5208
+	case "PRIVATE KEY":
+		return x509.ParsePKCS8PrivateKey(block.Bytes)
	case "EC PRIVATE KEY":
		return x509.ParseECPrivateKey(block.Bytes)
	case "DSA PRIVATE KEY":
@@ -900,8 +903,8 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
// Implemented based on the documentation at
// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
-	magic := append([]byte("openssh-key-v1"), 0)
-	if !bytes.Equal(magic, key[0:len(magic)]) {
+	const magic = "openssh-key-v1\x00"
+	if len(key) < len(magic) || string(key[:len(magic)]) != magic {
		return nil, errors.New("ssh: invalid openssh private key format")
	}
	remaining := key[len(magic):]
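With the change above, ssh.ParseRawPrivateKey also accepts unencrypted PKCS#8 ("PRIVATE KEY") blocks. A hedged usage sketch; the key path is a placeholder:

    package main

    import (
        "crypto/ecdsa"
        "crypto/rsa"
        "fmt"
        "io/ioutil"
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        // Placeholder path; any unencrypted PEM private key (PKCS#1, PKCS#8,
        // EC, DSA or OpenSSH format) should work here.
        pemBytes, err := ioutil.ReadFile("testdata/id_key.pem")
        if err != nil {
            log.Fatal(err)
        }

        raw, err := ssh.ParseRawPrivateKey(pemBytes)
        if err != nil {
            log.Fatal(err)
        }

        switch k := raw.(type) {
        case *rsa.PrivateKey:
            fmt.Println("RSA key,", k.N.BitLen(), "bits")
        case *ecdsa.PrivateKey:
            fmt.Println("ECDSA key on", k.Curve.Params().Name)
        default:
            fmt.Printf("other key type: %T\n", raw)
        }

        // A raw key can then be wrapped as a signer for an ssh.ClientConfig.
        if signer, err := ssh.NewSignerFromKey(raw); err == nil {
            fmt.Println("public key type:", signer.PublicKey().Type())
        }
    }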
vendor/golang.org/x/net/html/parse.go (generated, vendored, 106 changed lines)
@@ -209,27 +209,6 @@ loop:
	p.oe = p.oe[:i+1]
}

-// generateAllImpliedEndTags pops nodes off the stack of open elements as long as
-// the top node has a tag name of caption, colgroup, dd, div, dt, li, optgroup, option, p, rb,
-// rp, rt, rtc, span, tbody, td, tfoot, th, thead or tr.
-func (p *parser) generateAllImpliedEndTags() {
-	var i int
-	for i = len(p.oe) - 1; i >= 0; i-- {
-		n := p.oe[i]
-		if n.Type == ElementNode {
-			switch n.DataAtom {
-			// TODO: remove this divergence from the HTML5 spec
-			case a.Caption, a.Colgroup, a.Dd, a.Div, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb,
-				a.Rp, a.Rt, a.Rtc, a.Span, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-				continue
-			}
-		}
-		break
-	}
-
-	p.oe = p.oe[:i+1]
-}
-
// addChild adds a child node n to the top element, and pushes n onto the stack
// of open elements if it is an element node.
func (p *parser) addChild(n *Node) {
@@ -276,7 +255,7 @@ func (p *parser) fosterParent(n *Node) {
		}
	}

-	if template != nil && (table == nil || j < i) {
+	if template != nil && (table == nil || j > i) {
		template.AppendChild(n)
		return
	}
@@ -679,11 +658,16 @@ func inHeadIM(p *parser) bool {
		if !p.oe.contains(a.Template) {
			return true
		}
-		p.generateAllImpliedEndTags()
-		if n := p.oe.top(); n.DataAtom != a.Template {
-			return true
+		// TODO: remove this divergence from the HTML5 spec.
+		//
+		// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
+		p.generateImpliedEndTags()
+		for i := len(p.oe) - 1; i >= 0; i-- {
+			if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template {
+				p.oe = p.oe[:i]
+				break
+			}
		}
-		p.popUntil(defaultScope, a.Template)
		p.clearActiveFormattingElements()
		p.templateStack.pop()
		p.resetInsertionMode()
@@ -860,9 +844,13 @@ func inBodyIM(p *parser) bool {
		// The newline, if any, will be dealt with by the TextToken case.
		p.framesetOK = false
	case a.Form:
-		if p.oe.contains(a.Template) || p.form == nil {
+		if p.form != nil && !p.oe.contains(a.Template) {
			// Ignore the token
			return true
		}
		p.popUntil(buttonScope, a.P)
		p.addElement()
		if !p.oe.contains(a.Template) {
			p.form = p.top()
		}
	case a.Li:
@@ -996,6 +984,14 @@ func inBodyIM(p *parser) bool {
		p.acknowledgeSelfClosingTag()
		p.popUntil(buttonScope, a.P)
		p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+		if p.form == nil {
+			// NOTE: The 'isindex' element has been removed,
+			// and the 'template' element has not been designed to be
+			// collaborative with the index element.
+			//
+			// Ignore the token.
+			return true
+		}
		if action != "" {
			p.form.Attr = []Attribute{{Key: "action", Val: action}}
		}
@@ -1070,13 +1066,7 @@ func inBodyIM(p *parser) bool {
			p.acknowledgeSelfClosingTag()
		}
		return true
-	case a.Frame:
-		// TODO: remove this divergence from the HTML5 spec.
-		if p.oe.contains(a.Template) {
-			p.addElement()
-			return true
-		}
-	case a.Caption, a.Col, a.Colgroup, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+	case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
		// Ignore the token.
	default:
		p.reconstructActiveFormattingElements()
@@ -1098,12 +1088,13 @@ func inBodyIM(p *parser) bool {
		p.popUntil(defaultScope, p.tok.DataAtom)
	case a.Form:
-		if p.oe.contains(a.Template) {
+		if !p.oe.contains(a.Form) {
			i := p.indexOfElementInScope(defaultScope, a.Form)
			if i == -1 {
				// Ignore the token.
				return true
			}
			p.generateImpliedEndTags()
-			if p.tok.DataAtom == a.Form {
+			if p.oe[i].DataAtom != a.Form {
				// Ignore the token.
				return true
			}
@@ -1346,9 +1337,6 @@ func textIM(p *parser) bool {
// Section 12.2.6.4.9.
func inTableIM(p *parser) bool {
	switch p.tok.Type {
-	case ErrorToken:
-		// Stop parsing.
-		return true
	case TextToken:
		p.tok.Data = strings.Replace(p.tok.Data, "\x00", "", -1)
		switch p.oe.top().DataAtom {
@@ -1443,6 +1431,8 @@ func inTableIM(p *parser) bool {
	case DoctypeToken:
		// Ignore the token.
		return true
+	case ErrorToken:
+		return inBodyIM(p)
	}

	p.fosterParenting = true
@@ -1545,6 +1535,8 @@ func inColumnGroupIM(p *parser) bool {
		case a.Template:
			return inHeadIM(p)
		}
+	case ErrorToken:
+		return inBodyIM(p)
	}
	if p.oe.top().DataAtom != a.Colgroup {
		return true
@@ -1709,9 +1701,6 @@ func inCellIM(p *parser) bool {
// Section 12.2.6.4.16.
func inSelectIM(p *parser) bool {
	switch p.tok.Type {
-	case ErrorToken:
-		// Stop parsing.
-		return true
	case TextToken:
		p.addText(strings.Replace(p.tok.Data, "\x00", "", -1))
	case StartTagToken:
@@ -1775,6 +1764,8 @@ func inSelectIM(p *parser) bool {
	case DoctypeToken:
		// Ignore the token.
		return true
+	case ErrorToken:
+		return inBodyIM(p)
	}

	return true
@@ -1841,17 +1832,28 @@ func inTemplateIM(p *parser) bool {
			// Ignore the token.
			return true
		}
	}
	case ErrorToken:
		if !p.oe.contains(a.Template) {
			// Ignore the token.
			return true
		}
-		p.popUntil(defaultScope, a.Template)
+		// TODO: remove this divergence from the HTML5 spec.
+		//
+		// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
+		p.generateImpliedEndTags()
+		for i := len(p.oe) - 1; i >= 0; i-- {
+			if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template {
+				p.oe = p.oe[:i]
+				break
+			}
+		}
		p.clearActiveFormattingElements()
		p.templateStack.pop()
		p.resetInsertionMode()
		return false
	}
	return false
}

// Section 12.2.6.4.19.
func afterBodyIM(p *parser) bool {
@@ -1923,11 +1925,6 @@ func inFramesetIM(p *parser) bool {
			p.acknowledgeSelfClosingTag()
		case a.Noframes:
			return inHeadIM(p)
-		case a.Template:
-			// TODO: remove this divergence from the HTML5 spec.
-			//
-			// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
-			return inTemplateIM(p)
		}
	case EndTagToken:
		switch p.tok.DataAtom {
@@ -2220,6 +2217,15 @@ func (p *parser) parse() error {
}

// Parse returns the parse tree for the HTML from the given Reader.
+//
+// It implements the HTML5 parsing algorithm
+// (https://html.spec.whatwg.org/multipage/syntax.html#tree-construction),
+// which is very complicated. The resultant tree can contain implicitly created
+// nodes that have no explicit <tag> listed in r's data, and nodes' parents can
+// differ from the nesting implied by a naive processing of start and end
+// <tag>s. Conversely, explicit <tag>s in r's data can be silently dropped,
+// with no corresponding node in the resulting tree.
+//
// The input is assumed to be UTF-8 encoded.
func Parse(r io.Reader) (*Node, error) {
	p := &parser{
@@ -2241,6 +2247,8 @@ func Parse(r io.Reader) (*Node, error) {
// ParseFragment parses a fragment of HTML and returns the nodes that were
// found. If the fragment is the InnerHTML for an existing element, pass that
// element in context.
+//
+// It has the same intricacies as Parse.
func ParseFragment(r io.Reader, context *Node) ([]*Node, error) {
	contextTag := ""
	if context != nil {
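The expanded Parse documentation above is easy to see in action: parsing a small fragment yields implicitly created html, head and body elements that never appear in the input. A short sketch using the public golang.org/x/net/html API:

    package main

    import (
        "fmt"
        "strings"

        "golang.org/x/net/html"
    )

    func main() {
        // No <html>, <head> or <body> in the input...
        doc, err := html.Parse(strings.NewReader(`<p>Hello, <b>world</b></p>`))
        if err != nil {
            panic(err)
        }

        // ...yet the tree contains them, created implicitly by the parser.
        var walk func(n *html.Node, depth int)
        walk = func(n *html.Node, depth int) {
            if n.Type == html.ElementNode {
                fmt.Printf("%s<%s>\n", strings.Repeat("  ", depth), n.Data)
            }
            for c := n.FirstChild; c != nil; c = c.NextSibling {
                walk(c, depth+1)
            }
        }
        walk(doc, 0)
        // Prints <html>, <head>, <body>, <p>, <b>, nested by depth.
    }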
vendor/golang.org/x/net/http2/client_conn_pool.go (generated, vendored, 28 changed lines)
@@ -52,9 +52,31 @@ const (
	noDialOnMiss = false
)

+// shouldTraceGetConn reports whether getClientConn should call any
+// ClientTrace.GetConn hook associated with the http.Request.
+//
+// This complexity is needed to avoid double calls of the GetConn hook
+// during the back-and-forth between net/http and x/net/http2 (when the
+// net/http.Transport is upgraded to also speak http2), as well as support
+// the case where x/net/http2 is being used directly.
+func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
+	// If our Transport wasn't made via ConfigureTransport, always
+	// trace the GetConn hook if provided, because that means the
+	// http2 package is being used directly and it's the one
+	// dialing, as opposed to net/http.
+	if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
+		return true
+	}
+	// Otherwise, only use the GetConn hook if this connection has
+	// been used previously for other requests. For fresh
+	// connections, the net/http package does the dialing.
+	return !st.freshConn
+}
+
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	if isConnectionCloseRequest(req) && dialOnMiss {
		// It gets its own connection.
+		traceGetConn(req, addr)
		const singleUse = true
		cc, err := p.t.dialClientConn(addr, singleUse)
		if err != nil {
@@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	}
	p.mu.Lock()
	for _, cc := range p.conns[addr] {
-		if cc.CanTakeNewRequest() {
+		if st := cc.idleState(); st.canTakeNewRequest {
+			if p.shouldTraceGetConn(st) {
+				traceGetConn(req, addr)
+			}
			p.mu.Unlock()
			return cc, nil
		}
@@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
		p.mu.Unlock()
		return nil, ErrNoCachedConn
	}
+	traceGetConn(req, addr)
	call := p.getStartDialLocked(addr)
	p.mu.Unlock()
	<-call.done
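The shouldTraceGetConn change above is about when the httptrace GetConn hook fires for pooled HTTP/2 connections. A hedged sketch of observing those hooks from client code, using the standard net/http/httptrace API; the URL is a placeholder:

    package main

    import (
        "fmt"
        "log"
        "net/http"
        "net/http/httptrace"
    )

    func main() {
        req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
        if err != nil {
            log.Fatal(err)
        }

        trace := &httptrace.ClientTrace{
            // GetConn should fire exactly once per request, whether the
            // connection is dialed by net/http or reused from the http2 pool.
            GetConn: func(hostPort string) { fmt.Println("GetConn", hostPort) },
            GotConn: func(info httptrace.GotConnInfo) {
                fmt.Println("GotConn reused:", info.Reused)
            },
        }
        req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        resp.Body.Close()
        fmt.Println("proto:", resp.Proto) // "HTTP/2.0" when the server supports it
    }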
vendor/golang.org/x/net/http2/configure_transport.go (generated, vendored, 8 changed lines)
@@ -57,7 +57,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {

// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
-func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("%v", e)
@@ -69,10 +69,12 @@ func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error)

// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already has a cached connection to the host.
-type noDialH2RoundTripper struct{ t *Transport }
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type noDialH2RoundTripper struct{ *Transport }

func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
-	res, err := rt.t.RoundTrip(req)
+	res, err := rt.Transport.RoundTrip(req)
	if isNoCachedConnError(err) {
		return nil, http.ErrSkipAltProtocol
	}
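noDialH2RoundTripper is the glue that ConfigureTransport registers for the "https" scheme; callers only see the public entry point. A hedged sketch of wiring HTTP/2 onto an existing net/http.Transport; the URL is a placeholder:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        // Start from a plain net/http transport with whatever TLS or proxy
        // settings the application already uses.
        t := &http.Transport{}

        // ConfigureTransport teaches t to speak HTTP/2 over its existing
        // TLS connections; net/http keeps dialing fresh connections while
        // the http2 package serves requests over negotiated h2 ones.
        if err := http2.ConfigureTransport(t); err != nil {
            log.Fatal(err)
        }

        client := &http.Client{Transport: t}
        resp, err := client.Get("https://example.com/") // placeholder URL
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Proto) // "HTTP/2.0" when the server negotiates h2
    }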
vendor/golang.org/x/net/http2/flow.go (generated, vendored, 10 changed lines)
@@ -41,10 +41,10 @@ func (f *flow) take(n int32) {
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
-	remain := (1<<31 - 1) - f.n
-	if n > remain {
-		return false
-	}
-	f.n += n
-	return true
+	sum := f.n + n
+	if (sum > n) == (f.n > 0) {
+		f.n = sum
+		return true
+	}
+	return false
}
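The rewritten add detects signed overflow in either direction instead of assuming the current window f.n is non-negative. A small standalone sketch of the same idiom with a few worked values:

    package main

    import (
        "fmt"
        "math"
    )

    // add mirrors the overflow check used by http2's flow.add: the sum is kept
    // only when adding n did not overflow int32 in either direction.
    func add(cur, n int32) (int32, bool) {
        sum := cur + n
        if (sum > n) == (cur > 0) {
            return sum, true
        }
        return cur, false
    }

    func main() {
        fmt.Println(add(100, 50))           // 150 true
        fmt.Println(add(-100, 50))          // -50 true (a window can be negative)
        fmt.Println(add(1<<30, 1<<30))      // 1073741824 false: would exceed 2^31-1
        fmt.Println(add(math.MinInt32, -1)) // -2147483648 false: would underflow
    }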
vendor/golang.org/x/net/http2/frame.go (generated, vendored, 63 changed lines)
@@ -733,32 +733,67 @@ func (f *SettingsFrame) IsAck() bool {
	return f.FrameHeader.Flags.Has(FlagSettingsAck)
}

-func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) {
	f.checkValid()
-	buf := f.p
-	for len(buf) > 0 {
-		settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
-		if settingID == s {
-			return binary.BigEndian.Uint32(buf[2:6]), true
+	for i := 0; i < f.NumSettings(); i++ {
+		if s := f.Setting(i); s.ID == id {
+			return s.Val, true
		}
-		buf = buf[6:]
	}
	return 0, false
}

+// Setting returns the setting from the frame at the given 0-based index.
+// The index must be >= 0 and less than f.NumSettings().
+func (f *SettingsFrame) Setting(i int) Setting {
+	buf := f.p
+	return Setting{
+		ID:  SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
+		Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
+	}
+}
+
+func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 }
+
+// HasDuplicates reports whether f contains any duplicate setting IDs.
+func (f *SettingsFrame) HasDuplicates() bool {
+	num := f.NumSettings()
+	if num == 0 {
+		return false
+	}
+	// If it's small enough (the common case), just do the n^2
+	// thing and avoid a map allocation.
+	if num < 10 {
+		for i := 0; i < num; i++ {
+			idi := f.Setting(i).ID
+			for j := i + 1; j < num; j++ {
+				idj := f.Setting(j).ID
+				if idi == idj {
+					return true
+				}
+			}
+		}
+		return false
+	}
+	seen := map[SettingID]bool{}
+	for i := 0; i < num; i++ {
+		id := f.Setting(i).ID
+		if seen[id] {
+			return true
+		}
+		seen[id] = true
+	}
+	return false
+}
+
// ForeachSetting runs fn for each setting.
// It stops and returns the first error.
func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
	f.checkValid()
-	buf := f.p
-	for len(buf) > 0 {
-		if err := fn(Setting{
-			SettingID(binary.BigEndian.Uint16(buf[:2])),
-			binary.BigEndian.Uint32(buf[2:6]),
-		}); err != nil {
+	for i := 0; i < f.NumSettings(); i++ {
+		if err := fn(f.Setting(i)); err != nil {
			return err
		}
-		buf = buf[6:]
	}
	return nil
}
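The new Setting and NumSettings accessors now back both Value and ForeachSetting. A hedged round-trip sketch using the exported Framer API of golang.org/x/net/http2 (frame.go is part of the public package):

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "golang.org/x/net/http2"
    )

    func main() {
        var buf bytes.Buffer
        fr := http2.NewFramer(&buf, &buf) // write and read over the same buffer

        // Write a SETTINGS frame with two settings.
        err := fr.WriteSettings(
            http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 100},
            http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20},
        )
        if err != nil {
            log.Fatal(err)
        }

        // Read it back and walk the settings with ForeachSetting.
        f, err := fr.ReadFrame()
        if err != nil {
            log.Fatal(err)
        }
        sf := f.(*http2.SettingsFrame)
        fmt.Println("settings in frame:", sf.NumSettings())
        _ = sf.ForeachSetting(func(s http2.Setting) error {
            fmt.Println(s.ID, "=", s.Val)
            return nil
        })

        if v, ok := sf.Value(http2.SettingInitialWindowSize); ok {
            fmt.Println("initial window size:", v)
        }
    }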
Some files were not shown because too many files have changed in this diff.